diff --git a/lib/ansible/cli/__init__.py b/lib/ansible/cli/__init__.py
index 2178d74e042..23c0fb95b96 100644
--- a/lib/ansible/cli/__init__.py
+++ b/lib/ansible/cli/__init__.py
@@ -284,7 +284,8 @@ class CLI(with_metaclass(ABCMeta, object)):
@staticmethod
def base_parser(usage="", output_opts=False, runas_opts=False, meta_opts=False, runtask_opts=False, vault_opts=False, module_opts=False,
- async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False, runas_prompt_opts=False):
+ async_opts=False, connect_opts=False, subset_opts=False, check_opts=False, inventory_opts=False, epilog=None, fork_opts=False,
+ runas_prompt_opts=False):
''' create an options parser for most ansible scripts '''
# TODO: implement epilog parsing
@@ -448,7 +449,9 @@ class CLI(with_metaclass(ABCMeta, object)):
if hasattr(self.options, 'tags') and self.options.tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.tags) > 1:
- display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
+ display.deprecated('Specifying --tags multiple times on the command line currently uses the last specified value. '
+ 'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
+ version=2.5, removed=False)
self.options.tags = [self.options.tags[-1]]
tags = set()
@@ -460,7 +463,9 @@ class CLI(with_metaclass(ABCMeta, object)):
if hasattr(self.options, 'skip_tags') and self.options.skip_tags:
if not C.MERGE_MULTIPLE_CLI_TAGS:
if len(self.options.skip_tags) > 1:
- display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.', version=2.5, removed=False)
+ display.deprecated('Specifying --skip-tags multiple times on the command line currently uses the last specified value. '
+ 'In 2.4, values will be merged instead. Set merge_multiple_cli_tags=True in ansible.cfg to get this behavior now.',
+ version=2.5, removed=False)
self.options.skip_tags = [self.options.skip_tags[-1]]
skip_tags = set()
@@ -634,7 +639,8 @@ class CLI(with_metaclass(ABCMeta, object)):
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError as e:
- raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
+ raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, "
+ "remove the executable bit from the file." % (' '.join(this_path), e))
stdout, stderr = p.communicate()
if p.returncode != 0:
raise AnsibleError("Vault password script %s returned non-zero (%s): %s" % (this_path, p.returncode, p.stderr))
diff --git a/lib/ansible/cli/galaxy.py b/lib/ansible/cli/galaxy.py
index 143fd80ad85..3fea680c439 100644
--- a/lib/ansible/cli/galaxy.py
+++ b/lib/ansible/cli/galaxy.py
@@ -71,7 +71,8 @@ class GalaxyCLI(CLI):
# common
self.parser.add_option('-s', '--server', dest='api_server', default=C.GALAXY_SERVER, help='The API server destination')
- self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS, help='Ignore SSL certificate validation errors.')
+ self.parser.add_option('-c', '--ignore-certs', action='store_true', dest='ignore_certs', default=C.GALAXY_IGNORE_CERTS,
+ help='Ignore SSL certificate validation errors.')
# specific to actions
if self.action == "delete":
@@ -79,20 +80,25 @@ class GalaxyCLI(CLI):
elif self.action == "import":
self.parser.set_usage("usage: %prog import [options] github_user github_repo")
self.parser.add_option('--no-wait', dest='wait', action='store_false', default=True, help='Don\'t wait for import results.')
- self.parser.add_option('--branch', dest='reference', help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
+ self.parser.add_option('--branch', dest='reference',
+ help='The name of a branch to import. Defaults to the repository\'s default branch (usually master)')
self.parser.add_option('--role-name', dest='role_name', help='The name the role should have, if different than the repo name')
- self.parser.add_option('--status', dest='check_status', action='store_true', default=False, help='Check the status of the most recent import request for given github_user/github_repo.')
+ self.parser.add_option('--status', dest='check_status', action='store_true', default=False,
+ help='Check the status of the most recent import request for given github_user/github_repo.')
elif self.action == "info":
self.parser.set_usage("usage: %prog info [options] role_name[,version]")
elif self.action == "init":
self.parser.set_usage("usage: %prog init [options] role_name")
- self.parser.add_option('-p', '--init-path', dest='init_path', default="./", help='The path in which the skeleton role will be created. The default is the current working directory.')
+ self.parser.add_option('-p', '--init-path', dest='init_path', default="./",
+ help='The path in which the skeleton role will be created. The default is the current working directory.')
self.parser.add_option('--container-enabled', dest='container_enabled', action='store_true', default=False,
help='Initialize the skeleton role with default contents for a Container Enabled role.')
- self.parser.add_option('--role-skeleton', dest='role_skeleton', default=None, help='The path to a role skeleton that the new role should be based upon.')
+ self.parser.add_option('--role-skeleton', dest='role_skeleton', default=None,
+ help='The path to a role skeleton that the new role should be based upon.')
elif self.action == "install":
self.parser.set_usage("usage: %prog install [options] [-r FILE | role_name(s)[,version] | scm+role_repo_url[,version] | tar_file(s)]")
- self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False, help='Ignore errors and continue with the next specified role.')
+ self.parser.add_option('-i', '--ignore-errors', dest='ignore_errors', action='store_true', default=False,
+ help='Ignore errors and continue with the next specified role.')
self.parser.add_option('-n', '--no-deps', dest='no_deps', action='store_true', default=False, help='Don\'t download roles listed as dependencies')
self.parser.add_option('-r', '--role-file', dest='role_file', help='A file containing a list of roles to be imported')
elif self.action == "remove":
@@ -103,13 +109,15 @@ class GalaxyCLI(CLI):
self.parser.set_usage("usage: %prog login [options]")
self.parser.add_option('--github-token', dest='token', default=None, help='Identify with github token rather than username and password.')
elif self.action == "search":
- self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] [--author username]")
+ self.parser.set_usage("usage: %prog search [searchterm1 searchterm2] [--galaxy-tags galaxy_tag1,galaxy_tag2] [--platforms platform1,platform2] "
+ "[--author username]")
self.parser.add_option('--platforms', dest='platforms', help='list of OS platforms to filter by')
self.parser.add_option('--galaxy-tags', dest='galaxy_tags', help='list of galaxy tags to filter by')
self.parser.add_option('--author', dest='author', help='GitHub username')
elif self.action == "setup":
self.parser.set_usage("usage: %prog setup [options] source github_user github_repo secret")
- self.parser.add_option('--remove', dest='remove_id', default=None, help='Remove the integration matching the provided ID value. Use --list to see ID values.')
+ self.parser.add_option('--remove', dest='remove_id', default=None,
+ help='Remove the integration matching the provided ID value. Use --list to see ID values.')
self.parser.add_option('--list', dest="setup_list", action='store_true', default=False, help='List all of your integrations.')
# options that apply to more than one action
@@ -119,8 +127,10 @@ class GalaxyCLI(CLI):
if self.action not in ("delete","import","init","login","setup"):
# NOTE: while the option type=str, the default is a list, and the
# callback will set the value to a list.
- self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str, default=C.DEFAULT_ROLES_PATH,
- help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg file (/etc/ansible/roles if not configured)')
+ self.parser.add_option('-p', '--roles-path', dest='roles_path', action="callback", callback=CLI.expand_paths, type=str,
+ default=C.DEFAULT_ROLES_PATH,
+ help='The path to the directory containing your roles. The default is the roles_path configured in your ansible.cfg '
+ 'file (/etc/ansible/roles if not configured)')
if self.action in ("init","install"):
self.parser.add_option('-f', '--force', dest='force', action='store_true', default=False, help='Force overwriting an existing role')
diff --git a/lib/ansible/cli/playbook.py b/lib/ansible/cli/playbook.py
index 9b50754e702..073379b1abf 100644
--- a/lib/ansible/cli/playbook.py
+++ b/lib/ansible/cli/playbook.py
@@ -149,7 +149,8 @@ class PlaybookCLI(CLI):
self._flush_cache(inventory, variable_manager)
# create the playbook executor, which manages running the plays via a task queue manager
- pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options, passwords=passwords)
+ pbex = PlaybookExecutor(playbooks=self.args, inventory=inventory, variable_manager=variable_manager, loader=loader, options=self.options,
+ passwords=passwords)
results = pbex.run()
diff --git a/lib/ansible/constants.py b/lib/ansible/constants.py
index 5f1ee184df8..7abf402968e 100644
--- a/lib/ansible/constants.py
+++ b/lib/ansible/constants.py
@@ -196,7 +196,8 @@ MERGE_MULTIPLE_CLI_TAGS = get_config(p, DEFAULTS, 'merge_multiple_cli_tags', 'AN
DEFAULT_DEBUG = get_config(p, DEFAULTS, 'debug', 'ANSIBLE_DEBUG', False, value_type='boolean')
DEFAULT_VERBOSITY = get_config(p, DEFAULTS, 'verbosity', 'ANSIBLE_VERBOSITY', 0, value_type='integer')
DEFAULT_HOST_LIST = get_config(p, DEFAULTS,'inventory', 'ANSIBLE_INVENTORY', '/etc/ansible/hosts', value_type='path')
-DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist', expand_relative_paths=True)
+DEFAULT_ROLES_PATH = get_config(p, DEFAULTS, 'roles_path', 'ANSIBLE_ROLES_PATH', '/etc/ansible/roles', value_type='pathlist',
+ expand_relative_paths=True)
DEFAULT_REMOTE_TMP = get_config(p, DEFAULTS, 'remote_tmp', 'ANSIBLE_REMOTE_TEMP', '~/.ansible/tmp')
DEFAULT_LOCAL_TMP = get_config(p, DEFAULTS, 'local_tmp', 'ANSIBLE_LOCAL_TEMP', '~/.ansible/tmp', value_type='tmppath')
DEFAULT_MODULE_NAME = get_config(p, DEFAULTS, 'module_name', None, 'command')
@@ -230,7 +231,8 @@ DEFAULT_GATHER_SUBSET = get_config(p, DEFAULTS, 'gather_subset', 'ANSIBLE_GA
DEFAULT_GATHER_TIMEOUT = get_config(p, DEFAULTS, 'gather_timeout', 'ANSIBLE_GATHER_TIMEOUT', 10, value_type='integer')
DEFAULT_LOG_PATH = get_config(p, DEFAULTS, 'log_path', 'ANSIBLE_LOG_PATH', '', value_type='path')
DEFAULT_FORCE_HANDLERS = get_config(p, DEFAULTS, 'force_handlers', 'ANSIBLE_FORCE_HANDLERS', False, value_type='boolean')
-DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE', ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
+DEFAULT_INVENTORY_IGNORE = get_config(p, DEFAULTS, 'inventory_ignore_extensions', 'ANSIBLE_INVENTORY_IGNORE',
+ ["~", ".orig", ".bak", ".ini", ".cfg", ".retry", ".pyc", ".pyo"], value_type='list')
DEFAULT_VAR_COMPRESSION_LEVEL = get_config(p, DEFAULTS, 'var_compression_level', 'ANSIBLE_VAR_COMPRESSION_LEVEL', 0, value_type='integer')
DEFAULT_INTERNAL_POLL_INTERVAL = get_config(p, DEFAULTS, 'internal_poll_interval', None, 0.001, value_type='float')
ERROR_ON_MISSING_HANDLER = get_config(p, DEFAULTS, 'error_on_missing_handler', 'ANSIBLE_ERROR_ON_MISSING_HANDLER', True, value_type='boolean')
@@ -264,11 +266,28 @@ DEFAULT_SUDO_FLAGS = get_config(p, DEFAULTS, 'sudo_flags', 'ANSIBLE_SUDO_
DEFAULT_ASK_SUDO_PASS = get_config(p, DEFAULTS, 'ask_sudo_pass', 'ANSIBLE_ASK_SUDO_PASS', False, value_type='boolean')
# Become
-BECOME_ERROR_STRINGS = {'sudo': 'Sorry, try again.', 'su': 'Authentication failure', 'pbrun': '', 'pfexec': '', 'doas': 'Permission denied', 'dzdo': '', 'ksu': 'Password incorrect'} #FIXME: deal with i18n
-BECOME_MISSING_STRINGS = {'sudo': 'sorry, a password is required to run sudo', 'su': '', 'pbrun': '', 'pfexec': '', 'doas': 'Authorization required', 'dzdo': '', 'ksu': 'No password given'} #FIXME: deal with i18n
+BECOME_ERROR_STRINGS = {
+ 'sudo': 'Sorry, try again.',
+ 'su': 'Authentication failure',
+ 'pbrun': '',
+ 'pfexec': '',
+ 'doas': 'Permission denied',
+ 'dzdo': '',
+ 'ksu': 'Password incorrect'
+} # FIXME: deal with i18n
+BECOME_MISSING_STRINGS = {
+ 'sudo': 'sorry, a password is required to run sudo',
+ 'su': '',
+ 'pbrun': '',
+ 'pfexec': '',
+ 'doas': 'Authorization required',
+ 'dzdo': '',
+ 'ksu': 'No password given'
+} # FIXME: deal with i18n
BECOME_METHODS = ['sudo','su','pbrun','pfexec','doas','dzdo','ksu','runas']
BECOME_ALLOW_SAME_USER = get_config(p, 'privilege_escalation', 'become_allow_same_user', 'ANSIBLE_BECOME_ALLOW_SAME_USER', False, value_type='boolean')
-DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD','sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo' ).lower()
+DEFAULT_BECOME_METHOD = get_config(p, 'privilege_escalation', 'become_method', 'ANSIBLE_BECOME_METHOD',
+ 'sudo' if DEFAULT_SUDO else 'su' if DEFAULT_SU else 'sudo').lower()
DEFAULT_BECOME = get_config(p, 'privilege_escalation', 'become', 'ANSIBLE_BECOME',False, value_type='boolean')
DEFAULT_BECOME_USER = get_config(p, 'privilege_escalation', 'become_user', 'ANSIBLE_BECOME_USER', 'root')
DEFAULT_BECOME_EXE = get_config(p, 'privilege_escalation', 'become_exe', 'ANSIBLE_BECOME_EXE', None)
@@ -283,23 +302,35 @@ DEFAULT_BECOME_ASK_PASS = get_config(p, 'privilege_escalation', 'become_ask_pa
# the module takes both, bad things could happen.
# In the future we should probably generalize this even further
# (mapping of param: squash field)
-DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS', "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
+DEFAULT_SQUASH_ACTIONS = get_config(p, DEFAULTS, 'squash_actions', 'ANSIBLE_SQUASH_ACTIONS',
+ "apk, apt, dnf, homebrew, openbsd_pkg, pacman, pkgng, yum, zypper", value_type='list')
# paths
-DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS', '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
-DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS', '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
-DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS', '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
-DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS', '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
-DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS', '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
+DEFAULT_ACTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'action_plugins', 'ANSIBLE_ACTION_PLUGINS',
+ '~/.ansible/plugins/action:/usr/share/ansible/plugins/action', value_type='pathlist')
+DEFAULT_CACHE_PLUGIN_PATH = get_config(p, DEFAULTS, 'cache_plugins', 'ANSIBLE_CACHE_PLUGINS',
+ '~/.ansible/plugins/cache:/usr/share/ansible/plugins/cache', value_type='pathlist')
+DEFAULT_CALLBACK_PLUGIN_PATH = get_config(p, DEFAULTS, 'callback_plugins', 'ANSIBLE_CALLBACK_PLUGINS',
+ '~/.ansible/plugins/callback:/usr/share/ansible/plugins/callback', value_type='pathlist')
+DEFAULT_CONNECTION_PLUGIN_PATH = get_config(p, DEFAULTS, 'connection_plugins', 'ANSIBLE_CONNECTION_PLUGINS',
+ '~/.ansible/plugins/connection:/usr/share/ansible/plugins/connection', value_type='pathlist')
+DEFAULT_LOOKUP_PLUGIN_PATH = get_config(p, DEFAULTS, 'lookup_plugins', 'ANSIBLE_LOOKUP_PLUGINS',
+ '~/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup', value_type='pathlist')
DEFAULT_MODULE_PATH = get_config(p, DEFAULTS, 'library', 'ANSIBLE_LIBRARY', None, value_type='pathlist')
DEFAULT_MODULE_UTILS_PATH = get_config(p, DEFAULTS, 'module_utils', 'ANSIBLE_MODULE_UTILS', None, value_type='pathlist')
-DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS', '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
-DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS', '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
-DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS', '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
-DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS', '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
-DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS', '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
-
-NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos'], value_type='list')
+DEFAULT_INVENTORY_PLUGIN_PATH = get_config(p, DEFAULTS, 'inventory_plugins', 'ANSIBLE_INVENTORY_PLUGINS',
+ '~/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory', value_type='pathlist')
+DEFAULT_VARS_PLUGIN_PATH = get_config(p, DEFAULTS, 'vars_plugins', 'ANSIBLE_VARS_PLUGINS',
+ '~/.ansible/plugins/vars:/usr/share/ansible/plugins/vars', value_type='pathlist')
+DEFAULT_FILTER_PLUGIN_PATH = get_config(p, DEFAULTS, 'filter_plugins', 'ANSIBLE_FILTER_PLUGINS',
+ '~/.ansible/plugins/filter:/usr/share/ansible/plugins/filter', value_type='pathlist')
+DEFAULT_TEST_PLUGIN_PATH = get_config(p, DEFAULTS, 'test_plugins', 'ANSIBLE_TEST_PLUGINS',
+ '~/.ansible/plugins/test:/usr/share/ansible/plugins/test', value_type='pathlist')
+DEFAULT_STRATEGY_PLUGIN_PATH = get_config(p, DEFAULTS, 'strategy_plugins', 'ANSIBLE_STRATEGY_PLUGINS',
+ '~/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy', value_type='pathlist')
+
+NETWORK_GROUP_MODULES = get_config(p, DEFAULTS, 'network_group_modules','NETWORK_GROUP_MODULES', ['eos', 'nxos', 'ios', 'iosxr', 'junos', 'vyos'],
+ value_type='list')
DEFAULT_STRATEGY = get_config(p, DEFAULTS, 'strategy', 'ANSIBLE_STRATEGY', 'linear')
DEFAULT_STDOUT_CALLBACK = get_config(p, DEFAULTS, 'stdout_callback', 'ANSIBLE_STDOUT_CALLBACK', 'default')
@@ -367,9 +398,11 @@ GALAXY_IGNORE_CERTS = get_config(p, 'galaxy', 'ignore_certs', 'ANSIBL
# this can be configured to blacklist SCMS but cannot add new ones unless the code is also updated
GALAXY_SCMS = get_config(p, 'galaxy', 'scms', 'ANSIBLE_GALAXY_SCMS', 'git, hg', value_type='list')
GALAXY_ROLE_SKELETON = get_config(p, 'galaxy', 'role_skeleton', 'ANSIBLE_GALAXY_ROLE_SKELETON', None, value_type='path')
-GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'], value_type='list')
+GALAXY_ROLE_SKELETON_IGNORE = get_config(p, 'galaxy', 'role_skeleton_ignore', 'ANSIBLE_GALAXY_ROLE_SKELETON_IGNORE', ['^.git$', '^.*/.git_keep$'],
+ value_type='list')
-STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS', ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
+STRING_TYPE_FILTERS = get_config(p, 'jinja2', 'dont_type_filters', 'ANSIBLE_STRING_TYPE_FILTERS',
+ ['string', 'to_json', 'to_nice_json', 'to_yaml', 'ppretty', 'json'], value_type='list' )
# colors
COLOR_HIGHLIGHT = get_config(p, 'colors', 'highlight', 'ANSIBLE_COLOR_HIGHLIGHT', 'white')
diff --git a/lib/ansible/errors/__init__.py b/lib/ansible/errors/__init__.py
index b2bf10dad28..3bfed1dbbf8 100644
--- a/lib/ansible/errors/__init__.py
+++ b/lib/ansible/errors/__init__.py
@@ -123,7 +123,11 @@ class AnsibleError(Exception):
elif ":{{" in stripped_line and "}}" in stripped_line:
error_message += YAML_COMMON_DICT_ERROR
# check for common unquoted colon mistakes
- elif len(target_line) and len(target_line) > 1 and len(target_line) > col_number and target_line[col_number] == ":" and target_line.count(':') > 1:
+ elif (len(target_line) and
+ len(target_line) > 1 and
+ len(target_line) > col_number and
+ target_line[col_number] == ":" and
+ target_line.count(':') > 1):
error_message += YAML_COMMON_UNQUOTED_COLON_ERROR
# otherwise, check for some common quoting mistakes
else:
@@ -138,7 +142,11 @@ class AnsibleError(Exception):
elif middle.startswith('"') and not middle.endswith('"'):
match = True
- if len(middle) > 0 and middle[0] in [ '"', "'" ] and middle[-1] in [ '"', "'" ] and target_line.count("'") > 2 or target_line.count('"') > 2:
+ if (len(middle) > 0 and
+ middle[0] in [ '"', "'" ] and
+ middle[-1] in [ '"', "'" ] and
+ target_line.count("'") > 2 or
+ target_line.count('"') > 2):
unbalanced = True
if match:
diff --git a/lib/ansible/executor/play_iterator.py b/lib/ansible/executor/play_iterator.py
index a226e52894a..616076cc5bd 100644
--- a/lib/ansible/executor/play_iterator.py
+++ b/lib/ansible/executor/play_iterator.py
@@ -80,20 +80,21 @@ class HostState:
ret.append(states[i])
return "|".join(ret)
- return "HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
- self.cur_block,
- self.cur_regular_task,
- self.cur_rescue_task,
- self.cur_always_task,
- _run_state_to_string(self.run_state),
- _failed_state_to_string(self.fail_state),
- self.pending_setup,
- self.tasks_child_state,
- self.rescue_child_state,
- self.always_child_state,
- self.did_rescue,
- self.did_start_at_task,
- )
+ return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
+ "rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
+ self.cur_block,
+ self.cur_regular_task,
+ self.cur_rescue_task,
+ self.cur_always_task,
+ _run_state_to_string(self.run_state),
+ _failed_state_to_string(self.fail_state),
+ self.pending_setup,
+ self.tasks_child_state,
+ self.rescue_child_state,
+ self.always_child_state,
+ self.did_rescue,
+ self.did_start_at_task,
+ ))
def __eq__(self, other):
if not isinstance(other, HostState):
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index 64f8a6c3e10..033b3743f4f 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -196,10 +196,12 @@ class TaskExecutor:
if self._task.loop in self._shared_loader_obj.lookup_loader:
if self._task.loop == 'first_found':
# first_found loops are special. If the item is undefined then we want to fall through to the next value rather than failing.
- loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False, convert_bare=False)
+ loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=False,
+ convert_bare=False)
loop_terms = [t for t in loop_terms if not templar._contains_vars(t)]
else:
- loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True, convert_bare=False)
+ loop_terms = listify_lookup_plugin_terms(terms=self._task.loop_args, templar=templar, loader=self._loader, fail_on_undefined=True,
+ convert_bare=False)
# get lookup
mylookup = self._shared_loader_obj.lookup_loader.get(self._task.loop, loader=self._loader, templar=templar)
@@ -468,7 +470,9 @@ class TaskExecutor:
self._task.args = variable_params
# get the connection and the handler for this execution
- if not self._connection or not getattr(self._connection, 'connected', False) or self._play_context.remote_addr != self._connection._play_context.remote_addr:
+ if (not self._connection or
+ not getattr(self._connection, 'connected', False) or
+ self._play_context.remote_addr != self._connection._play_context.remote_addr):
self._connection = self._get_connection(variables=variables, templar=templar)
hostvars = variables.get('hostvars', None)
if hostvars:
@@ -666,11 +670,14 @@ class TaskExecutor:
# have issues which result in a half-written/unparseable result
# file on disk, which manifests to the user as a timeout happening
# before it's time to timeout.
- if int(async_result.get('finished', 0)) == 1 or ('failed' in async_result and async_result.get('_ansible_parsed', False)) or 'skipped' in async_result:
+ if (int(async_result.get('finished', 0)) == 1 or
+ ('failed' in async_result and async_result.get('_ansible_parsed', False)) or
+ 'skipped' in async_result):
break
except Exception as e:
# Connections can raise exceptions during polling (eg, network bounce, reboot); these should be non-fatal.
- # On an exception, call the connection's reset method if it has one (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
+ # On an exception, call the connection's reset method if it has one
+ # (eg, drop/recreate WinRM connection; some reused connections are in a broken state)
display.vvvv("Exception during async poll, retrying... (%s)" % to_text(e))
display.debug("Async poll exception was:\n%s" % to_text(traceback.format_exc()))
try:
diff --git a/lib/ansible/galaxy/role.py b/lib/ansible/galaxy/role.py
index 67bdc95b57e..6b2579515a8 100644
--- a/lib/ansible/galaxy/role.py
+++ b/lib/ansible/galaxy/role.py
@@ -247,7 +247,9 @@ class GalaxyRole(object):
self.version = 'master'
elif self.version != 'master':
if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
- raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version, self.name, role_versions))
+ raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
+ self.name,
+ role_versions))
tmp_file = self.fetch(role_data)
@@ -306,7 +308,8 @@ class GalaxyRole(object):
else:
# using --force, remove the old path
if not self.remove():
- raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really want to put the role here." % self.path)
+ raise AnsibleError("%s doesn't appear to contain a role.\n please remove this directory manually if you really "
+ "want to put the role here." % self.path)
else:
os.makedirs(self.path)
diff --git a/lib/ansible/inventory/dir.py b/lib/ansible/inventory/dir.py
index c284dcf1b51..faccb79b853 100644
--- a/lib/ansible/inventory/dir.py
+++ b/lib/ansible/inventory/dir.py
@@ -62,7 +62,8 @@ def get_file_parser(hostsfile, groups, loader):
myerr.append('Attempted to execute "%s" as inventory script: %s' % (hostsfile, to_native(e)))
elif shebang_present:
- myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
+ myerr.append("The inventory file \'%s\' looks like it should be an executable inventory script, but is not marked executable. "
+ "Perhaps you want to correct this with `chmod +x %s`?" % (hostsfile, hostsfile))
# YAML/JSON
if not processed and not shebang_present and os.path.splitext(hostsfile)[-1] in C.YAML_FILENAME_EXTENSIONS:
diff --git a/lib/ansible/inventory/script.py b/lib/ansible/inventory/script.py
index da6bb214172..bf9d06c2f13 100644
--- a/lib/ansible/inventory/script.py
+++ b/lib/ansible/inventory/script.py
@@ -63,7 +63,8 @@ class InventoryScript:
try:
self.data = to_text(stdout, errors="strict")
except Exception as e:
- raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename), to_native(e)))
+ raise AnsibleError("inventory data from {0} contained characters that cannot be interpreted as UTF-8: {1}".format(to_native(self.filename),
+ to_native(e)))
# see comment about _meta below
self.host_vars_from_top = None
@@ -82,7 +83,8 @@ class InventoryScript:
if not isinstance(self.raw, Mapping):
sys.stderr.write(to_native(err) + "\n")
- raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted as a json dict".format(to_native(self.filename)))
+ raise AnsibleError("failed to parse executable inventory script results from {0}: data needs to be formatted "
+ "as a json dict".format(to_native(self.filename)))
group = None
for (group_name, data) in self.raw.items():
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index c84faee1a64..be778487ab7 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -620,7 +620,8 @@ def _load_params():
except KeyError:
# This helper does not have access to fail_json so we have to print
# json output on our own.
- print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", "failed": true}')
+ print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin. Unable to figure out what parameters were passed", '
+ '"failed": true}')
sys.exit(1)
def env_fallback(*args, **kwargs):
@@ -691,7 +692,9 @@ class AnsibleModule(object):
self._deprecations = []
self.aliases = {}
- self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity', '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility', '_ansible_socket']
+ self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
+ '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
+ '_ansible_socket']
if add_file_common_args:
for k, v in FILE_COMMON_ARGUMENTS.items():
@@ -2147,7 +2150,8 @@ class AnsibleModule(object):
# would end in something like:
# file = _os.path.join(dir, pre + name + suf)
# TypeError: can't concat bytes to str
- self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
+ self.fail_json(msg='Failed creating temp file for atomic move. This usually happens when using Python3 less than Python3.5. '
+ 'Please use Python2.x or Python3.5 or greater.', exception=traceback.format_exc())
b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')
@@ -2232,7 +2236,8 @@ class AnsibleModule(object):
return data
- def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None, use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
+ def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
+ use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
'''
Execute a command, returns rc, stdout, and stderr.
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index 26a54a4b36c..52c2df052d4 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -95,7 +95,8 @@ def boto3_conn(module, conn_type=None, resource=None, region=None, endpoint=None
try:
return _boto3_conn(conn_type=conn_type, resource=resource, region=region, endpoint=endpoint, **params)
except ValueError:
- module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type parameter in the boto3_conn function call')
+ module.fail_json(msg='There is an issue in the code of the module. You must specify either both, resource or client to the conn_type '
+ 'parameter in the boto3_conn function call')
def _boto3_conn(conn_type=None, resource=None, region=None, endpoint=None, **params):
profile = params.pop('profile_name', None)
@@ -257,7 +258,8 @@ def connect_to_aws(aws_module, region, **params):
conn = aws_module.connect_to_region(region, **params)
if not conn:
if region not in [aws_module_region.name for aws_module_region in aws_module.regions()]:
- raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade boto or extend with endpoints_path" % (region, aws_module.__name__))
+ raise AnsibleAWSError("Region %s does not seem to be available for aws module %s. If the region definitely exists, you may need to upgrade "
+ "boto or extend with endpoints_path" % (region, aws_module.__name__))
else:
raise AnsibleAWSError("Unknown problem connecting to region %s for aws module %s." % (region, aws_module.__name__))
if params.get('profile_name'):
diff --git a/lib/ansible/module_utils/facts.py b/lib/ansible/module_utils/facts.py
index 50e3448dd96..6a6a28aa0f5 100644
--- a/lib/ansible/module_utils/facts.py
+++ b/lib/ansible/module_utils/facts.py
@@ -677,8 +677,8 @@ class Distribution(object):
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', Raspbian = 'Debian', Slackware = 'Slackware', SLES = 'Suse',
- SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
+ SLED = 'Suse', openSUSE = 'Suse', openSUSE_Tumbleweed = 'Suse', SuSE = 'Suse', SLES_SAP = 'Suse', SUSE_LINUX = 'Suse', Gentoo = 'Gentoo',
+ Funtoo = 'Gentoo', Archlinux = 'Archlinux', Manjaro = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake', Altlinux = 'Altlinux', SMGL = 'SMGL',
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
FreeBSD = 'FreeBSD', HPUX = 'HP-UX', openSUSE_Leap = 'Suse', Neon = 'Debian'
@@ -1658,7 +1658,15 @@ class SunOSHardware(Hardware):
for line in fstab.splitlines():
fields = line.split('\t')
size_total, size_available = self._get_mount_size_facts(fields[1])
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'time': fields[4], 'size_total': size_total, 'size_available': size_available})
+ self.facts['mounts'].append({
+ 'mount': fields[1],
+ 'device': fields[0],
+ 'fstype' : fields[2],
+ 'options': fields[3],
+ 'time': fields[4],
+ 'size_total': size_total,
+ 'size_available': size_available
+ })
def get_dmi_facts(self):
uname_path = self.module.get_bin_path("prtdiag")
@@ -1785,7 +1793,14 @@ class OpenBSDHardware(Hardware):
if fields[1] == 'none' or fields[3] == 'xx':
continue
size_total, size_available = self._get_mount_size_facts(fields[1])
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+ self.facts['mounts'].append({
+ 'mount': fields[1],
+ 'device': fields[0],
+ 'fstype' : fields[2],
+ 'options': fields[3],
+ 'size_total': size_total,
+ 'size_available': size_available
+ })
def get_memory_facts(self):
@@ -1926,7 +1941,14 @@ class FreeBSDHardware(Hardware):
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+ self.facts['mounts'].append({
+ 'mount': fields[1],
+ 'device': fields[0],
+ 'fstype': fields[2],
+ 'options': fields[3],
+ 'size_total': size_total,
+ 'size_available': size_available
+ })
def get_device_facts(self):
sysdir = '/dev'
@@ -2057,7 +2079,14 @@ class NetBSDHardware(Hardware):
continue
fields = re.sub(r'\s+',' ',line).split()
size_total, size_available = self._get_mount_size_facts(fields[1])
- self.facts['mounts'].append({'mount': fields[1], 'device': fields[0], 'fstype' : fields[2], 'options': fields[3], 'size_total': size_total, 'size_available': size_available})
+ self.facts['mounts'].append({
+ 'mount': fields[1],
+ 'device': fields[0],
+ 'fstype' : fields[2],
+ 'options': fields[3],
+ 'size_total': size_total,
+ 'size_available': size_available
+ })
def get_dmi_facts(self):
# We don't use dmidecode(1) here because:
@@ -2316,7 +2345,8 @@ class HPUX(Hardware):
#For systems where memory details aren't sent to syslog or the log has rotated, use parsed
#adb output. Unfortunately /dev/kmem doesn't have world-read, so this only works as root.
if os.access("/dev/kmem", os.R_OK):
- rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'", use_unsafe_shell=True)
+ rc, out, err = self.module.run_command("echo 'phys_mem_pages/D' | adb -k /stand/vmunix /dev/kmem | tail -1 | awk '{print $2}'",
+ use_unsafe_shell=True)
if not err:
data = out
self.facts['memtotal_mb'] = int(data) / 256
diff --git a/lib/ansible/module_utils/mysql.py b/lib/ansible/module_utils/mysql.py
index 14726b4740a..e6163c52fc8 100644
--- a/lib/ansible/module_utils/mysql.py
+++ b/lib/ansible/module_utils/mysql.py
@@ -35,7 +35,8 @@ try:
except ImportError:
mysqldb_found = False
-def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None, connect_timeout=30):
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None, ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+ connect_timeout=30):
config = {}
if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
diff --git a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
index 4ff64f32324..3306820782b 100644
--- a/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
+++ b/lib/ansible/modules/cloud/amazon/_ec2_vpc.py
@@ -71,7 +71,9 @@ options:
resource_tags:
description:
- 'A dictionary array of resource tags of the form C({ tag1: value1, tag2: value2 }).
- - Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
+      - Tags in this list are used in conjunction with the CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore,
+        if the CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7,
+ specifying a resource tag was optional.'
required: true
version_added: "1.6"
internet_gateway:
@@ -82,7 +84,15 @@ options:
choices: [ "yes", "no" ]
route_tables:
description:
- - 'A dictionary array of route tables to add of the form: C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }). This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
+ - >
+ A dictionary array of route tables to add of the form:
+ C({ subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }). Where the subnets list is
+ those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword
+        for the gw of igw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids,
+        interface-ids, and vpc-peering-connection-ids in addition to igw. resource_tags is optional and uses dictionary form: C({ "Name": "public", ... }).
+ This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated
+ subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes
+ will be modified.
required: false
default: null
wait:
diff --git a/lib/ansible/modules/cloud/amazon/aws_kms.py b/lib/ansible/modules/cloud/amazon/aws_kms.py
index d3fc644d349..81e36e3dfa8 100644
--- a/lib/ansible/modules/cloud/amazon/aws_kms.py
+++ b/lib/ansible/modules/cloud/amazon/aws_kms.py
@@ -283,7 +283,8 @@ def main():
if not g in statement_label:
module.fail_json(msg='{} is an unknown grant type.'.format(g))
- ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'], mode=mode, dry_run=module.check_mode, clean_invalid_entries=module.params['clean_invalid_entries'])
+ ret = do_grant(kms, module.params['key_arn'], module.params['role_arn'], module.params['grant_types'], mode=mode, dry_run=module.check_mode,
+ clean_invalid_entries=module.params['clean_invalid_entries'])
result.update(ret)
except Exception as err:
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation.py b/lib/ansible/modules/cloud/amazon/cloudformation.py
index 5fca87f1d1f..45baab2bdac 100644
--- a/lib/ansible/modules/cloud/amazon/cloudformation.py
+++ b/lib/ansible/modules/cloud/amazon/cloudformation.py
@@ -33,7 +33,8 @@ short_description: Create or delete an AWS CloudFormation stack
description:
- Launches or updates an AWS CloudFormation stack and waits for it complete.
notes:
- - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML. This will change (in fact, it may change before 2.3 is out).
+ - As of version 2.3, migrated to boto3 to enable new features. To match existing behavior, YAML parsing is done in the module, not given to AWS as YAML.
+ This will change (in fact, it may change before 2.3 is out).
version_added: "1.1"
options:
stack_name:
@@ -59,8 +60,10 @@ options:
template:
description:
- The local path of the cloudformation template.
- - This must be the full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json".
- - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
+ - This must be the full path to the file, relative to the working directory. If using roles this may look
+ like "roles/cloudformation/files/cloudformation-example.json".
+ - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
+ present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
required: false
default: null
notification_arns:
@@ -71,7 +74,8 @@ options:
version_added: "2.0"
stack_policy:
description:
- - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified. (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051)
+ - the path of the cloudformation stack policy. A policy cannot be removed once placed, but it can be modified.
+      (for instance, [allow all updates](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/protect-stack-resources.html#d0e9051))
required: false
default: null
version_added: "1.9"
@@ -83,20 +87,24 @@ options:
version_added: "1.4"
template_url:
description:
- - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region as the stack.
- - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
+ - Location of file containing the template body. The URL must point to a template (max size 307,200 bytes) located in an S3 bucket in the same region
+ as the stack.
+ - If 'state' is 'present' and the stack does not exist yet, either 'template' or 'template_url' must be specified (but not both). If 'state' is
+ present, the stack does exist, and neither 'template' nor 'template_url' are specified, the previous template will be reused.
required: false
version_added: "2.0"
template_format:
description:
- - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format. This parameter is ignored since Ansible 2.3.
+ - (deprecated) For local templates, allows specification of json or yaml format. Templates are now passed raw to CloudFormation regardless of format.
+ This parameter is ignored since Ansible 2.3.
default: json
choices: [ json, yaml ]
required: false
version_added: "2.0"
role_arn:
description:
- - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
+ - The role that AWS CloudFormation assumes to create the stack. See the AWS CloudFormation Service Role
+ docs U(http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-servicerole.html)
required: false
default: null
version_added: "2.3"
@@ -212,7 +220,7 @@ stack_outputs:
description: A key:value dictionary of all the stack outputs currently defined. If there are no stack outputs, it is an empty dictionary.
returned: always
sample: {"MySg": "AnsibleModuleTestYAML-CFTestSg-C8UVS567B6NS"}
-'''
+''' # NOQA
import json
import time
diff --git a/lib/ansible/modules/cloud/amazon/cloudformation_facts.py b/lib/ansible/modules/cloud/amazon/cloudformation_facts.py
index 68958646050..c8c9d47fb16 100644
--- a/lib/ansible/modules/cloud/amazon/cloudformation_facts.py
+++ b/lib/ansible/modules/cloud/amazon/cloudformation_facts.py
@@ -112,11 +112,13 @@ stack_description:
returned: always
type: dict
stack_outputs:
- description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each output 'OutputValue' parameter
+ description: Dictionary of stack outputs keyed by the value of each output 'OutputKey' parameter and corresponding value of each
+ output 'OutputValue' parameter
returned: always
type: dict
stack_parameters:
- description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of each parameter 'ParameterValue' parameter
+ description: Dictionary of stack parameters keyed by the value of each parameter 'ParameterKey' parameter and corresponding value of
+ each parameter 'ParameterValue' parameter
returned: always
type: dict
stack_events:
@@ -136,7 +138,8 @@ stack_resource_list:
returned: only if all_facts or stack_resourses is true
type: list of resources
stack_resources:
- description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each resource 'PhysicalResourceId' parameter
+ description: Dictionary of stack resources keyed by the value of each resource 'LogicalResourceId' parameter and corresponding value of each
+ resource 'PhysicalResourceId' parameter
returned: only if all_facts or stack_resourses is true
type: dict
'''
diff --git a/lib/ansible/modules/cloud/amazon/cloudtrail.py b/lib/ansible/modules/cloud/amazon/cloudtrail.py
index f0558786af8..5297e32117e 100644
--- a/lib/ansible/modules/cloud/amazon/cloudtrail.py
+++ b/lib/ansible/modules/cloud/amazon/cloudtrail.py
@@ -44,7 +44,8 @@ options:
s3_bucket_prefix:
description:
- bucket to place CloudTrail in.
- - this bucket should exist and have the proper policy. See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
+ - this bucket should exist and have the proper policy.
+ See U(http://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_bucket_policy.html)
- required when state=enabled.
required: false
s3_key_prefix:
@@ -215,12 +216,14 @@ def main():
results['view'].get('S3KeyPrefix', '') != s3_key_prefix or \
results['view']['IncludeGlobalServiceEvents'] != include_global_events:
if not module.check_mode:
- results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+ results['update'] = cf_man.update(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
+ include_global_service_events=include_global_events)
results['changed'] = True
else:
if not module.check_mode:
# doesn't exist. create it.
- results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix, include_global_service_events=include_global_events)
+ results['enable'] = cf_man.enable(name=ct_name, s3_bucket_name=s3_bucket_name, s3_key_prefix=s3_key_prefix,
+ include_global_service_events=include_global_events)
results['changed'] = True
# given cloudtrail should exist now. Enable the logging.
diff --git a/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py b/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
index 64b20edbd64..cc94a066c69 100644
--- a/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
+++ b/lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
@@ -117,7 +117,7 @@ targets:
returned: success
type: list
sample: "[{ 'arn': 'arn:aws:lambda:us-east-1:123456789012:function:MyFunction', 'id': 'MyTargetId' }]"
-'''
+''' # NOQA
class CloudWatchEventRule(object):
diff --git a/lib/ansible/modules/cloud/amazon/dynamodb_table.py b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
index 7d8ee706f3a..2151e142059 100644
--- a/lib/ansible/modules/cloud/amazon/dynamodb_table.py
+++ b/lib/ansible/modules/cloud/amazon/dynamodb_table.py
@@ -332,7 +332,11 @@ def get_changed_global_indexes(table, global_indexes):
removed_indexes = dict((name, index) for name, index in table_index_info.items() if name not in set_index_info)
added_indexes = dict((name, set_index_objects[name]) for name, index in set_index_info.items() if name not in table_index_info)
# todo: uncomment once boto has https://github.com/boto/boto/pull/3447 fixed
- # index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes and (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or index.throughput['write'] != str(table_index_objects[name].throughput['write'])))
+ # for name, index in set_index_objects.items():
+ # if (name not in added_indexes and
+ # (index.throughput['read'] != str(table_index_objects[name].throughput['read']) or
+ # index.throughput['write'] != str(table_index_objects[name].throughput['write']))):
+ # index_throughput_changes[name] = index.throughput
# todo: remove once boto has https://github.com/boto/boto/pull/3447 fixed
index_throughput_changes = dict((name, index.throughput) for name, index in set_index_objects.items() if name not in added_indexes)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami.py b/lib/ansible/modules/cloud/amazon/ec2_ami.py
index bfb6d1150ad..2a3812ddf51 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_ami.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_ami.py
@@ -84,7 +84,8 @@ options:
default: null
no_reboot:
description:
- - Flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the responsibility of maintaining file system integrity is left to the owner of the instance.
+ - Flag indicating that the bundling process should not attempt to shutdown the instance before bundling. If this flag is True, the
+ responsibility of maintaining file system integrity is left to the owner of the instance.
required: false
default: no
choices: [ "yes", "no" ]
@@ -97,7 +98,9 @@ options:
version_added: "2.0"
description:
- List of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
- - "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
+ - >
+ Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean),
+ snapshot_id, iops (for io1 volume_type)
required: false
default: null
delete_snapshot:
@@ -474,7 +477,8 @@ def create_image(module, ec2):
module.fail_json(msg="AMI creation failed, please see the AWS console for more details")
except boto.exception.EC2ResponseError as e:
if ('InvalidAMIID.NotFound' not in e.error_code and 'InvalidAMIID.Unavailable' not in e.error_code) and wait and i == wait_timeout - 1:
- module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg="Error while trying to find the new image. Using wait=yes and/or a longer "
+ "wait_timeout may help. %s: %s" % (e.error_code, e.error_message))
finally:
time.sleep(1)
@@ -569,7 +573,8 @@ def update_image(module, ec2, image_id):
try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
- if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
+ if (('user_ids' in launch_permissions and launch_permissions['user_ids']) or
+ ('group_names' in launch_permissions and launch_permissions['group_names'])):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
diff --git a/lib/ansible/modules/cloud/amazon/ec2_ami_find.py b/lib/ansible/modules/cloud/amazon/ec2_ami_find.py
index c2bfab136a7..5af26d0e773 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_ami_find.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_ami_find.py
@@ -32,7 +32,8 @@ description:
- Results can be sorted and sliced
author: "Tom Bamford (@tombamford)"
notes:
- - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com.
+ - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on
+ cloud-images.ubuntu.com.
- See the example below for a suggestion of how to search by distro/release.
options:
region:
@@ -45,7 +46,9 @@ options:
- Search AMIs owned by the specified owner
- Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace'
- If not specified, all EC2 AMIs in the specified region will be searched.
- - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\.
+ - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one
+ character. You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the
+ literal string *amazon?\.
required: false
default: null
ami_id:
@@ -94,8 +97,24 @@ options:
description:
- Optional attribute which with to sort the results.
- If specifying 'tag', the 'tag_name' parameter is required.
- - Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id, platform, root_device_name, root_device_type, state, and virtualization_type are supported.
- choices: ['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']
+ - Starting at version 2.1, additional sort choices of architecture, block_device_mapping, creationDate, hypervisor, is_public, location, owner_id,
+ platform, root_device_name, root_device_type, state, and virtualization_type are supported.
+ choices:
+ - 'name'
+ - 'description'
+ - 'tag'
+ - 'architecture'
+ - 'block_device_mapping'
+ - 'creationDate'
+ - 'hypervisor'
+ - 'is_public'
+ - 'location'
+ - 'owner_id'
+ - 'platform'
+ - 'root_device_name'
+ - 'root_device_type'
+ - 'state'
+ - 'virtualization_type'
default: null
required: false
sort_tag:
@@ -316,7 +335,8 @@ def main():
platform = dict(required=False),
product_code = dict(required=False),
sort = dict(required=False, default=None,
- choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location', 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
+ choices=['name', 'description', 'tag', 'architecture', 'block_device_mapping', 'creationDate', 'hypervisor', 'is_public', 'location',
+ 'owner_id', 'platform', 'root_device_name', 'root_device_type', 'state', 'virtualization_type']),
sort_tag = dict(required=False),
sort_order = dict(required=False, default='ascending',
choices=['ascending', 'descending']),
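As a quick sanity check on the re-wrapped choices lists (in the main() hunk above and elsewhere in this patch), a list literal split across lines inside its brackets builds exactly the same object as the single-line form; the values below are just a subset for illustration:

```python
single_line = ['name', 'description', 'tag', 'architecture', 'block_device_mapping']
wrapped = ['name', 'description', 'tag',
           'architecture', 'block_device_mapping']
assert single_line == wrapped  # wrapping inside brackets does not change the value
```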
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg.py b/lib/ansible/modules/cloud/amazon/ec2_asg.py
index a396d3f5cd4..1fd30d43c6a 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_asg.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_asg.py
@@ -82,7 +82,8 @@ options:
default: 1
replace_instances:
description:
- - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch configuration.
+ - List of instance_ids belonging to the named ASG that you would like to terminate and be replaced with instances matching the current launch
+ configuration.
required: false
version_added: "1.8"
default: None
@@ -129,14 +130,16 @@ options:
version_added: "1.8"
wait_for_instances:
description:
- - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all instances have a lifecycle_state of "InService" and a health_status of "Healthy".
+ - Wait for the ASG instances to be in a ready state before exiting. If instances are behind an ELB, it will wait until the ELB determines all
+ instances have a lifecycle_state of "InService" and a health_status of "Healthy".
version_added: "1.9"
default: yes
required: False
termination_policies:
description:
- An ordered list of criteria used for selecting instances to be removed from the Auto Scaling group when reducing capacity.
- - For 'Default', when used to create a new autoscaling group, the "Default"i value is used. When used to change an existent autoscaling group, the current termination policies are maintained.
+ - For 'Default', when used to create a new autoscaling group, the "Default" value is used. When used to change an existing autoscaling group, the
+ current termination policies are maintained.
required: false
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
@@ -150,7 +153,11 @@ options:
notification_types:
description:
- A list of auto scaling events to trigger notifications on.
- default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR', 'autoscaling:EC2_INSTANCE_TERMINATE', 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR']
+ default:
+ - 'autoscaling:EC2_INSTANCE_LAUNCH'
+ - 'autoscaling:EC2_INSTANCE_LAUNCH_ERROR'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE'
+ - 'autoscaling:EC2_INSTANCE_TERMINATE_ERROR'
required: false
version_added: "2.2"
suspend_processes:
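The DOCUMENTATION edits above lean on two pieces of YAML behaviour: a folded scalar ('>') joins its lines with spaces, and a block-style sequence is equivalent to the original flow-style list. A small sketch, assuming PyYAML is available (the modules themselves do not import it):

```python
import yaml  # PyYAML, assumed available for this sketch

folded = yaml.safe_load("desc: >\n  first half\n  second half\n")
inline = yaml.safe_load("desc: first half second half\n")
assert folded["desc"].strip() == inline["desc"]  # '>' folds the line break into a space

block_list = yaml.safe_load("default:\n  - 'autoscaling:EC2_INSTANCE_LAUNCH'\n  - 'autoscaling:EC2_INSTANCE_TERMINATE'\n")
flow_list = yaml.safe_load("default: ['autoscaling:EC2_INSTANCE_LAUNCH', 'autoscaling:EC2_INSTANCE_TERMINATE']\n")
assert block_list == flow_list  # block and flow sequences parse identically
```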
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py b/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
index a9d6aed1cfb..583a693ceea 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
@@ -35,7 +35,9 @@ options:
required: false
tags:
description:
- - "A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling group(s) you are searching for."
+ - >
+ A dictionary/hash of tags in the format { tag1_name: 'tag1_value', tag2_name: 'tag2_value' } to match against the auto scaling
+ group(s) you are searching for.
required: false
extends_documentation_fragment:
- aws
@@ -232,7 +234,10 @@ def find_asgs(conn, module, name=None, tags=None):
List
[
{
- "auto_scaling_group_arn": "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:autoScalingGroupName/public-webapp-production",
+ "auto_scaling_group_arn": (
+ "arn:aws:autoscaling:us-west-2:275977225706:autoScalingGroup:58abc686-9783-4528-b338-3ad6f1cbbbaf:"
+ "autoScalingGroupName/public-webapp-production"
+ ),
"auto_scaling_group_name": "public-webapp-production",
"availability_zones": ["us-west-2c", "us-west-2b", "us-west-2a"],
"created_time": "2016-02-02T23:28:42.481000+00:00",
diff --git a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
index 82f836bd68e..c9b8dbf0604 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
@@ -28,7 +28,9 @@ version_added: "2.2"
author: Michael Baydoun (@MichaelBaydoun)
requirements: [ botocore, boto3 ]
notes:
- - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent requests do not create new customer gateway resources.
+ - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
+ first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
+ requests do not create new customer gateway resources.
- Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
options:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_elb.py b/lib/ansible/modules/cloud/amazon/ec2_elb.py
index 55e5c30d57f..b2736bea3e7 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_elb.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_elb.py
@@ -69,7 +69,8 @@ options:
version_added: "1.5"
wait_timeout:
description:
- - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
+ - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs.
+ If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
required: false
default: 0
version_added: "1.6"
diff --git a/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
index a141baf6899..01d4f686149 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkInterfaces.html) for possible filters.
required: false
default: null
diff --git a/lib/ansible/modules/cloud/amazon/ec2_lc.py b/lib/ansible/modules/cloud/amazon/ec2_lc.py
index 9572eae1d38..013f9be24e2 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_lc.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_lc.py
@@ -58,11 +58,14 @@ options:
required: false
security_groups:
description:
- - A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security group names or IDs.
+ - A list of security groups to apply to the instances. For VPC instances, specify security group IDs. For EC2-Classic, specify either security
+ group names or IDs.
required: false
volumes:
description:
- - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+ - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id.
+ Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume.
+ For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
required: false
user_data:
description:
@@ -87,7 +90,8 @@ options:
default: false
assign_public_ip:
description:
- - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
+ - Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP
+        address to each instance launched in an Amazon VPC.
required: false
version_added: "1.8"
ramdisk_id:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py b/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
index 040ba82086e..2949c57630e 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
@@ -73,7 +73,34 @@ options:
description:
- The threshold's unit of measurement
required: false
- choices: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
+ choices:
+ - 'Seconds'
+ - 'Microseconds'
+ - 'Milliseconds'
+ - 'Bytes'
+ - 'Kilobytes'
+ - 'Megabytes'
+ - 'Gigabytes'
+ - 'Terabytes'
+ - 'Bits'
+ - 'Kilobits'
+ - 'Megabits'
+ - 'Gigabits'
+ - 'Terabits'
+ - 'Percent'
+ - 'Count'
+ - 'Bytes/Second'
+ - 'Kilobytes/Second'
+ - 'Megabytes/Second'
+ - 'Gigabytes/Second'
+ - 'Terabytes/Second'
+ - 'Bits/Second'
+ - 'Kilobits/Second'
+ - 'Megabits/Second'
+ - 'Gigabits/Second'
+ - 'Terabits/Second'
+ - 'Count/Second'
+ - 'None'
description:
description:
- A longer description of the alarm
@@ -254,7 +281,10 @@ def main():
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
- unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
+ unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
+ 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
+ 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
+ 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
diff --git a/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py b/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
index aa4e7abee65..ff6385e6989 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
@@ -28,7 +28,8 @@ version_added: "2.0"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html) for possible filters.
required: false
default: null
author:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py b/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
index 92d32ead43c..569bf5763bb 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
@@ -110,7 +110,8 @@ def create_scaling_policy(connection, module):
try:
connection.create_scaling_policy(sp)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
- module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
+ module.exit_json(changed=True, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
+ cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
@@ -137,7 +138,8 @@ def create_scaling_policy(connection, module):
if changed:
connection.create_scaling_policy(policy)
policy = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])[0]
- module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
+ module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment,
+ cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
except BotoServerError as e:
module.fail_json(msg=str(e))
diff --git a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
index 9d88605c742..1aa80e2d5b6 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
@@ -113,7 +113,9 @@ state:
type: string
sample: completed
state_message:
- description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the error occurred.
+ description: Encrypted Amazon EBS snapshots are copied asynchronously. If a snapshot copy operation fails (for example, if the proper
+ AWS Key Management Service (AWS KMS) permissions are not obtained) this field displays error state details to help you diagnose why the
+ error occurred.
type: string
sample:
start_time:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_tag.py b/lib/ansible/modules/cloud/amazon/ec2_tag.py
index 47812bc4b07..3937592d95f 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_tag.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_tag.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
+ - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX).
+ It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
index 33e657a2337..dc2a26bb873 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVolumes.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
index cb9ff5849ef..b4b44c8b010 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
@@ -30,7 +30,8 @@ author: "Nick Aslanidis (@naslanidis)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false
default: null
DhcpOptionsIds:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
index 15a322a4710..2ff5dcbbd05 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
@@ -61,7 +61,8 @@ options:
required: false
tags:
description:
- - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of the VPC if it's different.
+ - The tags you want attached to the VPC. This is independent of the name value, note if you pass a 'Name' key it would override the Name of
+ the VPC if it's different.
default: None
required: false
aliases: [ 'resource_tags' ]
@@ -73,7 +74,8 @@ options:
choices: [ 'present', 'absent' ]
multi_ok:
description:
- - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
+ - By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want
+ duplicate VPCs created.
default: false
required: false
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
index 6de30347a46..3d84337a1a8 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html) for possible filters.
required: false
default: null
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
index 36b5ad079a6..975755eaa89 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
index 357be886c89..3d07d96883e 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
@@ -29,7 +29,8 @@ author: "Rob White (@wimnat)"
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
required: false
default: null
extends_documentation_fragment:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
index 7f6473460ac..176aab37244 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
@@ -29,7 +29,8 @@ requirements: [ boto3 ]
options:
filters:
description:
- - A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
+ - A dict of filters to apply. Each dict item consists of a filter key and a filter value.
+ See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false
default: None
vpn_gateway_ids:
diff --git a/lib/ansible/modules/cloud/amazon/ec2_win_password.py b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
index 39af5643496..cc64b4e3383 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_win_password.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_win_password.py
@@ -23,7 +23,8 @@ DOCUMENTATION = '''
module: ec2_win_password
short_description: gets the default administrator password for ec2 windows instances
description:
- - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module has a dependency on python-boto.
+ - Gets the default administrator password from any EC2 Windows instance. The instance is referenced by its id (e.g. i-XXXXXXX). This module
+ has a dependency on python-boto.
version_added: "2.0"
author: "Rick Mendes (@rickmendes)"
options:
@@ -38,7 +39,8 @@ options:
key_passphrase:
version_added: "2.0"
description:
- - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
+ - The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to
+ convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
wait:
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service.py b/lib/ansible/modules/cloud/amazon/ecs_service.py
index 9361c6a23a4..f4081b4b0c8 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_service.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_service.py
@@ -69,7 +69,8 @@ options:
required: false
role:
description:
- - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
+ - The name or full Amazon Resource Name (ARN) of the IAM role that allows your Amazon ECS container agent to make calls to your load balancer
+ on your behalf. This parameter is only required if you are using a load balancer with your service.
required: false
delay:
description:
@@ -164,7 +165,9 @@ service:
returned: always
type: int
serviceArn:
- description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example, arn:aws:ecs:region :012345678910 :service/my-service .
+ description: The Amazon Resource Name (ARN) that identifies the service. The ARN contains the arn:aws:ecs namespace, followed by the region
+ of the service, the AWS account ID of the service owner, the service namespace, and then the service name. For example,
+ arn:aws:ecs:region :012345678910 :service/my-service .
returned: always
type: string
serviceName:
diff --git a/lib/ansible/modules/cloud/amazon/ecs_service_facts.py b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
index e9aadfaf4e7..c3c08ed3bca 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_service_facts.py
@@ -130,7 +130,8 @@ services:
description: lost of service events
returned: always
type: list of complex
-'''
+''' # NOQA
+
try:
import boto
import botocore
@@ -167,7 +168,8 @@ class EcsServiceManager:
# return self.client.list_clusters()
# {'failures': [],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': 'ce7b5880-1c41-11e5-8a31-47a93a8a98eb'},
- # 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default', 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
+ # 'clusters': [{'activeServicesCount': 0, 'clusterArn': 'arn:aws:ecs:us-west-2:777110527155:cluster/default',
+ # 'status': 'ACTIVE', 'pendingTasksCount': 0, 'runningTasksCount': 0, 'registeredContainerInstancesCount': 0, 'clusterName': 'default'}]}
# {'failures': [{'arn': 'arn:aws:ecs:us-west-2:777110527155:cluster/bogus', 'reason': 'MISSING'}],
# 'ResponseMetadata': {'HTTPStatusCode': 200, 'RequestId': '0f66c219-1c42-11e5-8a31-47a93a8a98eb'},
# 'clusters': []}
diff --git a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
index ff1d89e15de..109e35b9fc6 100644
--- a/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
+++ b/lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
@@ -59,7 +59,8 @@ options:
version_added: 2.3
task_role_arn:
description:
- - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted the permissions that are specified in this role.
+ - The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers in this task are granted
+ the permissions that are specified in this role.
required: false
version_added: 2.3
volumes:
@@ -88,7 +89,10 @@ EXAMPLES = '''
hostPort: 80
- name: busybox
command:
-        - /bin/sh -c "while true; do echo 'Amazon ECS Sample AppAmazon ECS Sample App Congratulations! Your application is now running on a container in Amazon ECS.' > top; /bin/date > date ; echo '' > bottom; cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
+        - >
+            /bin/sh -c "while true; do echo 'Amazon ECS Sample AppAmazon ECS Sample App Congratulations!
+            Your application is now running on a container in Amazon ECS.' > top; /bin/date > date ; echo '' > bottom;
+            cat top date bottom > /usr/local/apache2/htdocs/index.html ; sleep 1; done"
cpu: 10
entryPoint:
- sh
@@ -199,7 +203,12 @@ class EcsTaskManager:
pass
# Return the full descriptions of the task definitions, sorted ascending by revision
- return list(sorted([self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']], key=lambda td: td['revision']))
+ return list(
+ sorted(
+ [self.ecs.describe_task_definition(taskDefinition=arn)['taskDefinition'] for arn in data['taskDefinitionArns']],
+ key=lambda td: td['revision']
+ )
+ )
def deregister_task(self, taskArn):
response = self.ecs.deregister_task_definition(taskDefinition=taskArn)
@@ -256,7 +265,8 @@ def main():
if not existing_definitions_in_family and revision != 1:
module.fail_json(msg="You have specified a revision of %d but a created revision would be 1" % revision)
elif existing_definitions_in_family and existing_definitions_in_family[-1]['revision'] + 1 != revision:
- module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" % (revision, existing_definitions_in_family[-1]['revision'] + 1))
+ module.fail_json(msg="You have specified a revision of %d but a created revision would be %d" %
+ (revision, existing_definitions_in_family[-1]['revision'] + 1))
else:
existing = None
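The reformatted return in EcsTaskManager above behaves exactly as before: sorted() with a key callable orders the described task definitions ascending by revision. A tiny sketch with made-up data (not real describe_task_definition output):

```python
# Made-up revisions, purely to illustrate the sorted(..., key=lambda td: td['revision']) pattern.
described = [{'revision': 3}, {'revision': 1}, {'revision': 2}]
ordered = sorted(described, key=lambda td: td['revision'])
assert [td['revision'] for td in ordered] == [1, 2, 3]
```

(sorted() already returns a list, so the surrounding list() call is a redundant copy that predates this reformatting.)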
diff --git a/lib/ansible/modules/cloud/amazon/elasticache.py b/lib/ansible/modules/cloud/amazon/elasticache.py
index 2a7172c3ab2..aebb007b8e7 100644
--- a/lib/ansible/modules/cloud/amazon/elasticache.py
+++ b/lib/ansible/modules/cloud/amazon/elasticache.py
@@ -31,7 +31,8 @@ author: "Jim Dalton (@jsdalton)"
options:
state:
description:
- - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster, resulting in a momentary outage.
+ - C(absent) or C(present) are idempotent actions that will create or destroy a cache cluster as needed. C(rebooted) will reboot the cluster,
+ resulting in a momentary outage.
choices: ['present', 'absent', 'rebooted']
required: true
name:
@@ -65,7 +66,8 @@ options:
default: None
cache_parameter_group:
description:
- - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used.
+ - The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group
+ for the specified engine will be used.
required: false
default: None
version_added: "2.0"
diff --git a/lib/ansible/modules/cloud/amazon/iam_cert.py b/lib/ansible/modules/cloud/amazon/iam_cert.py
index be24cadc476..b27e335cf75 100644
--- a/lib/ansible/modules/cloud/amazon/iam_cert.py
+++ b/lib/ansible/modules/cloud/amazon/iam_cert.py
@@ -70,7 +70,8 @@ options:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
- - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
+ - By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as
+ long as the name is unique.
required: false
default: False
aliases: []
diff --git a/lib/ansible/modules/cloud/amazon/iam_policy.py b/lib/ansible/modules/cloud/amazon/iam_policy.py
index 99015f20edd..add3bddc91c 100644
--- a/lib/ansible/modules/cloud/amazon/iam_policy.py
+++ b/lib/ansible/modules/cloud/amazon/iam_policy.py
@@ -46,7 +46,8 @@ options:
required: false
policy_json:
description:
- - A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
+ - A properly json formatted policy as string (mutually exclusive with C(policy_document),
+ see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false
state:
description:
@@ -56,7 +57,8 @@ options:
choices: [ "present", "absent"]
skip_duplicates:
description:
- - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
+ - By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with
+ the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
required: false
default: "/"
diff --git a/lib/ansible/modules/cloud/amazon/iam_role.py b/lib/ansible/modules/cloud/amazon/iam_role.py
index d790d503a53..94238233e90 100644
--- a/lib/ansible/modules/cloud/amazon/iam_role.py
+++ b/lib/ansible/modules/cloud/amazon/iam_role.py
@@ -43,7 +43,8 @@ options:
required: false
managed_policy:
description:
- - A list of managed policy ARNs (can't use friendly names due to AWS API limitation) to attach to the role. To embed an inline policy, use M(iam_policy). To remove existing policies, use an empty list item.
+ - A list of managed policy ARNs (can't use friendly names due to AWS API limitation) to attach to the role. To embed an inline policy,
+ use M(iam_policy). To remove existing policies, use an empty list item.
required: true
state:
description:
diff --git a/lib/ansible/modules/cloud/amazon/lambda.py b/lib/ansible/modules/cloud/amazon/lambda.py
index 31ccaddb302..565d2dbbbae 100644
--- a/lib/ansible/modules/cloud/amazon/lambda.py
+++ b/lib/ansible/modules/cloud/amazon/lambda.py
@@ -41,11 +41,13 @@ options:
choices: [ 'present', 'absent' ]
runtime:
description:
- - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs. Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
+ - The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs.
+ Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
required: true
role:
description:
- - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS) resources. You may use the bare ARN if the role belongs to the same AWS account.
+ - The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
+ resources. You may use the bare ARN if the role belongs to the same AWS account.
default: null
handler:
description:
@@ -89,7 +91,8 @@ options:
default: 128
vpc_subnet_ids:
description:
- - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run the function in a VPC.
+ - List of subnet IDs to run Lambda function in. Use this option if you need to access resources in your VPC. Leave empty if you don't want to run
+ the function in a VPC.
required: false
default: None
vpc_security_group_ids:
diff --git a/lib/ansible/modules/cloud/amazon/rds.py b/lib/ansible/modules/cloud/amazon/rds.py
index f68b9b79ddb..e8d33d297b7 100644
--- a/lib/ansible/modules/cloud/amazon/rds.py
+++ b/lib/ansible/modules/cloud/amazon/rds.py
@@ -25,7 +25,9 @@ module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
+ - Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing
+ instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely
+ on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
@@ -48,7 +50,7 @@ options:
- mariadb was added in version 2.2
required: false
default: null
- choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
+ choices: ['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
@@ -56,7 +58,8 @@ options:
default: null
instance_type:
description:
- - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
+ - The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore.
+ If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
@@ -81,12 +84,13 @@ options:
default: null
engine_version:
description:
- - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
+ - Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used
required: false
default: null
parameter_group:
description:
- - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
+ - Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only
+ when command=create or command=modify.
required: false
default: null
license_model:
@@ -97,7 +101,8 @@ options:
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
+ - Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or
+ command=modify.
choices: [ "yes", "no" ]
required: false
default: null
@@ -136,7 +141,9 @@ options:
default: null
maint_window:
description:
- - "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
+ - >
+ Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is
+ assigned. Used only when command=create or command=modify.
required: false
default: null
backup_window:
@@ -146,7 +153,9 @@ options:
default: null
backup_retention:
description:
- - "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
+ - >
+ Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or
+ command=modify.
required: false
default: null
zone:
@@ -162,7 +171,8 @@ options:
default: null
snapshot:
description:
- - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
+ - Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with
+ no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
@@ -178,7 +188,8 @@ options:
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
+ - When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for
+ the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
@@ -188,7 +199,8 @@ options:
default: 300
apply_immediately:
description:
- - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
+ - Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next
+ preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
@@ -445,7 +457,9 @@ class RDS2Connection:
def get_db_instance(self, instancename):
try:
- dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
+ dbinstances = self.connection.describe_db_instances(
+ db_instance_identifier=instancename
+ )['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound as e:
@@ -455,7 +469,10 @@ class RDS2Connection:
def get_db_snapshot(self, snapshotid):
try:
- snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
+ snapshots = self.connection.describe_db_snapshots(
+ db_snapshot_identifier=snapshotid,
+ snapshot_type='manual'
+ )['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound as e:
@@ -472,7 +489,11 @@ class RDS2Connection:
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
- result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
+ result = self.connection.create_db_instance_read_replica(
+ instance_name,
+ source_instance,
+ **params
+ )['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
@@ -507,7 +528,11 @@ class RDS2Connection:
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
- result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
+ result = self.connection.restore_db_instance_from_db_snapshot(
+ instance_name,
+ snapshot,
+ **params
+ )['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError as e:
raise RDSException(e)
@@ -1046,7 +1071,8 @@ def main():
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
- db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False),
+ db_engine = dict(choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex',
+ 'sqlserver-web', 'postgres', 'aurora'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
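Several of the RDS2Connection hunks above split a long boto call across lines and subscript the closing parenthesis directly. A sketch with a stand-in response dict (not real boto.rds2 output) showing that the wrapped call plus chained subscripts is equivalent to the original one-liner:

```python
def describe_db_instances(db_instance_identifier=None):
    # Stand-in for the boto.rds2 call; only the nesting shape matters here.
    return {'DescribeDBInstancesResponse':
            {'DescribeDBInstancesResult':
             {'DBInstances': [{'DBInstanceIdentifier': db_instance_identifier}]}}}

dbinstances = describe_db_instances(
    db_instance_identifier='mydb'
)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']

assert dbinstances[0]['DBInstanceIdentifier'] == 'mydb'
```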
diff --git a/lib/ansible/modules/cloud/amazon/rds_param_group.py b/lib/ansible/modules/cloud/amazon/rds_param_group.py
index 83f5c5f0259..6c915974934 100644
--- a/lib/ansible/modules/cloud/amazon/rds_param_group.py
+++ b/lib/ansible/modules/cloud/amazon/rds_param_group.py
@@ -52,7 +52,35 @@ options:
required: false
default: null
aliases: []
- choices: [ 'aurora5.6', 'mariadb10.0', 'mariadb10.1', 'mysql5.1', 'mysql5.5', 'mysql5.6', 'mysql5.7', 'oracle-ee-11.2', 'oracle-ee-12.1', 'oracle-se-11.2', 'oracle-se-12.1', 'oracle-se1-11.2', 'oracle-se1-12.1', 'postgres9.3', 'postgres9.4', 'postgres9.5', 'postgres9.6', sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-ex-12.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-se-12.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0', 'sqlserver-web-12.0' ]
+ choices:
+ - 'aurora5.6'
+ - 'mariadb10.0'
+ - 'mariadb10.1'
+ - 'mysql5.1'
+ - 'mysql5.5'
+ - 'mysql5.6'
+ - 'mysql5.7'
+ - 'oracle-ee-11.2'
+ - 'oracle-ee-12.1'
+ - 'oracle-se-11.2'
+ - 'oracle-se-12.1'
+ - 'oracle-se1-11.2'
+ - 'oracle-se1-12.1'
+ - 'postgres9.3'
+ - 'postgres9.4'
+ - 'postgres9.5'
+ - 'postgres9.6'
+ - 'sqlserver-ee-10.5'
+ - 'sqlserver-ee-11.0'
+ - 'sqlserver-ex-10.5'
+ - 'sqlserver-ex-11.0'
+ - 'sqlserver-ex-12.0'
+ - 'sqlserver-se-10.5'
+ - 'sqlserver-se-11.0'
+ - 'sqlserver-se-12.0'
+ - 'sqlserver-web-10.5'
+ - 'sqlserver-web-11.0'
+ - 'sqlserver-web-12.0'
immediate:
description:
- Whether to apply the changes immediately, or after the next reboot of any associated instances.
@@ -61,7 +89,8 @@ options:
aliases: []
params:
description:
- - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3), or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
+ - Map of parameter names and values. Numeric values may be represented as K for kilo (1024), M for mega (1024^2), G for giga (1024^3),
+ or T for tera (1024^4), and these values will be expanded into the appropriate number before being set in the parameter group.
required: false
default: null
aliases: []
diff --git a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py b/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
index d8f8b8428b6..09ca926e7fe 100644
--- a/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
+++ b/lib/ansible/modules/cloud/amazon/rds_subnet_group.py
@@ -144,7 +144,9 @@ def main():
# Sort the subnet groups before we compare them
matching_groups[0].subnet_ids.sort()
group_subnets.sort()
- if ( (matching_groups[0].name != group_name) or (matching_groups[0].description != group_description) or (matching_groups[0].subnet_ids != group_subnets) ):
+ if (matching_groups[0].name != group_name or
+ matching_groups[0].description != group_description or
+ matching_groups[0].subnet_ids != group_subnets):
changed_group = conn.modify_db_subnet_group(group_name, description=group_description, subnet_ids=group_subnets)
changed = True
except BotoServerError as e:
diff --git a/lib/ansible/modules/cloud/amazon/redshift.py b/lib/ansible/modules/cloud/amazon/redshift.py
index e033bb5ceb0..7bc4ea1f331 100644
--- a/lib/ansible/modules/cloud/amazon/redshift.py
+++ b/lib/ansible/modules/cloud/amazon/redshift.py
@@ -129,7 +129,8 @@ options:
default: null
wait:
description:
- - When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
+ - When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be
+ terminated.
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
@@ -413,7 +414,8 @@ def main():
argument_spec.update(dict(
command = dict(choices=['create', 'facts', 'delete', 'modify'], required=True),
identifier = dict(required=True),
- node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge', 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
+ node_type = dict(choices=['ds1.xlarge', 'ds1.8xlarge', 'ds2.xlarge', 'ds2.8xlarge', 'dc1.large', 'dc1.8xlarge',
+ 'dw1.xlarge', 'dw1.8xlarge', 'dw2.large', 'dw2.8xlarge'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(require=False),
diff --git a/lib/ansible/modules/cloud/amazon/route53.py b/lib/ansible/modules/cloud/amazon/route53.py
index ed8c61f4af9..0b394f229c7 100644
--- a/lib/ansible/modules/cloud/amazon/route53.py
+++ b/lib/ansible/modules/cloud/amazon/route53.py
@@ -77,7 +77,8 @@ options:
default: false
value:
description:
- - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
+ - The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values
+ for the record must be specified or Route53 will not delete it.
required: false
default: null
overwrite:
@@ -87,12 +88,14 @@ options:
default: null
retry_interval:
description:
- - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many domain names, the default of 500 seconds may be too long.
+ - In the case that route53 is still servicing a prior request, this module will wait and try again after this many seconds. If you have many
+ domain names, the default of 500 seconds may be too long.
required: false
default: 500
private_zone:
description:
- - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
+ - If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones.
+ The default is to use the public zone.
required: false
default: false
version_added: "1.9"
diff --git a/lib/ansible/modules/cloud/amazon/s3.py b/lib/ansible/modules/cloud/amazon/s3.py
index dc3703abdb8..fb9655da646 100644
--- a/lib/ansible/modules/cloud/amazon/s3.py
+++ b/lib/ansible/modules/cloud/amazon/s3.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: s3
short_description: manage objects in S3.
description:
- - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets, retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
+ - This module allows the user to manage S3 buckets and the objects within them. Includes support for creating and deleting both objects and buckets,
+ retrieving objects as files or strings and generating download links. This module has a dependency on python-boto.
version_added: "1.1"
options:
aws_access_key:
@@ -89,7 +90,8 @@ options:
version_added: "1.6"
mode:
description:
- - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), and delobj (delete object, Ansible 2.0+).
+ - Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+),
+ getstr (download object as string (1.3+)), list (list keys, Ansible 2.0+), create (bucket), delete (bucket), and delobj (delete object, Ansible 2.0+).
required: true
choices: ['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list']
object:
@@ -99,7 +101,9 @@ options:
default: null
permission:
description:
- - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be specified as a list.
+ - This option lets the user set the canned permissions on the object/bucket that are created.
+ The permissions that can be set are 'private', 'public-read', 'public-read-write', 'authenticated-read'. Multiple permissions can be
+ specified as a list.
required: false
default: private
version_added: "2.0"
@@ -118,13 +122,17 @@ options:
version_added: "2.0"
overwrite:
description:
- - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations. Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0
+ - Force overwrite either locally on the filesystem or remotely with the object/key. Used with PUT and GET operations.
+ Boolean or one of [always, never, different], true is equal to 'always' and false is equal to 'never', new in 2.0
required: false
default: 'always'
version_added: "1.2"
region:
description:
- - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect."
+ - >
+ AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
+ followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
+ S3 Location: US Standard. Prior to ansible 1.8 this parameter could be specified but had no effect.
required: false
default: null
version_added: "1.8"
@@ -153,7 +161,9 @@ options:
version_added: "1.3"
ignore_nonexistent_bucket:
description:
- - "Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the GetObject permission but no other permissions. In this case using the option mode: get will fail without specifying ignore_nonexistent_bucket: True."
+ - >
+ Overrides initial bucket lookups in case bucket or iam policies are restrictive. Example: a user may have the GetObject permission but no other
+ permissions. In this case using the option mode: get will fail without specifying ignore_nonexistent_bucket: True.
default: false
aliases: []
version_added: "2.3"
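For illustration only, a minimal task combining the mode and ignore_nonexistent_bucket options described in the hunk above might look like the sketch below; the bucket, object and dest values are placeholders, not taken from the module's own examples.

    - s3:
        bucket: mybucket                  # placeholder bucket name
        object: /my/desired/key.txt
        dest: /usr/local/myfile.txt
        mode: get                         # download the object
        ignore_nonexistent_bucket: True   # skip the initial bucket lookup when IAM only grants GetObject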
diff --git a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
index edd7516c7ca..b7c36b7a7f9 100644
--- a/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
+++ b/lib/ansible/modules/cloud/amazon/s3_lifecycle.py
@@ -38,7 +38,9 @@ options:
required: true
expiration_date:
description:
- - "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified."
+ - >
+ Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must
+ be midnight and a GMT timezone must be specified.
required: false
default: null
expiration_days:
@@ -77,7 +79,10 @@ options:
choices: [ 'glacier', 'standard_ia']
transition_date:
description:
- - "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
+ - >
+ Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class.
+ The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified,
+        this parameter is required.
required: false
default: null
transition_days:
@@ -110,7 +115,8 @@ EXAMPLES = '''
status: enabled
state: present
-# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030. Note that midnight GMT must be specified.
+# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030.
+# Note that midnight GMT must be specified.
# Be sure to quote your date strings
- s3_lifecycle:
name: mybucket
@@ -295,7 +301,9 @@ def compare_rule(rule_a, rule_b):
if rule2_expiration is None:
rule2_expiration = Expiration()
- if (rule1.__dict__ == rule2.__dict__) and (rule1_expiration.__dict__ == rule2_expiration.__dict__) and (rule1_transition.__dict__ == rule2_transition.__dict__):
+ if (rule1.__dict__ == rule2.__dict__ and
+ rule1_expiration.__dict__ == rule2_expiration.__dict__ and
+ rule1_transition.__dict__ == rule2_transition.__dict__):
return True
else:
return False
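A rough sketch of the date-based rules described above, assuming the prefix parameter mentioned in the module's example comment; note both dates are ISO-8601 at midnight with a GMT offset, and the bucket name is a placeholder.

    - s3_lifecycle:
        name: mybucket
        prefix: /logs/
        transition_date: "2020-12-31T00:00:00.000Z"   # must be midnight GMT
        expiration_date: "2030-12-31T00:00:00.000Z"
        status: enabled
        state: present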
diff --git a/lib/ansible/modules/cloud/amazon/s3_sync.py b/lib/ansible/modules/cloud/amazon/s3_sync.py
index b519edf0a1b..18f1b1ca098 100644
--- a/lib/ansible/modules/cloud/amazon/s3_sync.py
+++ b/lib/ansible/modules/cloud/amazon/s3_sync.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: s3_sync
short_description: Efficiently upload multiple files to S3
description:
- - The S3 module is great, but it is very slow for a large volume of files- even a dozen will be noticeable. In addition to speed, it handles globbing, inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping.
+  - The S3 module is great, but it is very slow for a large volume of files; even a dozen will be noticeable. In addition to speed, it handles globbing,
+ inclusions/exclusions, mime types, expiration mapping, recursion, and smart directory mapping.
version_added: "2.3"
options:
mode:
@@ -63,7 +64,9 @@ options:
choices: [ '', private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, bucket-owner-full-control ]
mime_map:
description:
- - 'Dict entry from extension to MIME type. This will override any default/sniffed MIME type. For example C({".txt": "application/text", ".yml": "appication/text"})'
+ - >
+ Dict entry from extension to MIME type. This will override any default/sniffed MIME type.
+        For example C({".txt": "application/text", ".yml": "application/text"})
required: false
include:
description:
@@ -362,7 +365,10 @@ def head_s3(s3, bucket, s3keys):
try:
retentry['s3_head'] = s3.head_object(Bucket=bucket, Key=entry['s3_path'])
except botocore.exceptions.ClientError as err:
- if hasattr(err, 'response') and 'ResponseMetadata' in err.response and 'HTTPStatusCode' in err.response['ResponseMetadata'] and str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404':
+ if (hasattr(err, 'response') and
+ 'ResponseMetadata' in err.response and
+ 'HTTPStatusCode' in err.response['ResponseMetadata'] and
+ str(err.response['ResponseMetadata']['HTTPStatusCode']) == '404'):
pass
else:
raise Exception(err)
@@ -444,7 +450,8 @@ def main():
bucket = dict(required=True),
key_prefix = dict(required=False, default=''),
file_root = dict(required=True, type='path'),
- permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read', 'bucket-owner-full-control']),
+ permission = dict(required=False, choices=['private', 'public-read', 'public-read-write', 'authenticated-read', 'aws-exec-read', 'bucket-owner-read',
+ 'bucket-owner-full-control']),
retries = dict(required=False),
mime_map = dict(required=False, type='dict'),
exclude = dict(required=False, default=".*"),
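A minimal sketch of the mime_map usage described above, using only parameters visible in this hunk; bucket and paths are placeholders.

    - s3_sync:
        bucket: mybucket
        file_root: roles/s3/files/
        key_prefix: config_files/web
        mime_map:
          .yml: application/text     # overrides the sniffed MIME type, as in the documented example
          .json: application/text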
diff --git a/lib/ansible/modules/cloud/amazon/s3_website.py b/lib/ansible/modules/cloud/amazon/s3_website.py
index ff85efbc329..0cf85238651 100644
--- a/lib/ansible/modules/cloud/amazon/s3_website.py
+++ b/lib/ansible/modules/cloud/amazon/s3_website.py
@@ -44,7 +44,10 @@ options:
default: null
region:
description:
- - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
+ - >
+ AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked,
+ followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the
+ S3 Location: US Standard.
required: false
default: null
state:
@@ -55,7 +58,10 @@ options:
choices: [ 'present', 'absent' ]
suffix:
description:
- - "Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash character."
+ - >
+ Suffix that is appended to a request that is for a directory on the website endpoint (e.g. if the suffix is index.html and you make a request to
+ samplebucket/images/ the data that is returned will be for the object with the key name images/index.html). The suffix must not include a slash
+ character.
required: false
default: index.html
@@ -115,7 +121,8 @@ routing_rules:
sample: ansible.com
condition:
key_prefix_equals:
- description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html
+ description: object key name prefix when the redirect is applied. For example, to redirect requests for ExamplePage.html, the key prefix will be
+ ExamplePage.html
returned: when routing rule present
type: string
sample: docs/
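To make the suffix behaviour above concrete: with suffix index.html, a request for samplebucket/images/ serves the object images/index.html. A minimal configuration sketch, with a placeholder bucket name:

    - s3_website:
        name: mybucket.com
        suffix: index.html     # must not contain a slash
        state: present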
diff --git a/lib/ansible/modules/cloud/amazon/sts_assume_role.py b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
index eb53b776948..e0b8fd74597 100644
--- a/lib/ansible/modules/cloud/amazon/sts_assume_role.py
+++ b/lib/ansible/modules/cloud/amazon/sts_assume_role.py
@@ -30,7 +30,8 @@ author: Boris Ekelchik (@bekelchik)
options:
role_arn:
description:
- - The Amazon Resource Name (ARN) of the role that the caller is assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
+ - The Amazon Resource Name (ARN) of the role that the caller is
+ assuming (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs)
required: true
role_session_name:
description:
@@ -43,7 +44,8 @@ options:
default: null
duration_seconds:
description:
- - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set to 3600 seconds.
+ - The duration, in seconds, of the role session. The value can range from 900 seconds (15 minutes) to 3600 seconds (1 hour).
+ By default, the value is set to 3600 seconds.
required: false
default: null
external_id:
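A hedged sketch of assuming a role with the options shown above; the ARN, session name and registered variable name are placeholders.

    - sts_assume_role:
        role_arn: "arn:aws:iam::123456789012:role/someRole"
        role_session_name: "someRoleSession"
        duration_seconds: 900        # minimum allowed; defaults to 3600
      register: assumed_role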
diff --git a/lib/ansible/modules/cloud/amazon/sts_session_token.py b/lib/ansible/modules/cloud/amazon/sts_session_token.py
index c657ca2932a..af2c652ab27 100644
--- a/lib/ansible/modules/cloud/amazon/sts_session_token.py
+++ b/lib/ansible/modules/cloud/amazon/sts_session_token.py
@@ -30,7 +30,9 @@ author: Victor Costan (@pwnall)
options:
duration_seconds:
description:
- - The duration, in seconds, of the session token. See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters for acceptable and default values.
+ - The duration, in seconds, of the session token.
+ See http://docs.aws.amazon.com/STS/latest/APIReference/API_GetSessionToken.html#API_GetSessionToken_RequestParameters
+ for acceptable and default values.
required: false
default: null
mfa_serial_number:
diff --git a/lib/ansible/modules/cloud/azure/azure.py b/lib/ansible/modules/cloud/azure/azure.py
index c44a7e0ae83..a5a7b5542fd 100644
--- a/lib/ansible/modules/cloud/azure/azure.py
+++ b/lib/ansible/modules/cloud/azure/azure.py
@@ -53,12 +53,14 @@ options:
required: true
image:
description:
- - system image for creating the virtual machine (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
+ - system image for creating the virtual machine
+ (e.g., b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB)
required: true
default: null
role_size:
description:
- - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of type G and DS are not available in all regions (locations). Make sure if you selected the size and type of instance available in your chosen location.
+ - azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). You have to pay attention to the fact that instances of
+      type G and DS are not available in all regions (locations). Make sure the size and type of instance you selected are available in your chosen location.
required: false
default: Small
endpoints:
@@ -78,7 +80,8 @@ options:
default: null
ssh_cert_path:
description:
- - path to an X509 certificate containing the public ssh key to install in the virtual machine. See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
+ - path to an X509 certificate containing the public ssh key to install in the virtual machine.
+ See http://www.windowsazure.com/en-us/manage/linux/tutorials/intro-to-linux/ for more details.
- if this option is specified, password-based ssh authentication will be disabled.
required: false
default: null
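For the image, role_size and ssh_cert_path options above, a heavily abridged sketch follows; every value is a placeholder, and any parameter not shown in this diff (name, location) is an assumption about the module rather than something documented here.

    - azure:
        name: my-virtual-machine
        location: 'East US 2'        # assumed parameter, placeholder value
        role_size: Small
        image: b39f27a8b8c64d52b05eac6a62ebad85__Ubuntu_DAILY_BUILD-precise-12_04_3-LTS-amd64-server-20131205-en-us-30GB
        ssh_cert_path: /path/to/azure_x509_cert.pem   # password-based ssh auth will be disabled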
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
index 6a8024fc7aa..ebd89dcc5ec 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_deployment.py
@@ -233,7 +233,9 @@ EXAMPLES = '''
- "14.04.2-LTS"
- "15.04"
metadata:
- description: "The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version. Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04."
+ description: >
+ The Ubuntu version for the VM. This will pick a fully patched image of this given Ubuntu version.
+          Allowed values: 12.04.5-LTS, 14.04.2-LTS, 15.04.
variables:
location: "West US"
imagePublisher: "Canonical"
@@ -320,7 +322,9 @@ EXAMPLES = '''
osDisk:
name: "osdisk"
vhd:
- uri: "[concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',variables('OSDiskName'),'.vhd')]"
+ uri: >
+ [concat('http://',parameters('newStorageAccountName'),'.blob.core.windows.net/',variables('vmStorageAccountContainerName'),'/',
+ variables('OSDiskName'),'.vhd')]
caching: "ReadWrite"
createOption: "FromImage"
networkProfile:
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py
index ecd5b52429e..2428a2aea26 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py
@@ -120,7 +120,7 @@ azure_networkinterfaces:
"tags": {},
"type": "Microsoft.Network/networkInterfaces"
}]
-'''
+''' # NOQA
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
index 7d7c9159809..4966eb7693e 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
@@ -334,7 +334,7 @@ state:
},
"type": "Microsoft.Network/networkSecurityGroups"
}
-'''
+''' # NOQA
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py
index 80c87e9f719..4dee9df8b3d 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py
@@ -200,7 +200,7 @@ azure_securitygroups:
"type": "Microsoft.Network/networkSecurityGroups"
}]
-'''
+''' # NOQA
from ansible.module_utils.basic import *
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py b/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
index ee69cf8ae02..65dbe5c0473 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_subnet.py
@@ -130,7 +130,7 @@ state:
description: Success or failure of the provisioning event.
type: str
example: "Succeeded"
-'''
+''' # NOQA
from ansible.module_utils.basic import *
diff --git a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
index 91832941ae8..b32cbd03254 100644
--- a/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
+++ b/lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
@@ -52,7 +52,8 @@ options:
description:
- Assert the state of the virtual machine.
- State 'present' will check that the machine exists with the requested configuration. If the configuration
- of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power state.
+ of the existing machine does not match, the machine will be updated. Use options started, allocated and restarted to change the machine's power
+ state.
- State 'absent' will remove the virtual machine.
default: present
required: false
@@ -437,7 +438,7 @@ azure_vm:
},
"type": "Microsoft.Compute/virtualMachines"
}
-'''
+''' # NOQA
import random
diff --git a/lib/ansible/modules/cloud/cloudscale/cloudscale_server.py b/lib/ansible/modules/cloud/cloudscale/cloudscale_server.py
index ebba7bb6e06..84f2d203040 100644
--- a/lib/ansible/modules/cloud/cloudscale/cloudscale_server.py
+++ b/lib/ansible/modules/cloud/cloudscale/cloudscale_server.py
@@ -31,12 +31,15 @@ description:
- Create, start, stop and delete servers on the cloudscale.ch IaaS service.
- All operations are performed using the cloudscale.ch public API v1.
- "For details consult the full API documentation: U(https://www.cloudscale.ch/en/api/v1)."
- - An valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at U(https://control.cloudscale.ch).
+  - A valid API token is required for all operations. You can create as many tokens as you like using the cloudscale.ch control panel at
+ U(https://control.cloudscale.ch).
notes:
- Instead of the api_token parameter the CLOUDSCALE_API_TOKEN environment variable can be used.
- To create a new server at least the C(name), C(ssh_key), C(image) and C(flavor) options are required.
- If more than one server with the name given by the C(name) option exists, execution is aborted.
- - Once a server is created all parameters except C(state) are read-only. You can't change the name, flavor or any other property. This is a limitation of the cloudscale.ch API. The module will silently ignore differences between the configured parameters and the running server if a server with the correct name or UUID exists. Only state changes will be applied.
+ - Once a server is created all parameters except C(state) are read-only. You can't change the name, flavor or any other property. This is a limitation
+ of the cloudscale.ch API. The module will silently ignore differences between the configured parameters and the running server if a server with the
+ correct name or UUID exists. Only state changes will be applied.
version_added: 2.3
author: "Gaudenz Steinlin "
options:
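Per the notes above, name, ssh_key, image and flavor are the minimum needed to create a server, and the token can come from the CLOUDSCALE_API_TOKEN environment variable instead of the api_token parameter. A rough sketch; the image and flavor values and the vaulted variable are placeholders.

    - cloudscale_server:
        name: my-cloudscale-server
        image: debian-8                # placeholder image slug
        flavor: flex-4                 # placeholder flavor
        ssh_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
      environment:
        CLOUDSCALE_API_TOKEN: "{{ vaulted_api_token }}"   # placeholder variable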
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_host.py b/lib/ansible/modules/cloud/cloudstack/cs_host.py
index f01c41dcda5..2086a131f28 100644
--- a/lib/ansible/modules/cloud/cloudstack/cs_host.py
+++ b/lib/ansible/modules/cloud/cloudstack/cs_host.py
@@ -307,7 +307,8 @@ state:
type: string
sample: Up
suitable_for_migration:
- description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM to it or not.
+ description: Whether this host is suitable (has enough capacity and satisfies all conditions like hosttags, max guests VM limit, etc) to migrate a VM
+ to it or not.
returned: success
type: string
sample: true
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_instance.py b/lib/ansible/modules/cloud/cloudstack/cs_instance.py
index d7d5040e113..1638ab223af 100644
--- a/lib/ansible/modules/cloud/cloudstack/cs_instance.py
+++ b/lib/ansible/modules/cloud/cloudstack/cs_instance.py
@@ -150,7 +150,8 @@ options:
default: null
root_disk_size:
description:
- - Root disk size in GByte required if deploying instance with KVM hypervisor and want resize the root disk size at startup (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template)
+      - Root disk size in GByte, required if deploying an instance with the KVM hypervisor and you want to resize the root disk size at startup
+ (need CloudStack >= 4.4, cloud-initramfs-growroot installed and enabled in the template)
required: false
default: null
security_groups:
@@ -984,7 +985,8 @@ def main():
memory = dict(default=None, type='int'),
template = dict(default=None),
iso = dict(default=None),
- template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
+ template_filter = dict(default="executable", aliases=['iso_filter'], choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable',
+ 'community']),
networks = dict(type='list', aliases=[ 'network' ], default=None),
ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
ip_address = dict(defaul=None),
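A sketch of the root_disk_size behaviour described above; the service_offering and zone style parameters are not shown in this diff and are assumptions about the module, and all values are placeholders.

    - cs_instance:
        name: web-vm-1
        template: Linux Debian 8 64-bit
        service_offering: Small        # assumed parameter name, placeholder value
        hypervisor: KVM
        root_disk_size: 20             # GByte; needs CloudStack >= 4.4 and cloud-initramfs-growroot in the template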
diff --git a/lib/ansible/modules/cloud/cloudstack/cs_iso.py b/lib/ansible/modules/cloud/cloudstack/cs_iso.py
index 352895d8425..1b88491e20a 100644
--- a/lib/ansible/modules/cloud/cloudstack/cs_iso.py
+++ b/lib/ansible/modules/cloud/cloudstack/cs_iso.py
@@ -48,7 +48,8 @@ options:
default: null
is_ready:
description:
- - This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false).
+      - This flag is used for searching existing ISOs. If set to C(true), it will only list ISOs ready for deployment, e.g.
+ successfully downloaded and installed. Recommended to set it to C(false).
required: false
default: false
aliases: []
diff --git a/lib/ansible/modules/cloud/digital_ocean/digital_ocean.py b/lib/ansible/modules/cloud/digital_ocean/digital_ocean.py
index 9c88d9ff76f..0a5db77ac6f 100644
--- a/lib/ansible/modules/cloud/digital_ocean/digital_ocean.py
+++ b/lib/ansible/modules/cloud/digital_ocean/digital_ocean.py
@@ -51,7 +51,8 @@ options:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of a SSH key.
unique_name:
description:
- - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
+ - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host
+ per name. Useful for idempotence.
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
@@ -269,7 +270,8 @@ class Droplet(JsonfyMixIn):
cls.manager = DoManager(None, api_token, api_version=2)
@classmethod
- def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None, ipv6=False):
+ def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False, user_data=None,
+ ipv6=False):
private_networking_lower = str(private_networking).lower()
backups_enabled_lower = str(backups_enabled).lower()
ipv6_lower = str(ipv6).lower()
@@ -463,7 +465,8 @@ def main():
),
)
if not HAS_DOPY and not HAS_SIX:
- module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. Make sure both dopy and six are installed.')
+ module.fail_json(msg='dopy >= 0.3.2 is required for this module. dopy requires six but six is not installed. '
+ 'Make sure both dopy and six are installed.')
if not HAS_DOPY:
module.fail_json(msg='dopy >= 0.3.2 required for this module')
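The unique_name option above is what makes droplet creation idempotent. A rough sketch using the parameters visible in the Droplet.add signature; the command parameter and all ids/slugs are assumptions or placeholders.

    - digital_ocean:
        state: present
        command: droplet               # assumed parameter
        name: mydroplet
        unique_name: yes               # only one droplet with this hostname
        api_token: "{{ do_api_token }}"
        size_id: 512mb                 # placeholder slug
        region_id: ams2                # placeholder region
        image_id: ubuntu-14-04-x64     # placeholder image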
diff --git a/lib/ansible/modules/cloud/google/gc_storage.py b/lib/ansible/modules/cloud/google/gc_storage.py
index 32f94f2d4ce..ee9cf19d43b 100644
--- a/lib/ansible/modules/cloud/google/gc_storage.py
+++ b/lib/ansible/modules/cloud/google/gc_storage.py
@@ -25,7 +25,10 @@ module: gc_storage
version_added: "1.4"
short_description: This module manages objects/buckets in Google Cloud Storage.
description:
- - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for information about setting the default project.
+ - This module allows users to manage their objects/buckets in Google Cloud Storage. It allows upload and download operations and can set some
+ canned permissions. It also allows retrieval of URLs for objects for use in playbooks, and retrieval of string contents of objects. This module
+ requires setting the default project in GCS prior to playbook usage. See U(https://developers.google.com/storage/docs/reference/v1/apiversion1) for
+ information about setting the default project.
options:
bucket:
@@ -54,7 +57,8 @@ options:
aliases: [ 'overwrite' ]
permission:
description:
- - This option let's the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private', 'public-read', 'authenticated-read'.
+      - This option lets the user set the canned permissions on the object/bucket that are created. The permissions that can be set are 'private',
+ 'public-read', 'authenticated-read'.
required: false
default: private
headers:
@@ -65,12 +69,14 @@ options:
default: '{}'
expiration:
description:
- - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only available when public-read is the acl for the object.
+ - Time limit (in seconds) for the URL generated and returned by GCA when performing a mode=put or mode=get_url operation. This url is only
+ available when public-read is the acl for the object.
required: false
default: null
mode:
description:
- - Switches the module behaviour between upload, download, get_url (return download url) , get_str (download object as string), create (bucket) and delete (bucket).
+      - Switches the module behaviour between upload, download, get_url (return download url), get_str (download object as string), create (bucket) and
+ delete (bucket).
required: true
default: null
choices: [ 'get', 'put', 'get_url', 'get_str', 'delete', 'create' ]
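Putting the mode, permission and expiration options above together, an upload that returns a time-limited public URL could look roughly like this; bucket, object and src are placeholders, and the src parameter name is an assumption not shown in this diff.

    - gc_storage:
        bucket: mybucket
        object: key.txt
        src: /usr/local/myfile.txt     # assumed parameter name
        mode: put
        permission: public-read
        expiration: 600                # seconds; only meaningful with a public-read ACL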
diff --git a/lib/ansible/modules/cloud/google/gce_tag.py b/lib/ansible/modules/cloud/google/gce_tag.py
index 5436235289e..a65dbe81476 100644
--- a/lib/ansible/modules/cloud/google/gce_tag.py
+++ b/lib/ansible/modules/cloud/google/gce_tag.py
@@ -36,7 +36,8 @@ options:
aliases: []
instance_pattern:
description:
- - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported. See U(https://docs.python.org/2/library/re.html) for details.
+ - The pattern of GCE instance names to match for adding/removing tags. Full-Python regex is supported.
+ See U(https://docs.python.org/2/library/re.html) for details.
If instance_name is not specified, this field is required.
required: false
default: null
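A quick sketch of instance_pattern from the hunk above; the tags parameter is an assumption here, and the regex and tag names are placeholders.

    - gce_tag:
        instance_pattern: instance-name-.*   # full Python regex, see the re docs linked above
        tags:
          - http-server
          - https-server
        state: present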
diff --git a/lib/ansible/modules/cloud/google/gcpubsub.py b/lib/ansible/modules/cloud/google/gcpubsub.py
index e1c515ee927..41e0a787361 100644
--- a/lib/ansible/modules/cloud/google/gcpubsub.py
+++ b/lib/ansible/modules/cloud/google/gcpubsub.py
@@ -44,7 +44,9 @@ options:
required: True
subscription:
description:
- - Dictionary containing a subscripton name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull. For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields. See subfields name, push_endpoint and ack_deadline for more information.
+      - Dictionary containing a subscription name associated with a topic (required), along with optional ack_deadline, push_endpoint and pull.
+ For pulling from a subscription, message_ack (bool), max_messages (int) and return_immediate are available as subfields.
+ See subfields name, push_endpoint and ack_deadline for more information.
required: False
name:
description: Subfield of subscription. Required if subscription is specified. See examples.
@@ -53,15 +55,25 @@ options:
description: Subfield of subscription. Not required. Default deadline for subscriptions to ACK the message before it is resent. See examples.
required: False
pull:
- description: Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name. max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately (bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
+ description:
+ - Subfield of subscription. Not required. If specified, messages will be retrieved from topic via the provided subscription name.
+ max_messages (int; default None; max number of messages to pull), message_ack (bool; default False; acknowledge the message) and return_immediately
+ (bool; default True, don't wait for messages to appear). If the messages are acknowledged, changed is set to True, otherwise, changed is False.
push_endpoint:
- description: Subfield of subscription. Not required. If specified, message will be sent to an endpoint. See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
+ description:
+ - Subfield of subscription. Not required. If specified, message will be sent to an endpoint.
+ See U(https://cloud.google.com/pubsub/docs/advanced#push_endpoints) for more information.
required: False
publish:
- description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
+ description:
+ - List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
required: False
state:
- description: State of the topic or queue (absent, present). Applies to the most granular resource. Remove the most granular resource. If subcription is specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the subscription.
+ description:
+      - State of the topic or queue (absent, present). Applies to the most granular resource. Remove the most granular resource. If subscription is
+ specified we remove it. If only topic is specified, that is what is removed. Note that a topic can be removed without first removing the
+ subscription.
required: False
default: "present"
'''
@@ -144,7 +156,8 @@ gcpubsub:
RETURN = '''
publish:
- description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format. Only message is required.
+ description: List of dictionaries describing messages and attributes to be published. Dictionary is in message(str):attributes(dict) format.
+ Only message is required.
returned: Only when specified
type: list of dictionary
sample: "publish: ['message': 'my message', attributes: {'key1': 'value1'}]"
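To tie the subscription and pull subfields above together, pulling and acknowledging messages from a topic could be sketched as follows; topic and subscription names are placeholders.

    - gcpubsub:
        topic: ansible-topic-example
        state: present
        subscription:
          name: mysub
          pull:
            message_ack: yes        # changed is True when messages are acknowledged
            max_messages: 5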
diff --git a/lib/ansible/modules/cloud/misc/ovirt.py b/lib/ansible/modules/cloud/misc/ovirt.py
index 0b0cc6cb63d..6faa0e3eac4 100644
--- a/lib/ansible/modules/cloud/misc/ovirt.py
+++ b/lib/ansible/modules/cloud/misc/ovirt.py
@@ -292,7 +292,9 @@ def conn(url, user, password):
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
if vmdisk_alloc == 'thin':
# define VM params
- vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+ cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
@@ -301,10 +303,12 @@ def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork,
nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
elif vmdisk_alloc == 'preallocated':
# define VM params
- vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype)
+ vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos),
+ template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem),
+                             cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
# define disk params
- vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
- storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
+ vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System",
+ format='raw', storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
# define network parameters
network_net = params.Network(name=vmnetwork)
nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')
diff --git a/lib/ansible/modules/cloud/misc/proxmox_kvm.py b/lib/ansible/modules/cloud/misc/proxmox_kvm.py
index 83a0b5234e0..35c83932e1a 100644
--- a/lib/ansible/modules/cloud/misc/proxmox_kvm.py
+++ b/lib/ansible/modules/cloud/misc/proxmox_kvm.py
@@ -762,7 +762,10 @@ def get_vminfo(module, proxmox, node, vmid, **kwargs):
k = vm[k]
k = re.search('=(.*?),', k).group(1)
mac[interface] = k
- if re.match(r'virtio[0-9]', k) is not None or re.match(r'ide[0-9]', k) is not None or re.match(r'scsi[0-9]', k) is not None or re.match(r'sata[0-9]', k) is not None:
+ if (re.match(r'virtio[0-9]', k) is not None or
+ re.match(r'ide[0-9]', k) is not None or
+ re.match(r'scsi[0-9]', k) is not None or
+ re.match(r'sata[0-9]', k) is not None):
device = k
k = vm[k]
k = re.search('(.*?),', k).group(1)
diff --git a/lib/ansible/modules/cloud/misc/serverless.py b/lib/ansible/modules/cloud/misc/serverless.py
index 98c1b239f92..842ec39462f 100644
--- a/lib/ansible/modules/cloud/misc/serverless.py
+++ b/lib/ansible/modules/cloud/misc/serverless.py
@@ -54,7 +54,8 @@ options:
default: us-east-1
deploy:
description:
- - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
+ - Whether or not to deploy artifacts after building them. When this option is `false` all the functions will be built, but no stack update will be
+ run to send them out. This is mostly useful for generating artifacts to be stored/deployed elsewhere.
required: false
default: true
notes:
diff --git a/lib/ansible/modules/cloud/openstack/_nova_compute.py b/lib/ansible/modules/cloud/openstack/_nova_compute.py
index ea51d67a484..fedca58ca52 100644
--- a/lib/ansible/modules/cloud/openstack/_nova_compute.py
+++ b/lib/ansible/modules/cloud/openstack/_nova_compute.py
@@ -80,7 +80,8 @@ options:
version_added: "1.8"
image_exclude:
description:
- - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
+ - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying
+ portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
version_added: "1.8"
flavor_id:
description:
@@ -95,7 +96,8 @@ options:
version_added: "1.8"
flavor_include:
description:
- - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name.
+ - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count.
+ flavor_include is a positive match filter - it must exist in the flavor name.
version_added: "1.8"
key_name:
description:
@@ -459,7 +461,7 @@ def _create_server(module, nova):
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
# now exit with info
- module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
+ module.exit_json(changed=True, id=server.id, private_ip=''.join(private), public_ip=''.join(public), status=server.status, info=server._info)
if server.status == 'ERROR':
module.fail_json(msg = "Error in creating the server, please check logs")
diff --git a/lib/ansible/modules/cloud/packet/packet_device.py b/lib/ansible/modules/cloud/packet/packet_device.py
index 99cb28863a5..741f905b216 100644
--- a/lib/ansible/modules/cloud/packet/packet_device.py
+++ b/lib/ansible/modules/cloud/packet/packet_device.py
@@ -30,7 +30,8 @@ module: packet_device
short_description: create, destroy, start, stop, and reboot a Packet Host machine.
description:
- - create, destroy, update, start, stop, and reboot a Packet Host machine. When the machine is created it can optionally wait for it to have an IP address before returning. This module has a dependency on packet >= 1.0.
+ - create, destroy, update, start, stop, and reboot a Packet Host machine. When the machine is created it can optionally wait for it to have an
+ IP address before returning. This module has a dependency on packet >= 1.0.
- API is documented at U(https://www.packet.net/help/api/#page:devices,header:devices-devices-post).
version_added: 2.3
@@ -157,7 +158,7 @@ EXAMPLES = '''
user_data: |
#cloud-config
ssh_authorized_keys:
- - ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2
+ - {{ lookup('file', 'my_packet_sshkey') }}
coreos:
etcd:
discovery: https://discovery.etcd.io/6a28e078895c5ec737174db2419bb2f3
@@ -207,7 +208,7 @@ devices:
type: array
sample: '[{"hostname": "my-server.com", "id": "server-id", "public-ipv4": "147.229.15.12", "private-ipv4": "10.0.15.12", "public-ipv6": ""2604:1380:2:5200::3"}]'
returned: always
-'''
+''' # NOQA
import os
diff --git a/lib/ansible/modules/cloud/packet/packet_sshkey.py b/lib/ansible/modules/cloud/packet/packet_sshkey.py
index 9c4d0d97f4e..b742596b32c 100644
--- a/lib/ansible/modules/cloud/packet/packet_sshkey.py
+++ b/lib/ansible/modules/cloud/packet/packet_sshkey.py
@@ -69,7 +69,7 @@ EXAMPLES = '''
hosts: localhost
tasks:
packet_sshkey:
- key: ssh-dss AAAAB3NzaC1kc3MAAACBAIfNT5S0ncP4BBJBYNhNPxFF9lqVhfPeu6SM1LoCocxqDc1AT3zFRi8hjIf6TLZ2AA4FYbcAWxLMhiBxZRVldT9GdBXile78kAK5z3bKTwq152DCqpxwwbaTIggLFhsU8wrfBsPWnDuAxZ0h7mmrCjoLIE3CNLDA/NmV3iB8xMThAAAAFQCStcesSgR1adPORzBxTr7hug92LwAAAIBOProm3Gk+HWedLyE8IfofLaOeRnbBRHAOL4z0SexKkVOnQ/LGN/uDIIPGGBDYTvXgKZT+jbHeulRJ2jKgfSpGKN4JxFQ8uzVH492jEiiUJtT72Ss1dCV4PmyERVIw+f54itihV3z/t25dWgowhb0int8iC/OY3cGodlmYb3wdcQAAAIBuLbB45djZXzUkOTzzcRDIRfhaxo5WipbtEM2B1fuBt2gyrvksPpH/LK6xTjdIIb0CxPu4OCxwJG0aOz5kJoRnOWIXQGhH7VowrJhsqhIc8gN9ErbO5ea8b1L76MNcAotmBDeTUiPw01IJ8MdDxfmcsCslJKgoRKSmQpCwXQtN2g== tomk@hp2
+ key: "{{ lookup('file', 'my_packet_sshkey.pub') }}"
- name: create sshkey from file
hosts: localhost
@@ -104,7 +104,7 @@ sshkeys:
}
]
returned: always
-'''
+''' # NOQA
import os
import uuid
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks.py b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
index 1882b9e788e..5104bfbacd3 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: profitbricks
short_description: Create, destroy, start, stop, and reboot a ProfitBricks virtual machine.
description:
- - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
+ - Create, destroy, update, start, stop, and reboot a ProfitBricks virtual machine. When the virtual machine is created it can optionally wait
+ for it to be 'running' before returning. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
auto_increment:
diff --git a/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
index 14646ee443d..8d6c696dc6e 100644
--- a/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
+++ b/lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: profitbricks_datacenter
short_description: Create or destroy a ProfitBricks Virtual Datacenter.
description:
- - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency on profitbricks >= 1.0.0
+ - This is a simple module that supports creating or removing vDCs. A vDC is required before you can create servers. This module has a dependency
+ on profitbricks >= 1.0.0
version_added: "2.0"
options:
name:
diff --git a/lib/ansible/modules/cloud/softlayer/sl_vm.py b/lib/ansible/modules/cloud/softlayer/sl_vm.py
index 4a2c38b3e03..5f9edcf7ffb 100644
--- a/lib/ansible/modules/cloud/softlayer/sl_vm.py
+++ b/lib/ansible/modules/cloud/softlayer/sl_vm.py
@@ -240,13 +240,14 @@ import time
#TODO: get this info from API
STATES = ['present', 'absent']
-DATACENTERS = ['ams01','ams03','che01','dal01','dal05','dal06','dal09','dal10','fra02','hkg02','hou02','lon02','mel01','mex01','mil01','mon01','osl01','par01','sjc01','sjc03','sao01','sea01','sng01','syd01','tok02','tor01','wdc01','wdc04']
-CPU_SIZES = [1,2,4,8,16,32,56]
-MEMORY_SIZES = [1024,2048,4096,6144,8192,12288,16384,32768,49152,65536,131072,247808]
-INITIALDISK_SIZES = [25,100]
-LOCALDISK_SIZES = [25,100,150,200,300]
-SANDISK_SIZES = [10,20,25,30,40,50,75,100,125,150,175,200,250,300,350,400,500,750,1000,1500,2000]
-NIC_SPEEDS = [10,100,1000]
+DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'fra02', 'hkg02', 'hou02', 'lon02', 'mel01', 'mex01', 'mil01', 'mon01',
+ 'osl01', 'par01', 'sjc01', 'sjc03', 'sao01', 'sea01', 'sng01', 'syd01', 'tok02', 'tor01', 'wdc01', 'wdc04']
+CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
+MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
+INITIALDISK_SIZES = [25, 100]
+LOCALDISK_SIZES = [25, 100, 150, 200, 300]
+SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
+NIC_SPEEDS = [10, 100, 1000]
try:
import SoftLayer
diff --git a/lib/ansible/modules/cloud/vmware/vmware_guest.py b/lib/ansible/modules/cloud/vmware/vmware_guest.py
index 48129f1f350..0bd8541e32a 100644
--- a/lib/ansible/modules/cloud/vmware/vmware_guest.py
+++ b/lib/ansible/modules/cloud/vmware/vmware_guest.py
@@ -82,7 +82,9 @@ options:
description:
- "Set the guest ID (Debian, RHEL, Windows...)"
- "This field is required when creating a VM"
- - "Valid values are referenced here: https://www.vmware.com/support/developer/converter-sdk/conv55_apireference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html"
+ - >
+ Valid values are referenced here:
+ https://www.vmware.com/support/developer/converter-sdk/conv55_apireference/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html
version_added: "2.3"
disk:
description:
@@ -675,7 +677,9 @@ class PyVmomiHelper(object):
# VDS switch
pg_obj = get_obj(self.content, [vim.dvs.DistributedVirtualPortgroup], network_devices[key]['name'])
- if nic.device.backing and ( nic.device.backing.port.portgroupKey != pg_obj.key or nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid ):
+ if (nic.device.backing and
+ (nic.device.backing.port.portgroupKey != pg_obj.key or
+ nic.device.backing.port.switchUuid != pg_obj.config.distributedVirtualSwitch.uuid)):
nic_change_detected = True
dvs_port_connection = vim.dvs.PortConnection()
@@ -792,7 +796,8 @@ class PyVmomiHelper(object):
if 'joindomain' in self.params['customization']:
if 'domainadmin' not in self.params['customization'] or 'domainadminpassword' not in self.params['customization']:
- self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use joindomain feature")
+ self.module.fail_json(msg="'domainadmin' and 'domainadminpassword' entries are mandatory in 'customization' section to use "
+ "joindomain feature")
ident.identification.domainAdmin = str(self.params['customization'].get('domainadmin'))
ident.identification.joinDomain = str(self.params['customization'].get('joindomain'))
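The joindomain check above means a customization block must carry all three keys; a fragment (not a complete task, all values are placeholders) might look like:

    - vmware_guest:
        name: win-guest-01
        customization:
          joindomain: example.local
          domainadmin: administrator                    # mandatory together with joindomain
          domainadminpassword: "{{ vaulted_domain_password }}"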
diff --git a/lib/ansible/modules/cloud/vmware/vsphere_guest.py b/lib/ansible/modules/cloud/vmware/vsphere_guest.py
index 40758e3f269..27bf20d9b19 100644
--- a/lib/ansible/modules/cloud/vmware/vsphere_guest.py
+++ b/lib/ansible/modules/cloud/vmware/vsphere_guest.py
@@ -75,18 +75,21 @@ options:
default: None
esxi:
description:
- - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the datacenter name
+ - Dictionary which includes datacenter and hostname on which the VM should be created. For standalone ESXi hosts, ha-datacenter should be used as the
+ datacenter name
required: false
default: null
state:
description:
- - Indicate desired state of the vm. 'reconfigured' only applies changes to 'vm_cdrom', 'memory_mb', and 'num_cpus' in vm_hardware parameter. The 'memory_mb' and 'num_cpus' changes are applied to powered-on vms when hot-plugging is enabled for the guest.
+ - Indicate desired state of the vm. 'reconfigured' only applies changes to 'vm_cdrom', 'memory_mb', and 'num_cpus' in vm_hardware parameter.
+ The 'memory_mb' and 'num_cpus' changes are applied to powered-on vms when hot-plugging is enabled for the guest.
default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware such as CPU, RAM, NICs or Disks can be applied when launching from template.
+ - Specifies if the VM should be deployed from a template (mutually exclusive with 'state' parameter). No guest customization changes to hardware
+ such as CPU, RAM, NICs or Disks can be applied when launching from template.
default: no
choices: ['yes', 'no']
template_src:
@@ -128,7 +131,8 @@ options:
default: null
vm_hw_version:
description:
- - Desired hardware version identifier (for example, "vmx-08" for vms that needs to be managed with vSphere Client). Note that changing hardware version of existing vm is not supported.
+      - Desired hardware version identifier (for example, "vmx-08" for vms that need to be managed with vSphere Client). Note that changing hardware
+ version of existing vm is not supported.
required: false
default: null
version_added: "1.7"
@@ -1780,7 +1784,8 @@ def main():
# CONNECT TO THE SERVER
viserver = VIServer()
if validate_certs and not hasattr(ssl, 'SSLContext') and not vcenter_hostname.startswith('http://'):
- module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(msg='pysphere does not support verifying certificates with python < 2.7.9. Either update python or set '
+ 'validate_certs=False on the task')
try:
viserver.connect(vcenter_hostname, username, password)
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_app.py b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
index abeb01cab5b..e54327aecd2 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_app.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_app.py
@@ -41,7 +41,10 @@ description:
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API `_ for more info.
options:
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_db.py b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
index f13b9ec04e8..cf0a64a590e 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_db.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_db.py
@@ -38,7 +38,10 @@ description:
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API `_ for more info.
options:
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_domain.py b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
index 3ad3f4d65fd..8329da6ee1b 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_domain.py
@@ -36,8 +36,12 @@ description:
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted. If you don't specify subdomains, the domain will be deleted.
- - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - If you are I(deleting) domains by using C(state=absent), then note that if you specify subdomains, just those particular subdomains will be deleted.
+ If you don't specify subdomains, the domain will be deleted.
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API `_ for more info.
options:
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py b/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
index 08c523a1409..e9c1656da8b 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
@@ -35,7 +35,10 @@ description:
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API `_ for more info.
options:
diff --git a/lib/ansible/modules/cloud/webfaction/webfaction_site.py b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
index 505034684f2..75a0944da3d 100644
--- a/lib/ansible/modules/cloud/webfaction/webfaction_site.py
+++ b/lib/ansible/modules/cloud/webfaction/webfaction_site.py
@@ -36,9 +36,13 @@ description:
author: Quentin Stafford-Fraser (@quentinsf)
version_added: "2.0"
notes:
- - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP address - you can use a DNS name.
+ - Sadly, you I(do) need to know your webfaction hostname for the C(host) parameter. But at least, unlike the API, you don't need to know the IP
+ address. You can use a DNS name.
- If a site of the same name exists in the account but on a different host, the operation will exit.
- - "You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API - the location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as your host, you may want to add C(serial: 1) to the plays."
+ - >
+ You can run playbooks that use this on a local machine, or on a Webfaction host, or elsewhere, since the scripts use the remote webfaction API.
+ The location is not important. However, running them on multiple hosts I(simultaneously) is best avoided. If you don't specify I(localhost) as
+ your host, you may want to add C(serial: 1) to the plays.
- See `the webfaction API `_ for more info.
options:
diff --git a/lib/ansible/modules/clustering/consul.py b/lib/ansible/modules/clustering/consul.py
index 8a61757be9d..1f024cdf531 100644
--- a/lib/ansible/modules/clustering/consul.py
+++ b/lib/ansible/modules/clustering/consul.py
@@ -393,7 +393,8 @@ def parse_service(module):
)
elif module.params.get('service_name') and not module.params.get('service_port'):
- module.fail_json( msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure the 'port' argument meaning 'service_port'?")
+ module.fail_json(msg="service_name supplied but no service_port, a port is required to configure a service. Did you configure "
+ "the 'port' argument meaning 'service_port'?")
class ConsulService():
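The check above enforces that a service registration always carries a port; a minimal sketch, with placeholder service name and port:

    - consul:
        service_name: nginx
        service_port: 80     # required whenever service_name is given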
diff --git a/lib/ansible/modules/commands/script.py b/lib/ansible/modules/commands/script.py
index 84b3d0832cc..fc69a89e9e2 100644
--- a/lib/ansible/modules/commands/script.py
+++ b/lib/ansible/modules/commands/script.py
@@ -51,7 +51,8 @@ options:
version_added: "1.5"
notes:
- It is usually preferable to write Ansible modules than pushing scripts. Convert your script to an Ansible module for bonus points!
- - The ssh connection plugin will force psuedo-tty allocation via -tt when scripts are executed. psuedo-ttys do not have a stderr channel and all stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
+  - The ssh connection plugin will force pseudo-tty allocation via -tt when scripts are executed. Pseudo-ttys do not have a stderr channel and all
+ stderr is sent to stdout. If you depend on separated stdout and stderr result keys, please switch to a copy+command set of tasks instead of using script.
author:
- Ansible Core Team
- Michael DeHaan
diff --git a/lib/ansible/modules/database/influxdb/influxdb_retention_policy.py b/lib/ansible/modules/database/influxdb/influxdb_retention_policy.py
index ea069b93d57..847cf83379b 100644
--- a/lib/ansible/modules/database/influxdb/influxdb_retention_policy.py
+++ b/lib/ansible/modules/database/influxdb/influxdb_retention_policy.py
@@ -202,7 +202,9 @@ def alter_retention_policy(module, client, retention_policy):
elif duration == 'INF':
influxdb_duration_format = '0'
- if not retention_policy['duration'] == influxdb_duration_format or not retention_policy['replicaN'] == int(replication) or not retention_policy['default'] == default:
+ if (not retention_policy['duration'] == influxdb_duration_format or
+ not retention_policy['replicaN'] == int(replication) or
+ not retention_policy['default'] == default):
if not module.check_mode:
try:
client.alter_retention_policy(policy_name, database_name, duration, replication, default)
diff --git a/lib/ansible/modules/database/mongodb/mongodb_user.py b/lib/ansible/modules/database/mongodb/mongodb_user.py
index c8b2a7e4d1b..76e162d6d9f 100644
--- a/lib/ansible/modules/database/mongodb/mongodb_user.py
+++ b/lib/ansible/modules/database/mongodb/mongodb_user.py
@@ -94,7 +94,10 @@ options:
roles:
version_added: "1.3"
description:
- - "The database user roles valid values could either be one or more of the following strings: 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'"
+ - >
+ The database user roles valid values could either be one or more of the following strings:
+ 'read', 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase',
+ 'dbAdminAnyDatabase'
- "Or the following dictionary '{ db: DATABASE_NAME, role: ROLE_NAME }'."
- "This param requires pymongo 2.5+. If it is a string, mongodb 2.4+ is also required. If it is a dictionary, mongo 2.6+ is required."
required: false
diff --git a/lib/ansible/modules/database/mssql/mssql_db.py b/lib/ansible/modules/database/mssql/mssql_db.py
index 5a5d0bc80c0..6929b7b6c3a 100644
--- a/lib/ansible/modules/database/mssql/mssql_db.py
+++ b/lib/ansible/modules/database/mssql/mssql_db.py
@@ -71,7 +71,8 @@ options:
required: false
autocommit:
description:
- - Automatically commit the change only if the import succeed. Sometimes it is necessary to use autocommit=true, since some content can't be changed within a transaction.
+      - Automatically commit the change only if the import succeeds. Sometimes it is necessary to use autocommit=true, since some content can't be changed
+ within a transaction.
required: false
default: false
choices: [ "false", "true" ]
@@ -198,7 +199,8 @@ def main():
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
- module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your @sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
+ module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check your "
+ "@sysconfdir@/freetds.conf / ${HOME}/.freetds.conf")
conn.autocommit(True)
changed = False
diff --git a/lib/ansible/modules/database/mysql/mysql_db.py b/lib/ansible/modules/database/mysql/mysql_db.py
index 6580cfd771f..4a7a71db51c 100644
--- a/lib/ansible/modules/database/mysql/mysql_db.py
+++ b/lib/ansible/modules/database/mysql/mysql_db.py
@@ -137,7 +137,8 @@ def db_delete(cursor, db):
cursor.execute(query)
return True
-def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None, single_transaction=None, quick=None):
+def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None,
+ single_transaction=None, quick=None):
cmd = module.get_bin_path('mysqldump', True)
# If defined, mysqldump demands --defaults-extra-file be the first option
if config_file:
@@ -312,7 +313,8 @@ def main():
except Exception:
e = get_exception()
if os.path.exists(config_file):
- module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, e))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
diff --git a/lib/ansible/modules/database/mysql/mysql_replication.py b/lib/ansible/modules/database/mysql/mysql_replication.py
index 112745ed6cd..07106c698be 100644
--- a/lib/ansible/modules/database/mysql/mysql_replication.py
+++ b/lib/ansible/modules/database/mysql/mysql_replication.py
@@ -39,7 +39,8 @@ author: "Balazs Pocze (@banyek)"
options:
mode:
description:
- - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL)
+ - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave
+ (START SLAVE), stopslave (STOP SLAVE), resetslave (RESET SLAVE), resetslaveall (RESET SLAVE ALL)
required: False
choices:
- getslave
@@ -267,7 +268,8 @@ def main():
except Exception:
e = get_exception()
if os.path.exists(config_file):
- module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, e))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
diff --git a/lib/ansible/modules/database/mysql/mysql_user.py b/lib/ansible/modules/database/mysql/mysql_user.py
index b65f759491f..1f278612386 100644
--- a/lib/ansible/modules/database/mysql/mysql_user.py
+++ b/lib/ansible/modules/database/mysql/mysql_user.py
@@ -597,7 +597,8 @@ def main():
connect_timeout=connect_timeout)
except Exception:
e = get_exception()
- module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, e))
if not sql_log_bin:
cursor.execute("SET SQL_LOG_BIN=0;")
diff --git a/lib/ansible/modules/database/mysql/mysql_variables.py b/lib/ansible/modules/database/mysql/mysql_variables.py
index 7e4ad3aeaf9..d0b6fe3e6bf 100644
--- a/lib/ansible/modules/database/mysql/mysql_variables.py
+++ b/lib/ansible/modules/database/mysql/mysql_variables.py
@@ -165,7 +165,8 @@ def main():
except Exception:
e = get_exception()
if os.path.exists(config_file):
- module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
+ module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. "
+ "Exception message: %s" % (config_file, e))
else:
module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))
diff --git a/lib/ansible/modules/database/postgresql/postgresql_db.py b/lib/ansible/modules/database/postgresql/postgresql_db.py
index 864bd8a5c56..844bc4cceb3 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_db.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_db.py
@@ -56,7 +56,8 @@ options:
default: null
lc_ctype:
description:
- - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0) is used as template.
+ - Character classification (LC_CTYPE) to use in the database (e.g. lower, upper, ...) Must match LC_CTYPE of template database unless C(template0)
+ is used as template.
required: false
default: null
state:
@@ -276,7 +277,8 @@ def main():
except TypeError:
e = get_exception()
if 'sslrootcert' in e.args[0]:
- module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(e), exception=traceback.format_exc())
+ module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert. Exception: {0}'.format(e),
+ exception=traceback.format_exc())
module.fail_json(msg="unable to connect to database: %s" % e, exception=traceback.format_exc())
except Exception:
diff --git a/lib/ansible/modules/database/postgresql/postgresql_ext.py b/lib/ansible/modules/database/postgresql/postgresql_ext.py
index 02aac5f3ce8..e9e9844a4c1 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_ext.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_ext.py
@@ -68,7 +68,9 @@ options:
notes:
- The default authentication assumes that you are either logging in as or sudo'ing to the C(postgres) account on the host.
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
- the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
+ the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
+ on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using
+ this module.
requirements: [ psycopg2 ]
author: "Daniel Schep (@dschep)"
'''
diff --git a/lib/ansible/modules/database/postgresql/postgresql_privs.py b/lib/ansible/modules/database/postgresql/postgresql_privs.py
index da0eaeb1df6..5b152193f56 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_privs.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_privs.py
@@ -132,7 +132,8 @@ options:
version_added: '2.3'
ssl_rootcert:
description:
- - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). If the file exists, the server's certificate will be
+ verified to be signed by one of these authorities.
required: false
default: null
version_added: '2.3'
diff --git a/lib/ansible/modules/database/postgresql/postgresql_schema.py b/lib/ansible/modules/database/postgresql/postgresql_schema.py
index 9ef2139c844..499e704e5ab 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_schema.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_schema.py
@@ -77,7 +77,9 @@ options:
choices: [ "present", "absent" ]
notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
- the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
+ the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed
+ on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before
+ using this module.
requirements: [ psycopg2 ]
author: "Flavien Chantelot "
'''
diff --git a/lib/ansible/modules/database/postgresql/postgresql_user.py b/lib/ansible/modules/database/postgresql/postgresql_user.py
index 016297f1b77..9732c8bd38a 100644
--- a/lib/ansible/modules/database/postgresql/postgresql_user.py
+++ b/lib/ansible/modules/database/postgresql/postgresql_user.py
@@ -49,7 +49,11 @@ options:
password:
description:
- set the user's password, before 1.4 this was required.
- - "When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is: C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not it is pre-encrypted."
+ - >
+ When passing an encrypted password, the encrypted parameter must also be true, and it must be generated with the format
+ C('str[\\"md5\\"] + md5[ password + username ]'), resulting in a total of 35 characters. An easy way to do this is:
+ C(echo \\"md5`echo -n \\"verysecretpasswordJOE\\" | md5`\\"). Note that if encrypted is set, the stored password will be hashed whether or not
+ it is pre-encrypted.
required: false
default: null
db:
@@ -108,7 +112,8 @@ options:
choices: [ "present", "absent" ]
encrypted:
description:
- - whether the password is stored hashed in the database. boolean. Passwords can be passed already hashed or unhashed, and postgresql ensures the stored password is hashed when encrypted is set.
+ - whether the password is stored hashed in the database. boolean. Passwords can be passed already hashed or unhashed, and postgresql ensures the
+ stored password is hashed when encrypted is set.
required: false
default: false
version_added: '1.4'
@@ -120,7 +125,8 @@ options:
version_added: '1.4'
no_password_changes:
description:
- - if C(yes), don't inspect database for password changes. Effective when C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make password changes as necessary.
+ - if C(yes), don't inspect database for password changes. Effective when C(pg_authid) is not accessible (such as AWS RDS). Otherwise, make
+ password changes as necessary.
required: false
default: 'no'
choices: [ "yes", "no" ]
@@ -136,7 +142,8 @@ options:
version_added: '2.3'
ssl_rootcert:
description:
- - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). If the file exists, the server's certificate will be verified to be signed by one of these authorities.
+ - Specifies the name of a file containing SSL certificate authority (CA) certificate(s). If the file exists, the server's certificate will be
+ verified to be signed by one of these authorities.
required: false
default: null
version_added: '2.3'
diff --git a/lib/ansible/modules/files/acl.py b/lib/ansible/modules/files/acl.py
index 40e4e7639b5..d7331e871c4 100644
--- a/lib/ansible/modules/files/acl.py
+++ b/lib/ansible/modules/files/acl.py
@@ -55,7 +55,8 @@ options:
default: no
choices: [ 'yes', 'no' ]
description:
- - if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if path is a file.
+ - if the target is a directory, setting this to yes will make it the default acl for entities created inside the directory. It causes an error if
+ path is a file.
entity:
version_added: "1.5"
@@ -82,7 +83,9 @@ options:
required: false
default: null
description:
- - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now superseded by entity, type and permissions fields.
+ - DEPRECATED. The acl to set or remove. This must always be quoted in the form of '::'. The qualifier may be empty for
+ some types, but the type and perms are always required. '-' can be used as placeholder when you do not care about permissions. This is now
+ superseded by entity, type and permissions fields.
recursive:
version_added: "2.0"
diff --git a/lib/ansible/modules/files/archive.py b/lib/ansible/modules/files/archive.py
index 50c9fa21c6b..241c5ea33d3 100644
--- a/lib/ansible/modules/files/archive.py
+++ b/lib/ansible/modules/files/archive.py
@@ -31,7 +31,8 @@ version_added: 2.3
short_description: Creates a compressed archive of one or more files or trees.
extends_documentation_fragment: files
description:
- - Packs an archive. It is the opposite of M(unarchive). By default, it assumes the compression source exists on the target. It will not copy the source file from the local system to the target before archiving. Source files can be deleted after archival by specifying I(remove=True).
+ - Packs an archive. It is the opposite of M(unarchive). By default, it assumes the compression source exists on the target. It will not copy the
+ source file from the local system to the target before archiving. Source files can be deleted after archival by specifying I(remove=True).
options:
path:
description:
@@ -44,7 +45,8 @@ options:
default: 'gz'
dest:
description:
- - The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or multiple paths in a list.
+ - The file name of the destination archive. This is required when C(path) refers to multiple files by either specifying a glob, a directory or
+ multiple paths in a list.
required: false
default: null
remove:
diff --git a/lib/ansible/modules/files/copy.py b/lib/ansible/modules/files/copy.py
index d3fb168da01..b9f92484680 100644
--- a/lib/ansible/modules/files/copy.py
+++ b/lib/ansible/modules/files/copy.py
@@ -29,7 +29,8 @@ module: copy
version_added: "historical"
short_description: Copies files to remote locations.
description:
- - The C(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. If you need variable interpolation in copied files, use the M(template) module.
+ - The C(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
+ If you need variable interpolation in copied files, use the M(template) module.
options:
src:
description:
diff --git a/lib/ansible/modules/files/synchronize.py b/lib/ansible/modules/files/synchronize.py
index 045c722314a..c8cc941895e 100644
--- a/lib/ansible/modules/files/synchronize.py
+++ b/lib/ansible/modules/files/synchronize.py
@@ -27,7 +27,10 @@ module: synchronize
version_added: "1.4"
short_description: A wrapper around rsync to make common tasks in your playbooks quick and easy.
description:
- - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. It is run and originates on the local host where Ansible is being run. Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of boilerplate options and host facts. C(synchronize) is not intended to provide access to the full power of rsync, but does make the most common invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
+ - C(synchronize) is a wrapper around rsync to make common tasks in your playbooks quick and easy. It is run and originates on the local host where
+ Ansible is being run. Of course, you could just use the C(command) action to call rsync yourself, but you also have to add a fair number of
+ boilerplate options and host facts. C(synchronize) is not intended to provide access to the full power of rsync, but does make the most common
+ invocations easier to implement. You `still` may need to call rsync directly via C(command) or C(shell) depending on your use case.
options:
src:
description:
@@ -44,7 +47,8 @@ options:
version_added: "1.5"
mode:
description:
- - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context is the source.
+ - Specify the direction of the synchronization. In push mode the localhost or delegate is the source; In pull mode the remote host in context
+ is the source.
required: false
choices: [ 'push', 'pull' ]
default: 'push'
@@ -56,7 +60,8 @@ options:
required: false
checksum:
description:
- - Skip based on checksum, rather than mod-time & size; Note that that "archive" option is still enabled by default - the "checksum" option will not disable it.
+      - Skip based on checksum, rather than mod-time & size; Note that the "archive" option is still enabled by default - the "checksum" option will
+ not disable it.
choices: [ 'yes', 'no' ]
default: 'no'
required: false
@@ -171,13 +176,20 @@ options:
version_added: "2.0"
notes:
- rsync must be installed on both the local and remote host.
- - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host `synchronize is connecting to`.
- - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one remote machine.
- - "The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a delegate_to host when delegate_to is used)."
+ - For the C(synchronize) module, the "local host" is the host `the synchronize task originates on`, and the "destination host" is the host
+ `synchronize is connecting to`.
+ - The "local host" can be changed to a different host by using `delegate_to`. This enables copying between two remote hosts or entirely on one
+ remote machine.
+ - >
+ The user and permissions for the synchronize `src` are those of the user running the Ansible task on the local host (or the remote_user for a
+ delegate_to host when delegate_to is used).
- The user and permissions for the synchronize `dest` are those of the `remote_user` on the destination host or the `become_user` if `become=yes` is active.
- In 2.0.0.0 a bug in the synchronize module made become occur on the "local host". This was fixed in 2.0.1.
- - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine and rsync doesn't give us a way to pass sudo credentials in.
- - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and rsync does not provide us a way to pass a password to the connection.
+ - Currently, synchronize is limited to elevating permissions via passwordless sudo. This is because rsync itself is connecting to the remote machine
+ and rsync doesn't give us a way to pass sudo credentials in.
+ - Currently there are only a few connection types which support synchronize (ssh, paramiko, local, and docker) because a sync strategy has been
+ determined for those connection types. Note that the connection for these must not need a password as rsync itself is making the connection and
+ rsync does not provide us a way to pass a password to the connection.
- Expect that dest=~/x will be ~/x even if using sudo.
- Inspect the verbose output to validate the destination user/host/path
are what was expected.
@@ -185,7 +197,8 @@ notes:
C(.rsync-filter) files to the source directory.
- rsync daemon must be up and running with correct permission when using
rsync protocol in source or destination path.
- - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
+ - The C(synchronize) module forces `--delay-updates` to avoid leaving a destination in a broken in-between state if the underlying rsync process
+ encounters an error. Those synchronizing large numbers of files that are willing to trade safety for performance should call rsync directly.
author: "Timothy Appnel (@tima)"
'''
diff --git a/lib/ansible/modules/files/tempfile.py b/lib/ansible/modules/files/tempfile.py
index 9426e0d059f..050ee3b192d 100644
--- a/lib/ansible/modules/files/tempfile.py
+++ b/lib/ansible/modules/files/tempfile.py
@@ -31,7 +31,9 @@ author:
- Krzysztof Magosa
short_description: Creates temporary files and directories.
description:
- - The C(tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible you need to use M(file) module.
+ - The C(tempfile) module creates temporary files and directories. C(mktemp) command takes different parameters on various systems, this module helps
+ to avoid troubles related to that. Files/directories created by module are accessible only by creator. In case you need to make them world-accessible
+ you need to use M(file) module.
options:
state:
description:
diff --git a/lib/ansible/modules/files/unarchive.py b/lib/ansible/modules/files/unarchive.py
index 523891c1cbf..a573a5b76fb 100644
--- a/lib/ansible/modules/files/unarchive.py
+++ b/lib/ansible/modules/files/unarchive.py
@@ -33,12 +33,15 @@ version_added: 1.4
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: files
description:
- - The C(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set remote_src=yes to unpack an archive which already exists on the target..
+ - The C(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking.
+ Set remote_src=yes to unpack an archive which already exists on the target.
options:
src:
description:
- - If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the target server to existing archive file to unpack.
- - If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0). This is only for simple cases, for full download support look at the M(get_url) module.
+ - If remote_src=no (default), local path to archive file to copy to the target server; can be absolute or relative. If remote_src=yes, path on the
+ target server to existing archive file to unpack.
+ - If remote_src=yes and src contains ://, the remote machine will download the file from the url first. (version_added 2.0). This is only for
+ simple cases, for full download support look at the M(get_url) module.
required: true
default: null
dest:
diff --git a/lib/ansible/modules/messaging/rabbitmq_queue.py b/lib/ansible/modules/messaging/rabbitmq_queue.py
index 5ecb5936775..ef3cbfc6bc6 100644
--- a/lib/ansible/modules/messaging/rabbitmq_queue.py
+++ b/lib/ansible/modules/messaging/rabbitmq_queue.py
@@ -201,12 +201,14 @@ def main():
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
) and
(
- ( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
- ( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
+ ('x-dead-letter-exchange' in response['arguments'] and
+ response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange']) or
+ ('x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None)
) and
(
- ( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
- ( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
+ ('x-dead-letter-routing-key' in response['arguments'] and
+ response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key']) or
+ ('x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None)
)
):
module.fail_json(
diff --git a/lib/ansible/modules/monitoring/datadog_monitor.py b/lib/ansible/modules/monitoring/datadog_monitor.py
index eddfd3c7628..9a6a9bb84ab 100644
--- a/lib/ansible/modules/monitoring/datadog_monitor.py
+++ b/lib/ansible/modules/monitoring/datadog_monitor.py
@@ -65,7 +65,9 @@ options:
description: ["The name of the alert."]
required: true
message:
- description: ["A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'."]
+ description:
+ - A message to include with notifications for this monitor. Email notifications can be sent to specific users by using the same
+ '@username' notation as events. Monitor message template variables can be accessed by using double square brackets, i.e '[[' and ']]'.
required: false
default: null
silenced:
@@ -77,7 +79,9 @@ options:
required: false
default: False
no_data_timeframe:
- description: ["The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric alerts or 2 minutes for service checks."]
+ description:
+ - The number of minutes before a monitor will notify when data stops reporting. Must be at least 2x the monitor timeframe for metric
+ alerts or 2 minutes for service checks.
required: false
default: 2x timeframe for metric, 2 minutes for service
timeout_h:
@@ -85,11 +89,15 @@ options:
required: false
default: null
renotify_interval:
- description: ["The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's not resolved."]
+ description:
+ - The number of minutes after the last notification before a monitor will re-notify on the current status. It will only re-notify if it's
+ not resolved.
required: false
default: null
escalation_message:
- description: ["A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval is None"]
+ description:
+ - A message to include with a re-notification. Supports the '@username' notification we allow elsewhere. Not applicable if renotify_interval
+ is None
required: false
default: null
notify_audit:
@@ -97,7 +105,9 @@ options:
required: false
default: False
thresholds:
- description: ["A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have multiple thresholds, we don't define them directly in the query."]
+ description:
+ - A dictionary of thresholds by status. This option is only available for service checks and metric alerts. Because each of them can have
+        multiple thresholds, we don't define them directly in the query.
required: false
default: {'ok': 1, 'critical': 1, 'warning': 1}
locked:
@@ -106,7 +116,9 @@ options:
default: False
version_added: "2.2"
require_full_window:
- description: ["A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped."]
+ description:
+ - A boolean indicating whether this monitor needs a full window of data before it's evaluated. We highly recommend you set this to False for
+ sparse metrics, otherwise some evaluations will be skipped.
required: false
default: null
version_added: "2.3"
diff --git a/lib/ansible/modules/monitoring/logicmonitor.py b/lib/ansible/modules/monitoring/logicmonitor.py
index d2f60d20157..8cbf6c7386e 100644
--- a/lib/ansible/modules/monitoring/logicmonitor.py
+++ b/lib/ansible/modules/monitoring/logicmonitor.py
@@ -50,10 +50,14 @@ options:
description:
- The type of LogicMonitor object you wish to manage.
- "Collector: Perform actions on a LogicMonitor collector."
- - NOTE You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services before a Collector remove.
+ - NOTE You should use Ansible service modules such as M(service) or M(supervisorctl) for managing the Collector 'logicmonitor-agent' and
+ 'logicmonitor-watchdog' services. Specifically, you'll probably want to start these services after a Collector add and stop these services
+ before a Collector remove.
- "Host: Perform actions on a host device."
- "Hostgroup: Perform actions on a LogicMonitor host group."
- - 'NOTE Host and Hostgroup tasks should always be performed via delegate_to: localhost. There are no benefits to running these tasks on the remote host and doing so will typically cause problems.'
+ - >
+ NOTE Host and Hostgroup tasks should always be performed via delegate_to: localhost. There are no benefits to running these tasks on the
+ remote host and doing so will typically cause problems.
required: true
default: null
choices: ['collector', 'host', 'datsource', 'hostgroup']
@@ -86,7 +90,8 @@ options:
description:
- The fully qualified domain name of a collector in your LogicMonitor account.
- This is required for the creation of a LogicMonitor host (target=host action=add).
- - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't specified (target=host action=update action=remove action=sdt).
+ - This is required for updating, removing or scheduling downtime for hosts if 'displayname' isn't
+ specified (target=host action=update action=remove action=sdt).
required: false
default: null
hostname:
diff --git a/lib/ansible/modules/monitoring/nagios.py b/lib/ansible/modules/monitoring/nagios.py
index e7236f1f96b..f63660d4aaa 100644
--- a/lib/ansible/modules/monitoring/nagios.py
+++ b/lib/ansible/modules/monitoring/nagios.py
@@ -26,9 +26,13 @@ module: nagios
short_description: Perform common tasks in Nagios related to downtime and notifications.
description:
- "The C(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts."
- - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on.
+ - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer
+ to the host the playbook is currently running on.
- You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet).
- - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all).
+ - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself),
+ e.g., C(service=host). This keyword may not be given with other services at the same time.
+ I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all
+ services on particular host use keyword "all", e.g., C(service=all).
- When using the C(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter.
version_added: "0.7"
options:
diff --git a/lib/ansible/modules/monitoring/pagerduty_alert.py b/lib/ansible/modules/monitoring/pagerduty_alert.py
index d1c446fa1b5..9a0db2fd4d3 100644
--- a/lib/ansible/modules/monitoring/pagerduty_alert.py
+++ b/lib/ansible/modules/monitoring/pagerduty_alert.py
@@ -56,15 +56,20 @@ options:
required: true
desc:
description:
- - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version) will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI. The maximum length is 1024 characters.
+ - For C(triggered) I(state) - Required. Short description of the problem that led to this trigger. This field (or a truncated version)
+ will be used when generating phone calls, SMS messages and alert emails. It will also appear on the incidents tables in the PagerDuty UI.
+ The maximum length is 1024 characters.
- For C(acknowledged) or C(resolved) I(state) - Text that will appear in the incident's log associated with this event.
required: false
default: Created via Ansible
incident_key:
description:
- Identifies the incident to which this I(state) should be applied.
- - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup" problem reports.
- - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
+ - For C(triggered) I(state) - If there's no open (i.e. unresolved) incident with this key, a new one will be created. If there's already an
+ open incident with a matching key, this event will be appended to that incident's log. The event key provides an easy way to "de-dup"
+ problem reports.
+ - For C(acknowledged) or C(resolved) I(state) - This should be the incident_key you received back when the incident was first opened by a
+ trigger event. Acknowledge events referencing resolved or nonexistent incidents will be discarded.
required: false
client:
description:
diff --git a/lib/ansible/modules/monitoring/zabbix_screen.py b/lib/ansible/modules/monitoring/zabbix_screen.py
index 5745ff1f600..6e36dd1bf81 100644
--- a/lib/ansible/modules/monitoring/zabbix_screen.py
+++ b/lib/ansible/modules/monitoring/zabbix_screen.py
@@ -75,7 +75,9 @@ options:
- If the screen(s) already been added, the screen(s) name won't be updated.
- When creating or updating screen(s), C(screen_name), C(host_group) are required.
- When deleting screen(s), the C(screen_name) is required.
- - 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
+ - >
+ The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s)
+ will just be updated as needed.
required: true
notes:
- Too many concurrent updates to the same screen may cause Zabbix to return errors, see examples for a workaround if needed.
@@ -423,7 +425,8 @@ def main():
changed_screens.append(screen_name)
if created_screens and changed_screens:
- module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
+ module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens),
+ ",".join(changed_screens)))
elif created_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
elif changed_screens:
diff --git a/lib/ansible/modules/network/a10/a10_server.py b/lib/ansible/modules/network/a10/a10_server.py
index 975d363fcf5..5cd5d1b8cca 100644
--- a/lib/ansible/modules/network/a10/a10_server.py
+++ b/lib/ansible/modules/network/a10/a10_server.py
@@ -264,7 +264,9 @@ def main():
# - in case ports are missing from the ones specified by the user
# - in case ports are missing from those on the device
# - in case we are change the status of a server
- if port_needs_update(defined_ports, slb_server_ports) or port_needs_update(slb_server_ports, defined_ports) or status_needs_update(current_status, axapi_enabled_disabled(slb_server_status)):
+ if (port_needs_update(defined_ports, slb_server_ports) or
+ port_needs_update(slb_server_ports, defined_ports) or
+ status_needs_update(current_status, axapi_enabled_disabled(slb_server_status))):
result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post))
if axapi_failure(result):
module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg'])
diff --git a/lib/ansible/modules/network/a10/a10_service_group.py b/lib/ansible/modules/network/a10/a10_service_group.py
index bb5ec996bae..5c38ac2045f 100644
--- a/lib/ansible/modules/network/a10/a10_service_group.py
+++ b/lib/ansible/modules/network/a10/a10_service_group.py
@@ -65,7 +65,18 @@ options:
required: false
default: round-robin
aliases: ['method']
- choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash']
+ choices:
+ - 'round-robin'
+ - 'weighted-rr'
+ - 'least-connection'
+ - 'weighted-least-connection'
+ - 'service-least-connection'
+ - 'service-weighted-least-connection'
+ - 'fastest-response'
+ - 'least-request'
+ - 'round-robin-strict'
+ - 'src-ip-only-hash'
+ - 'src-ip-hash'
servers:
description:
- A list of servers to add to the service group. Each list item should be a
diff --git a/lib/ansible/modules/network/basics/slurp.py b/lib/ansible/modules/network/basics/slurp.py
index e4e1a4809c6..eed1721353e 100644
--- a/lib/ansible/modules/network/basics/slurp.py
+++ b/lib/ansible/modules/network/basics/slurp.py
@@ -40,7 +40,8 @@ options:
default: null
aliases: []
notes:
- - This module returns an 'in memory' base64 encoded version of the file, take into account that this will require at least twice the RAM as the original file size.
+ - This module returns an 'in memory' base64 encoded version of the file, take into account that this will require at least twice the RAM as the
+ original file size.
- "See also: M(fetch)"
requirements: []
author:
diff --git a/lib/ansible/modules/network/cloudflare_dns.py b/lib/ansible/modules/network/cloudflare_dns.py
index d6f58a8ab27..2040a397ac9 100644
--- a/lib/ansible/modules/network/cloudflare_dns.py
+++ b/lib/ansible/modules/network/cloudflare_dns.py
@@ -36,7 +36,8 @@ description:
options:
account_api_token:
description:
- - "Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)"
+ - >
+ Account API token. You can obtain your API key from the bottom of the Cloudflare 'My Account' page, found here: U(https://www.cloudflare.com/a/account)
required: true
account_email:
description:
diff --git a/lib/ansible/modules/network/dnsimple.py b/lib/ansible/modules/network/dnsimple.py
index 105ba879bae..7e859d35588 100644
--- a/lib/ansible/modules/network/dnsimple.py
+++ b/lib/ansible/modules/network/dnsimple.py
@@ -29,7 +29,9 @@ description:
options:
account_email:
description:
- - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
+ - >
+ Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for.
+ If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)
required: false
default: null
@@ -41,7 +43,8 @@ options:
domain:
description:
- - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains
+ will be returned.
- If domain is present but the domain doesn't exist, it will be created.
required: false
default: null
@@ -175,18 +178,19 @@ except ImportError:
def main():
module = AnsibleModule(
- argument_spec = dict(
- account_email = dict(required=False),
- account_api_token = dict(required=False, no_log=True),
- domain = dict(required=False),
- record = dict(required=False),
- record_ids = dict(required=False, type='list'),
- type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
- ttl = dict(required=False, default=3600, type='int'),
- value = dict(required=False),
- priority = dict(required=False, type='int'),
- state = dict(required=False, choices=['present', 'absent']),
- solo = dict(required=False, type='bool'),
+ argument_spec=dict(
+ account_email=dict(required=False),
+ account_api_token=dict(required=False, no_log=True),
+ domain=dict(required=False),
+ record=dict(required=False),
+ record_ids=dict(required=False, type='list'),
+ type=dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO',
+ 'POOL']),
+ ttl=dict(required=False, default=3600, type='int'),
+ value=dict(required=False),
+ priority=dict(required=False, type='int'),
+ state=dict(required=False, choices=['present', 'absent']),
+ solo=dict(required=False, type='bool'),
),
required_together = (
['record', 'value']
diff --git a/lib/ansible/modules/network/dnsmadeeasy.py b/lib/ansible/modules/network/dnsmadeeasy.py
index 803f90482e4..4cbcabca2de 100644
--- a/lib/ansible/modules/network/dnsmadeeasy.py
+++ b/lib/ansible/modules/network/dnsmadeeasy.py
@@ -25,7 +25,9 @@ module: dnsmadeeasy
version_added: "1.3"
short_description: Interface with dnsmadeeasy.com (a DNS hosting service).
description:
- - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)"
+ - >
+ Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or
+ monitor/account support yet. See: U(https://www.dnsmadeeasy.com/integration/restapi/)
options:
account_key:
description:
@@ -41,13 +43,15 @@ options:
domain:
description:
- - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution.
+ - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster
+        resolution.
required: true
default: null
record_name:
description:
- - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument.
+ - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless
+ of the state argument.
required: false
default: null
@@ -60,8 +64,12 @@ options:
record_value:
description:
- - "Record value. HTTPRED: , MX: , NS: , PTR: , SRV: , TXT: "
- - "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)"
+ - >
+ Record value. HTTPRED: , MX: , NS: , PTR: ,
+        SRV: , TXT:
+ - >
+ If record_value is not specified; no changes will be made and the record will be returned in 'result'
+ (in other words, this module can be used to fetch a record's current id, type, and ttl)
required: false
default: null
@@ -88,7 +96,8 @@ options:
version_added: 1.5.1
notes:
- - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
+ - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few
+ seconds of actual time by using NTP.
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks.
requirements: [ hashlib, hmac ]
diff --git a/lib/ansible/modules/network/f5/bigip_facts.py b/lib/ansible/modules/network/f5/bigip_facts.py
index 0d8b5d83688..6d0ca9eea39 100644
--- a/lib/ansible/modules/network/f5/bigip_facts.py
+++ b/lib/ansible/modules/network/f5/bigip_facts.py
@@ -1640,7 +1640,9 @@ def main():
if validate_certs:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
if fact_filter:
regex = fnmatch.translate(fact_filter)
diff --git a/lib/ansible/modules/network/f5/bigip_monitor_tcp.py b/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
index de1e4fa8926..6a7cb4d779e 100644
--- a/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
+++ b/lib/ansible/modules/network/f5/bigip_monitor_tcp.py
@@ -329,7 +329,9 @@ def main():
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
server = module.params['server']
server_port = module.params['server_port']
diff --git a/lib/ansible/modules/network/f5/bigip_node.py b/lib/ansible/modules/network/f5/bigip_node.py
index 73ffd93d7dd..2077ced52fb 100644
--- a/lib/ansible/modules/network/f5/bigip_node.py
+++ b/lib/ansible/modules/network/f5/bigip_node.py
@@ -319,7 +319,9 @@ def main():
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
server = module.params['server']
server_port = module.params['server_port']
diff --git a/lib/ansible/modules/network/f5/bigip_pool.py b/lib/ansible/modules/network/f5/bigip_pool.py
index d13730c0b53..14c03352801 100644
--- a/lib/ansible/modules/network/f5/bigip_pool.py
+++ b/lib/ansible/modules/network/f5/bigip_pool.py
@@ -409,7 +409,9 @@ def main():
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
server = module.params['server']
server_port = module.params['server_port']
diff --git a/lib/ansible/modules/network/f5/bigip_pool_member.py b/lib/ansible/modules/network/f5/bigip_pool_member.py
index ee337da9553..bc272b43c8e 100644
--- a/lib/ansible/modules/network/f5/bigip_pool_member.py
+++ b/lib/ansible/modules/network/f5/bigip_pool_member.py
@@ -396,7 +396,9 @@ def main():
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
server = module.params['server']
server_port = module.params['server_port']
diff --git a/lib/ansible/modules/network/f5/bigip_virtual_server.py b/lib/ansible/modules/network/f5/bigip_virtual_server.py
index b28131dce29..1a774ca7b8d 100644
--- a/lib/ansible/modules/network/f5/bigip_virtual_server.py
+++ b/lib/ansible/modules/network/f5/bigip_virtual_server.py
@@ -683,7 +683,9 @@ def main():
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
- module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
+ module.fail_json(
+ msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task'
+ )
server = module.params['server']
server_port = module.params['server_port']
diff --git a/lib/ansible/modules/network/haproxy.py b/lib/ansible/modules/network/haproxy.py
index 65910b41666..bd66d18bad6 100644
--- a/lib/ansible/modules/network/haproxy.py
+++ b/lib/ansible/modules/network/haproxy.py
@@ -288,7 +288,12 @@ class HAProxy(object):
"""
data = self.execute('show stat', 200, False).lstrip('# ')
r = csv.DictReader(data.splitlines())
- state = tuple(map(lambda d: { 'status': d['status'], 'weight': d['weight'] }, filter(lambda d: (pxname is None or d['pxname'] == pxname) and d['svname'] == svname, r)))
+ state = tuple(
+ map(
+ lambda d: {'status': d['status'], 'weight': d['weight']},
+ filter(lambda d: (pxname is None or d['pxname'] == pxname) and d['svname'] == svname, r)
+ )
+ )
return state or None
diff --git a/lib/ansible/modules/network/nmcli.py b/lib/ansible/modules/network/nmcli.py
index cb25067d8f8..a23907bf28f 100644
--- a/lib/ansible/modules/network/nmcli.py
+++ b/lib/ansible/modules/network/nmcli.py
@@ -172,7 +172,9 @@ options:
required: False
default: None
description:
- - 'This is only used with bridge - MAC address of the bridge (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)'
+ - >
+ This is only used with bridge - MAC address of the bridge
+ (note: this requires a recent kernel feature, originally introduced in 3.15 upstream kernel)
slavepriority:
required: False
default: 32
@@ -187,7 +189,8 @@ options:
required: False
default: yes
description:
- - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the frame was received on.
+ - This is only used with 'bridge-slave' - 'hairpin mode' for the slave, which allows frames to be sent back out through the slave the
+ frame was received on.
vlanid:
required: False
default: None
@@ -1106,7 +1109,8 @@ def main():
gw6=dict(required=False, default=None, type='str'),
dns6=dict(required=False, default=None, type='str'),
# Bond Specific vars
- mode=dict(require=False, default="balance-rr", choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad", "balance-tlb", "balance-alb"], type='str'),
+ mode=dict(require=False, default="balance-rr", type='str', choices=["balance-rr", "active-backup", "balance-xor", "broadcast", "802.3ad",
+ "balance-tlb", "balance-alb"]),
miimon=dict(required=False, default=None, type='str'),
downdelay=dict(required=False, default=None, type='str'),
updelay=dict(required=False, default=None, type='str'),
diff --git a/lib/ansible/modules/network/panos/panos_nat_policy.py b/lib/ansible/modules/network/panos/panos_nat_policy.py
index 059d18c2e35..642c94e2182 100644
--- a/lib/ansible/modules/network/panos/panos_nat_policy.py
+++ b/lib/ansible/modules/network/panos/panos_nat_policy.py
@@ -24,7 +24,8 @@ DOCUMENTATION = '''
module: panos_nat_policy
short_description: create a policy NAT rule
description:
- - Create a policy nat rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or both. Instead of splitting it into two we will make a fair attempt to determine which one the user wants.
+ - Create a policy NAT rule. Keep in mind that we can either end up configuring source NAT, destination NAT, or both. Instead of splitting it
+ into two we will make a fair attempt to determine which one the user wants.
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
diff --git a/lib/ansible/modules/network/snmp_facts.py b/lib/ansible/modules/network/snmp_facts.py
index 74f6d27166b..259f858bbb0 100644
--- a/lib/ansible/modules/network/snmp_facts.py
+++ b/lib/ansible/modules/network/snmp_facts.py
@@ -241,7 +241,8 @@ def main():
# Use SNMP Version 3 with authPriv
else:
- snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto, privProtocol=privacy_proto)
+ snmp_auth = cmdgen.UsmUserData(m_args['username'], authKey=m_args['authkey'], privKey=m_args['privkey'], authProtocol=integrity_proto,
+ privProtocol=privacy_proto)
# Use p to prefix OIDs with a dot for polling
p = DefineOid(dotprefix=True)
diff --git a/lib/ansible/modules/notification/hall.py b/lib/ansible/modules/notification/hall.py
index 2ec1ea1a88c..30e9cef7a97 100644
--- a/lib/ansible/modules/notification/hall.py
+++ b/lib/ansible/modules/notification/hall.py
@@ -46,7 +46,9 @@ options:
required: true
picture:
description:
- - "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
+ - >
+ The full URL to the image you wish to use for the Icon of the message. Defaults to
+ U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
required: false
"""
@@ -62,7 +64,7 @@ EXAMPLES = """
hall:
room_token:
title: Server Creation
- msg: 'Created EC2 instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region.'
+ msg: 'Created instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region.'
delegate_to: loclahost
when: ec2.instances|length > 0
with_items: '{{ ec2.instances }}'
@@ -81,11 +83,12 @@ def send_request_to_hall(module, room_token, payload):
def main():
module = AnsibleModule(
- argument_spec = dict(
- room_token = dict(type='str', required=True),
- msg = dict(type='str', required=True),
- title = dict(type='str', required=True),
- picture = dict(type='str', default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
+ argument_spec=dict(
+ room_token=dict(type='str', required=True),
+ msg=dict(type='str', required=True),
+ title=dict(type='str', required=True),
+ picture=dict(type='str',
+ default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
)
)
diff --git a/lib/ansible/modules/notification/sendgrid.py b/lib/ansible/modules/notification/sendgrid.py
index b76ea75f5b0..cc215847909 100644
--- a/lib/ansible/modules/notification/sendgrid.py
+++ b/lib/ansible/modules/notification/sendgrid.py
@@ -254,7 +254,8 @@ def main():
sendgrid_lib_args = [api_key, bcc, cc, headers, from_name, html_body, attachments]
if any(lib_arg is not None for lib_arg in sendgrid_lib_args) and not HAS_SENDGRID:
- module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: api_key, bcc, cc, headers, from_name, html_body, attachments')
+ module.fail_json(msg='You must install the sendgrid python library if you want to use any of the following arguments: '
+ 'api_key, bcc, cc, headers, from_name, html_body, attachments')
response, info = post_sendgrid_api(module, username, password,
from_address, to_addresses, subject, body, attachments=attachments,
diff --git a/lib/ansible/modules/notification/sns.py b/lib/ansible/modules/notification/sns.py
index 66f4135604b..f6a92c31f56 100644
--- a/lib/ansible/modules/notification/sns.py
+++ b/lib/ansible/modules/notification/sns.py
@@ -203,7 +203,8 @@ def main():
if not message_structure=='string' and message_attributes:
module.fail_json(msg="when specifying message_attributes, the message_structure must be set to 'string'; otherwise the attributes will not be sent.")
elif message_structure=='string' and (email or sqs or sms or http or https):
- module.fail_json(msg="do not specify non-default message formats when using the 'string' message_structure. they can only be used with the 'json' message_structure.")
+ module.fail_json(msg="do not specify non-default message formats when using the 'string' message_structure. they can only be used with "
+ "the 'json' message_structure.")
# .publish() takes full ARN topic id, but I'm lazy and type shortnames
# so do a lookup (topics cannot contain ':', so thats the decider)
diff --git a/lib/ansible/modules/packaging/language/maven_artifact.py b/lib/ansible/modules/packaging/language/maven_artifact.py
index cef00364636..9dd52ce443f 100644
--- a/lib/ansible/modules/packaging/language/maven_artifact.py
+++ b/lib/ansible/modules/packaging/language/maven_artifact.py
@@ -245,7 +245,10 @@ class MavenDownloader:
timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
- if len(snapshotArtifact.xpath("classifier/text()")) > 0 and snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and len(snapshotArtifact.xpath("extension/text()")) > 0 and snapshotArtifact.xpath("extension/text()")[0] == artifact.extension:
+ if (len(snapshotArtifact.xpath("classifier/text()")) > 0 and
+ snapshotArtifact.xpath("classifier/text()")[0] == artifact.classifier and
+ len(snapshotArtifact.xpath("extension/text()")) > 0 and
+ snapshotArtifact.xpath("extension/text()")[0] == artifact.extension):
return self._uri_for_artifact(artifact, snapshotArtifact.xpath("value/text()")[0])
return self._uri_for_artifact(artifact, artifact.version.replace("SNAPSHOT", timestamp + "-" + buildNumber))
@@ -412,7 +415,8 @@ def main():
try:
if downloader.download(artifact, dest):
- module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier, extension=extension, repository_url=repository_url, changed=True)
+ module.exit_json(state=state, dest=dest, group_id=group_id, artifact_id=artifact_id, version=version, classifier=classifier,
+ extension=extension, repository_url=repository_url, changed=True)
else:
module.fail_json(msg="Unable to download the artifact")
except ValueError as e:
diff --git a/lib/ansible/modules/packaging/os/apt.py b/lib/ansible/modules/packaging/os/apt.py
index 4cc797cb00b..77b7d3d994b 100644
--- a/lib/ansible/modules/packaging/os/apt.py
+++ b/lib/ansible/modules/packaging/os/apt.py
@@ -34,13 +34,17 @@ version_added: "0.0.2"
options:
name:
description:
- - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
+ - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards
+ like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let
+ typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for
+ the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
required: false
default: null
aliases: [ 'pkg', 'package' ]
state:
description:
- - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed.
+ - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
+ are installed.
required: false
default: present
choices: [ "latest", "absent", "present", "build-dep" ]
@@ -68,7 +72,8 @@ options:
default: null
install_recommends:
description:
- - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
+ - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
+ recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
required: false
default: null
choices: [ "yes", "no" ]
@@ -219,7 +224,7 @@ stderr:
returned: success, when needed
type: string
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
-'''
+''' # NOQA
# added to stave off future warnings about apt api
import warnings
diff --git a/lib/ansible/modules/packaging/os/apt_key.py b/lib/ansible/modules/packaging/os/apt_key.py
index b33a73c2463..27c71028f0e 100644
--- a/lib/ansible/modules/packaging/os/apt_key.py
+++ b/lib/ansible/modules/packaging/os/apt_key.py
@@ -367,7 +367,8 @@ def main():
if remove_key(module, short_key_id, keyring):
keys = all_keys(module, keyring, short_format)
if fingerprint in keys:
- module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)", id=key_id)
+ module.fail_json(msg="apt-key del did not return an error but the key was not removed (check that the id is correct and *not* a subkey)",
+ id=key_id)
changed = True
else:
# FIXME: module.fail_json or exit-json immediately at point of failure
diff --git a/lib/ansible/modules/packaging/os/dnf.py b/lib/ansible/modules/packaging/os/dnf.py
index b179f0f6886..6451b283013 100644
--- a/lib/ansible/modules/packaging/os/dnf.py
+++ b/lib/ansible/modules/packaging/os/dnf.py
@@ -35,7 +35,9 @@ description:
options:
name:
description:
- - "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
+ - >
+ Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update.
+ You can also pass a url or a local path to a rpm file.
required: true
default: null
aliases: []
diff --git a/lib/ansible/modules/packaging/os/homebrew.py b/lib/ansible/modules/packaging/os/homebrew.py
index f1d82dfe2ee..21b0961fc0f 100644
--- a/lib/ansible/modules/packaging/os/homebrew.py
+++ b/lib/ansible/modules/packaging/os/homebrew.py
@@ -47,7 +47,10 @@ options:
aliases: ['pkg', 'package', 'formula']
path:
description:
- - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+ - >
+ ':' separated list of paths to search for 'brew' executable. Since a package (I(formula) in homebrew parlance) location is prefixed
+ relative to the actual path of the I(brew) command, providing an alternative I(brew) path enables managing a different set of packages in an
+ alternative location in the system.
required: false
default: '/usr/local/bin'
state:
diff --git a/lib/ansible/modules/packaging/os/openbsd_pkg.py b/lib/ansible/modules/packaging/os/openbsd_pkg.py
index 47c27f99309..fdc046708cc 100644
--- a/lib/ansible/modules/packaging/os/openbsd_pkg.py
+++ b/lib/ansible/modules/packaging/os/openbsd_pkg.py
@@ -192,7 +192,8 @@ def package_present(names, pkg_spec, module):
flavors = pkg_spec[name]['flavor'].replace('-', ' ')
install_cmd = "cd %s && make clean=depends && FLAVOR=\"%s\" make install && make clean=depends" % (port_dir, flavors)
elif pkg_spec[name]['subpackage']:
- install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir, pkg_spec[name]['subpackage'])
+ install_cmd = "cd %s && make clean=depends && SUBPACKAGE=\"%s\" make install && make clean=depends" % (port_dir,
+ pkg_spec[name]['subpackage'])
else:
install_cmd = "cd %s && make install && make clean=depends" % (port_dir)
else:
diff --git a/lib/ansible/modules/packaging/os/opkg.py b/lib/ansible/modules/packaging/os/opkg.py
index 732c99c64ee..5e62b3fcdcf 100644
--- a/lib/ansible/modules/packaging/os/opkg.py
+++ b/lib/ansible/modules/packaging/os/opkg.py
@@ -44,7 +44,18 @@ options:
force:
description:
- opkg --force parameter used
- choices: ["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]
+ choices:
+ - ""
+ - "depends"
+ - "maintainer"
+ - "reinstall"
+ - "overwrite"
+ - "downgrade"
+ - "space"
+ - "postinstall"
+ - "remove"
+ - "checksum"
+ - "removal-of-dependent-packages"
required: false
default: absent
version_added: "2.0"
@@ -161,11 +172,12 @@ def install_packages(module, opkg_path, packages):
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(aliases=["pkg"], required=True),
- state = dict(default="present", choices=["present", "installed", "absent", "removed"]),
- force = dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove", "checksum", "removal-of-dependent-packages"]),
- update_cache = dict(default="no", aliases=["update-cache"], type='bool')
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=True),
+ state=dict(default="present", choices=["present", "installed", "absent", "removed"]),
+ force=dict(default="", choices=["", "depends", "maintainer", "reinstall", "overwrite", "downgrade", "space", "postinstall", "remove",
+ "checksum", "removal-of-dependent-packages"]),
+ update_cache=dict(default="no", aliases=["update-cache"], type='bool')
)
)
diff --git a/lib/ansible/modules/packaging/os/pacman.py b/lib/ansible/modules/packaging/os/pacman.py
index 083ac7138d6..f573803f014 100644
--- a/lib/ansible/modules/packaging/os/pacman.py
+++ b/lib/ansible/modules/packaging/os/pacman.py
@@ -157,7 +157,9 @@ def get_version(pacman_output):
return None
def query_package(module, pacman_path, name, state="present"):
- """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information were available"""
+ """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, a second
+ boolean to indicate if the package is up-to-date and a third boolean to indicate whether online information was available
+ """
if state == "present":
lcmd = "%s -Qi %s" % (pacman_path, name)
lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
diff --git a/lib/ansible/modules/packaging/os/rhn_register.py b/lib/ansible/modules/packaging/os/rhn_register.py
index 02a85997350..f7951792093 100644
--- a/lib/ansible/modules/packaging/os/rhn_register.py
+++ b/lib/ansible/modules/packaging/os/rhn_register.py
@@ -372,14 +372,17 @@ def main():
rhn.configure_server_url(server_url)
if not rhn.server_url:
- module.fail_json(msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)")
+ module.fail_json(
+ msg="No serverURL was found (from either the 'server_url' module arg or the config file option 'serverURL' in /etc/sysconfig/rhn/up2date)"
+ )
# Ensure system is registered
if state == 'present':
# Check for missing parameters ...
if not (activationkey or rhn.username or rhn.password):
- module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username, rhn.password))
+ module.fail_json(msg="Missing arguments, must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, rhn.username,
+ rhn.password))
if not activationkey and not (rhn.username and rhn.password):
module.fail_json(msg="Missing arguments, If registering without an activationkey, must supply username or password")
diff --git a/lib/ansible/modules/packaging/os/swdepot.py b/lib/ansible/modules/packaging/os/swdepot.py
index 03ea8a4c246..92ca487542b 100644
--- a/lib/ansible/modules/packaging/os/swdepot.py
+++ b/lib/ansible/modules/packaging/os/swdepot.py
@@ -95,7 +95,8 @@ def query_package(module, name, depot=None):
cmd_list = '/usr/sbin/swlist -a revision -l product'
if depot:
- rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)),
+ use_unsafe_shell=True)
else:
rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
diff --git a/lib/ansible/modules/packaging/os/zypper.py b/lib/ansible/modules/packaging/os/zypper.py
index 4e9a42fde65..d625c917295 100644
--- a/lib/ansible/modules/packaging/os/zypper.py
+++ b/lib/ansible/modules/packaging/os/zypper.py
@@ -48,7 +48,8 @@ options:
name:
description:
- Package name C(name) or package specifier.
- - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to update the package within the version range given.
+ - Can include a version like C(name=1.0), C(name>3.4) or C(name<=2.7). If a version is given, C(oldpackage) is implied and zypper is allowed to
+ update the package within the version range given.
- You can also pass a url or a local path to a rpm file.
- When using state=latest, this can be '*', which updates all installed packages.
required: true
@@ -79,7 +80,8 @@ options:
disable_recommends:
version_added: "1.8"
description:
- - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages.
+ - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does
+ install recommended packages.
required: false
default: "yes"
choices: [ "yes", "no" ]
@@ -101,7 +103,8 @@ options:
oldpackage:
version_added: "2.2"
description:
- - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a version is specified as part of the package name.
+ - Adds C(--oldpackage) option to I(zypper). Allows to downgrade packages with less side-effects than force. This is implied as soon as a
+ version is specified as part of the package name.
required: false
default: "no"
choices: [ "yes", "no" ]
diff --git a/lib/ansible/modules/source_control/github_hooks.py b/lib/ansible/modules/source_control/github_hooks.py
index 078c46aeb01..7a5d6bc3a33 100644
--- a/lib/ansible/modules/source_control/github_hooks.py
+++ b/lib/ansible/modules/source_control/github_hooks.py
@@ -41,7 +41,9 @@ options:
required: true
repo:
description:
- - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. Note this is different than the normal repo url."
+ - >
+ This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:.
+ Note this is different than the normal repo url.
required: true
hookurl:
description:
@@ -78,7 +80,8 @@ EXAMPLES = '''
oauthkey: '{{ oauthkey }}'
repo: https://api.github.com/repos/pcgentry/Github-Auto-Deploy
-# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler.
+# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would
+# be called from a handler.
- github_hooks:
action: cleanall
user: '{{ gituser }}'
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_amg.py b/lib/ansible/modules/storage/netapp/netapp_e_amg.py
index 70f2041ccb0..3ba5495c3f2 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_amg.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_amg.py
@@ -119,7 +119,7 @@ msg:
returned: success
type: string
sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
-"""
+""" # NOQA
import json
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py b/lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py
index 22c2d2e63f8..25495304e57 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py
@@ -58,7 +58,8 @@ options:
state:
description:
- The synchronization action you'd like to take.
- - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in progress, it will return with an OK status.
+ - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in
+ progress, it will return with an OK status.
- If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended
choices:
- running
@@ -67,8 +68,10 @@ options:
delete_recovery_point:
description:
- Indicates whether the failures point can be deleted on the secondary if necessary to achieve the synchronization.
- - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last failures point will be deleted and synchronization will continue.
- - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary and the failures point will be preserved.
+ - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last
+ failures point will be deleted and synchronization will continue.
+ - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW Repository capacity on the secondary
+ and the failures point will be preserved.
- "NOTE: This only has impact for newly launched syncs."
choices:
- yes
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_auth.py b/lib/ansible/modules/storage/netapp/netapp_e_auth.py
index 71a42443a48..e36cf53d6a2 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_auth.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_auth.py
@@ -27,7 +27,8 @@ DOCUMENTATION = '''
module: netapp_e_auth
short_description: Sets or updates the password for a storage array.
description:
- - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web Services proxy. Note, all storage arrays do not have a Monitor or RO role.
+ - Sets or updates the password for a storage array. When the password is updated on the storage array, it must be updated on the SANtricity Web
+ Services proxy. Note, all storage arrays do not have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
@@ -38,7 +39,8 @@ options:
- Should https certificates be validated?
name:
description:
- - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead.
+ - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use
+ the ID instead.
required: False
ssid:
description:
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_host.py b/lib/ansible/modules/storage/netapp/netapp_e_host.py
index fdda16cac6b..8890c58d6f7 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_host.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_host.py
@@ -59,7 +59,8 @@ options:
required: True
host_type_index:
description:
- - The index that maps to host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information.
+ - The index that maps to host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information.
+ Alternatively you can use the WSP portal to retrieve the information.
required: True
ports:
description:
@@ -288,7 +289,9 @@ class Host(object):
needs_update = True
else:
self.module.fail_json(
- msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port)
+ msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different "
+ "port spec" % arg_port
+ )
return needs_update
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py b/lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py
index 283cf4c53f6..083783936d4 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py
@@ -93,7 +93,8 @@ clusterRef:
type: string
sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
- description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping will alter the volume access rights of other clusters, in addition to this one.
+ description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping
+ will alter the volume access rights of other clusters, in addition to this one.
returned: always
type: boolean
sample: false
@@ -108,7 +109,8 @@ id:
type: string
sample: "3233343536373839303132333100000000000000"
isSAControlled:
- description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
+ description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false,
+ indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
returned: always except when state is absent
type: boolean
sample: false
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py b/lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py
index d71ae81c10c..8d0ded067c3 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py
@@ -78,7 +78,8 @@ options:
delete_limit:
description:
- The automatic deletion indicator.
- - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
+ - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of
+ snapshot images limited to the number specified.
- This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
required: False
default: 30
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py b/lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py
index 86635830d86..f5d3d1a5c13 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py
@@ -31,7 +31,8 @@ description:
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
notes:
- - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status will be returned, no other changes can be made to a pre-existing snapshot volume.
+ - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
+ will be returned, no other changes can be made to a pre-existing snapshot volume.
options:
api_username:
required: true
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_storage_system.py b/lib/ansible/modules/storage/netapp/netapp_e_storage_system.py
index 232afaef22c..14f98357fe8 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_storage_system.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_storage_system.py
@@ -62,7 +62,8 @@ options:
array_wwn:
required: false
description:
- - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of controller_addresses parameter.
+ - The WWN of the array to manage. Only necessary if in-band managing multiple arrays on the same agent host. Mutually exclusive of
+ controller_addresses parameter.
array_password:
required: false
description:
@@ -231,7 +232,9 @@ def main():
changed = True
if array_detail['wwn'] != array_wwn and array_wwn is not None:
module.fail_json(
- msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' % (ssid, array_detail['wwn']))
+ msg='It seems you may have specified a bad WWN. The storage system ID you specified, %s, currently has the WWN of %s' %
+ (ssid, array_detail['wwn'])
+ )
elif rc == 404:
if state == 'present':
changed = True
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
index 5ad9a7dc6d3..dab5c9ed5e0 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
@@ -487,7 +487,8 @@ class NetAppESeriesStoragePool(object):
# # TODO: this arg appears to be ignored, uncomment if it isn't
# #if self.criteria_disk_min_size_gb:
# # drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
- # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
+ # (rc,drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers,
+ # method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
#
# if rc == 204:
# self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
diff --git a/lib/ansible/modules/system/debconf.py b/lib/ansible/modules/system/debconf.py
index 0ea85be010b..7520ec6a262 100644
--- a/lib/ansible/modules/system/debconf.py
+++ b/lib/ansible/modules/system/debconf.py
@@ -138,14 +138,15 @@ def set_selection(module, pkg, question, vtype, value, unseen):
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True, aliases=['pkg'], type='str'),
- question = dict(required=False, aliases=['setting', 'selection'], type='str'),
- vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text', 'seen']),
- value = dict(required=False, type='str', aliases=['answer']),
- unseen = dict(required=False, type='bool'),
+ argument_spec=dict(
+ name=dict(required=True, aliases=['pkg'], type='str'),
+ question=dict(required=False, aliases=['setting', 'selection'], type='str'),
+ vtype=dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title',
+ 'text', 'seen']),
+ value=dict(required=False, type='str', aliases=['answer']),
+ unseen=dict(required=False, type='bool'),
),
- required_together = ( ['question','vtype', 'value'],),
+ required_together=(['question', 'vtype', 'value'],),
supports_check_mode=True,
)
diff --git a/lib/ansible/modules/system/firewalld.py b/lib/ansible/modules/system/firewalld.py
index fd32eedf42b..bd55d30e5ae 100644
--- a/lib/ansible/modules/system/firewalld.py
+++ b/lib/ansible/modules/system/firewalld.py
@@ -60,13 +60,17 @@ options:
version_added: "2.1"
zone:
description:
- - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).'
+ - >
+ The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. Available choices
+ can be extended based on per-system configs, listed here are "out of the box" defaults).
required: false
default: system-default(public)
choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block" ]
permanent:
description:
- - "Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can operate on firewalld configs when it's not running (requires firewalld >= 3.0.9)"
+ - >
+ Should this configuration be in the running firewalld configuration or persist across reboots. As of Ansible version 2.3, permanent operations can
+ operate on firewalld configs when it's not running (requires firewalld >= 3.0.9)
required: false
default: null
immediate:
@@ -495,7 +499,8 @@ def main():
except ImportError:
## Make python 2.4 shippable ci tests happy
e = sys.exc_info()[1]
- module.fail_json(msg='firewalld and its python 2 module are required for this module, version 2.0.11 or newer required (3.0.9 or newer for offline operations) \n %s' % e)
+ module.fail_json(msg='firewalld and its python 2 module are required for this module, version 2.0.11 or newer required '
+ '(3.0.9 or newer for offline operations) \n %s' % e)
if fw_offline:
## Pre-run version checking
diff --git a/lib/ansible/modules/system/known_hosts.py b/lib/ansible/modules/system/known_hosts.py
index de33f17ce47..b03f4ed472a 100644
--- a/lib/ansible/modules/system/known_hosts.py
+++ b/lib/ansible/modules/system/known_hosts.py
@@ -42,7 +42,8 @@ options:
default: null
key:
description:
- - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed). The key must be in the right format for ssh (see sshd(1), section "SSH_KNOWN_HOSTS FILE FORMAT")
+ - The SSH public host key, as a string (required if state=present, optional when state=absent, in which case all keys for the host are removed).
+ The key must be in the right format for ssh (see sshd(1), section "SSH_KNOWN_HOSTS FILE FORMAT")
required: false
default: null
path:
diff --git a/lib/ansible/modules/system/lvol.py b/lib/ansible/modules/system/lvol.py
index 1f3bf2155e1..2b410c4b6f6 100644
--- a/lib/ansible/modules/system/lvol.py
+++ b/lib/ansible/modules/system/lvol.py
@@ -419,7 +419,10 @@ def main():
if (size_free > 0) and (('+' not in size) or (size_free >= (size_requested - this_lv['size']))):
tool = module.get_bin_path("lvextend", required=True)
else:
- module.fail_json(msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" % (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit))
+ module.fail_json(
+ msg="Logical Volume %s could not be extended. Not enough free space left (%s%s required / %s%s available)" %
+ (this_lv['name'], (size_requested - this_lv['size']), unit, size_free, unit)
+ )
elif shrink and this_lv['size'] > size_requested + this_vg['ext_size']: # more than an extent too large
if size_requested == 0:
module.fail_json(msg="Sorry, no shrinking of %s to 0 permitted." % (this_lv['name']))
diff --git a/lib/ansible/modules/system/pam_limits.py b/lib/ansible/modules/system/pam_limits.py
index 58d551d7f38..0e9fc8d65bb 100644
--- a/lib/ansible/modules/system/pam_limits.py
+++ b/lib/ansible/modules/system/pam_limits.py
@@ -47,7 +47,26 @@ options:
description:
- The limit to be set
required: true
- choices: [ "core", "data", "fsize", "memlock", "nofile", "rss", "stack", "cpu", "nproc", "as", "maxlogins", "maxsyslogins", "priority", "locks", "sigpending", "msgqueue", "nice", "rtprio", "chroot" ]
+ choices:
+ - "core"
+ - "data"
+ - "fsize"
+ - "memlock"
+ - "nofile"
+ - "rss"
+ - "stack"
+ - "cpu"
+ - "nproc"
+ - "as"
+ - "maxlogins"
+ - "maxsyslogins"
+ - "priority"
+ - "locks"
+ - "sigpending"
+ - "msgqueue"
+ - "nice"
+ - "rtprio"
+ - "chroot"
value:
description:
- The value of the limit.
@@ -120,7 +139,8 @@ import re
def main():
- pam_items = [ 'core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks', 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot' ]
+ pam_items = ['core', 'data', 'fsize', 'memlock', 'nofile', 'rss', 'stack', 'cpu', 'nproc', 'as', 'maxlogins', 'maxsyslogins', 'priority', 'locks',
+ 'sigpending', 'msgqueue', 'nice', 'rtprio', 'chroot']
pam_types = [ 'soft', 'hard', '-' ]
diff --git a/lib/ansible/modules/system/solaris_zone.py b/lib/ansible/modules/system/solaris_zone.py
index aedfa746b80..fcebcb80d16 100644
--- a/lib/ansible/modules/system/solaris_zone.py
+++ b/lib/ansible/modules/system/solaris_zone.py
@@ -444,18 +444,18 @@ class Zone(object):
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(required=True),
- state = dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']),
- path = dict(default=None),
- sparse = dict(default=False, type='bool'),
- root_password = dict(default=None, no_log=True),
- timeout = dict(default=600, type='int'),
- config = dict(default=''),
- create_options = dict(default=''),
- install_options = dict(default=''),
- attach_options = dict(default=''),
- ),
+ argument_spec=dict(
+ name=dict(required=True),
+ state=dict(default='present', choices=['running', 'started', 'present', 'installed', 'stopped', 'absent', 'configured', 'detached', 'attached']),
+ path=dict(default=None),
+ sparse=dict(default=False, type='bool'),
+ root_password=dict(default=None, no_log=True),
+ timeout=dict(default=600, type='int'),
+ config=dict(default=''),
+ create_options=dict(default=''),
+ install_options=dict(default=''),
+ attach_options=dict(default=''),
+ ),
supports_check_mode=True
)
diff --git a/lib/ansible/modules/system/systemd.py b/lib/ansible/modules/system/systemd.py
index 724290a7220..4a7c07cb68a 100644
--- a/lib/ansible/modules/system/systemd.py
+++ b/lib/ansible/modules/system/systemd.py
@@ -246,7 +246,7 @@ status:
"WatchdogTimestampMonotonic": "0",
"WatchdogUSec": "0",
}
-'''
+''' # NOQA
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.service import sysv_exists, sysv_is_enabled, fail_if_missing
diff --git a/lib/ansible/modules/utilities/helper/meta.py b/lib/ansible/modules/utilities/helper/meta.py
index 0221e790cbe..20b1f9e2d5f 100644
--- a/lib/ansible/modules/utilities/helper/meta.py
+++ b/lib/ansible/modules/utilities/helper/meta.py
@@ -35,8 +35,12 @@ options:
free_form:
description:
- This module takes a free form command, as a string. There's not an actual option named "free form". See the examples!
- - "C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays)."
- - "C(refresh_inventory) (added in 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be re-executed. This is mainly useful when additional hosts are created and users wish to use them instead of using the `add_host` module."
+ - >
+ C(flush_handlers) makes Ansible run any handler tasks which have thus far been notified. Ansible inserts these tasks internally at certain
+ points to implicitly trigger handler runs (after pre/post tasks, the final role execution, and the main tasks section of your plays).
+ - >
+ C(refresh_inventory) (added in 2.0) forces the reload of the inventory, which in the case of dynamic inventory scripts means they will be
+ re-executed. This is mainly useful when additional hosts are created and users wish to use them instead of using the `add_host` module.
- "C(noop) (added in 2.0) This literally does 'nothing'. It is mainly used internally and not recommended for general use."
- "C(clear_facts) (added in 2.1) causes the gathered facts for the hosts specified in the play's list of hosts to be cleared, including the fact cache."
- "C(clear_host_errors) (added in 2.1) clears the failed state (if any) from hosts specified in the play's list of hosts."
diff --git a/lib/ansible/modules/utilities/logic/include.py b/lib/ansible/modules/utilities/logic/include.py
index 31bf22228c7..7e7b13418b3 100644
--- a/lib/ansible/modules/utilities/logic/include.py
+++ b/lib/ansible/modules/utilities/logic/include.py
@@ -32,7 +32,8 @@ options:
description:
- This module allows you to specify the name of the file directly w/o any other options.
notes:
- - This is really not a module, though it appears as such, this is a feature of the Ansible Engine, as such it cannot be overridden the same way a module can.
+ - This is really not a module; though it appears as such, it is a feature of the Ansible Engine, and as such it cannot be overridden the same way a
+ module can.
'''
EXAMPLES = """
diff --git a/lib/ansible/modules/utilities/logic/include_role.py b/lib/ansible/modules/utilities/logic/include_role.py
index 25d7d73fc4a..5e4d52a23fe 100644
--- a/lib/ansible/modules/utilities/logic/include_role.py
+++ b/lib/ansible/modules/utilities/logic/include_role.py
@@ -54,7 +54,9 @@ options:
notes:
- Handlers are made available to the whole play.
- simple dependencies seem to work fine.
- - As with C(include) this task can be static or dynamic, If static it implies that it won't need templating nor loops nor conditionals and will show included tasks in the --list options. Ansible will try to autodetect what is needed, but you can set `static` to `yes` or `no` at task level to control this.
+ - As with C(include) this task can be static or dynamic. If static, it implies that it won't need templating nor loops nor conditionals and will
+ show included tasks in the --list options. Ansible will try to autodetect what is needed, but you can set `static` to `yes` or `no` at task
+ level to control this.
'''
EXAMPLES = """
diff --git a/lib/ansible/modules/utilities/logic/include_vars.py b/lib/ansible/modules/utilities/logic/include_vars.py
index f56d1033d6e..da7977bd937 100644
--- a/lib/ansible/modules/utilities/logic/include_vars.py
+++ b/lib/ansible/modules/utilities/logic/include_vars.py
@@ -19,7 +19,8 @@ author: "Allen Sanabria (@linuxdynasty)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- - Loads variables from a YAML/JSON files dynamically from within a file or from a directory recursively during task runtime. If loading a directory, the files are sorted alphabetically before being loaded.
+ - Loads variables from YAML/JSON files dynamically from within a file or from a directory recursively during task runtime. If loading a directory,
+ the files are sorted alphabetically before being loaded.
version_added: "1.4"
options:
file:
diff --git a/lib/ansible/modules/utilities/logic/pause.py b/lib/ansible/modules/utilities/logic/pause.py
index 8d766148a0f..1f7b94b0428 100644
--- a/lib/ansible/modules/utilities/logic/pause.py
+++ b/lib/ansible/modules/utilities/logic/pause.py
@@ -25,9 +25,14 @@ DOCUMENTATION = '''
module: pause
short_description: Pause playbook execution
description:
- - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt.
- - "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)."
- - "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts."
+ - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to
+ pause with a prompt.
+ - >
+ You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early:
+ press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a).
+ - >
+ The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with
+ the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts.
version_added: "0.8"
options:
minutes:
diff --git a/lib/ansible/modules/utilities/logic/set_fact.py b/lib/ansible/modules/utilities/logic/set_fact.py
index ebd088d69c3..86042e5ae7b 100644
--- a/lib/ansible/modules/utilities/logic/set_fact.py
+++ b/lib/ansible/modules/utilities/logic/set_fact.py
@@ -29,9 +29,11 @@ author: "Dag Wieers (@dagwieers)"
module: set_fact
short_description: Set host facts from a task
description:
- - This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
- - These variables will be available to subsequent plays during an ansible-playbook run, but will not be saved across executions even if you use a fact cache.
- - Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden. See U(http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable) for more information.
+ - This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
+ - These variables will be available to subsequent plays during an ansible-playbook run, but will not be saved across executions even if you use
+ a fact cache.
+ - Per the standard Ansible variable precedence rules, many other types of variables have a higher priority, so this value may be overridden.
+ See U(http://docs.ansible.com/ansible/playbooks_variables.html#variable-precedence-where-should-i-put-a-variable) for more information.
options:
key_value:
description:
diff --git a/lib/ansible/modules/utilities/logic/wait_for.py b/lib/ansible/modules/utilities/logic/wait_for.py
index 7251e342c34..58262334e66 100644
--- a/lib/ansible/modules/utilities/logic/wait_for.py
+++ b/lib/ansible/modules/utilities/logic/wait_for.py
@@ -76,7 +76,8 @@ options:
description:
- either C(present), C(started), or C(stopped), C(absent), or C(drained)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed, C(drained) will check for active connections
- - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing, C(absent) will check that file is absent or removed
+ - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing,
+ C(absent) will check that file is absent or removed
choices: [ "present", "started", "stopped", "absent", "drained" ]
required: False
default: "started"
@@ -294,7 +295,8 @@ class LinuxTCPConnectionInfo(TCPConnectionInfo):
tcp_connection = tcp_connection.strip().split()
if tcp_connection[self.local_address_field] == 'local_address':
continue
- if tcp_connection[self.connection_state_field] not in [ get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states'] ]:
+ if (tcp_connection[self.connection_state_field] not in
+ [get_connection_state_id(_connection_state) for _connection_state in self.module.params['active_connection_states']]):
continue
(local_ip, local_port) = tcp_connection[self.local_address_field].split(':')
if self.port != local_port:
diff --git a/lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py b/lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py
index 211bf2ea183..f45790de079 100644
--- a/lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py
+++ b/lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py
@@ -383,7 +383,9 @@ def main():
else:
for _state in states:
if _state not in ['present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors']:
- module.fail_json(msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'.")
+ module.fail_json(
+ msg="State can only take values amongst 'present', 'absent', 'enabled', 'disabled', 'drained', 'hot_standby', 'ignore_errors'."
+ )
else:
states = ['None']
diff --git a/lib/ansible/modules/web_infrastructure/django_manage.py b/lib/ansible/modules/web_infrastructure/django_manage.py
index ac2482971be..806a9f9ede7 100644
--- a/lib/ansible/modules/web_infrastructure/django_manage.py
+++ b/lib/ansible/modules/web_infrastructure/django_manage.py
@@ -29,14 +29,17 @@ DOCUMENTATION = '''
module: django_manage
short_description: Manages a Django application.
description:
- - Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all management commands will be executed by the given I(virtualenv) installation.
+ - Manages a Django application using the I(manage.py) application frontend to I(django-admin). With the I(virtualenv) parameter, all
+ management commands will be executed by the given I(virtualenv) installation.
version_added: "1.1"
options:
command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description:
- - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
+ - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb,
+ test, and validate.
+ - Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run
+ with the I(--noinput) flag.
required: true
app_path:
description:
@@ -92,12 +95,13 @@ options:
required: false
version_added: "1.3"
notes:
- - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
- - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
- - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
- - To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
- - As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python", for invoking the appropriate Python interpreter.
+ - I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
+ - This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
+ - This module assumes English error messages for the 'createcachetable' command to detect table existence, unfortunately.
+ - To be able to use the migrate command with django versions < 1.7, you must have south installed and added as an app in your settings.
+ - To be able to use the collectstatic command, you must have enabled staticfiles in your settings.
+ - As of ansible 2.x, your I(manage.py) application must be executable (rwxr-xr-x), and must have a valid I(shebang), i.e. "#!/usr/bin/env python",
+ for invoking the appropriate Python interpreter.
requirements: [ "virtualenv", "django" ]
author: "Scott Anderson (@tastychutney)"
'''
diff --git a/lib/ansible/modules/windows/win_acl.py b/lib/ansible/modules/windows/win_acl.py
index 4e45d5e98fd..281e4c499ce 100644
--- a/lib/ansible/modules/windows/win_acl.py
+++ b/lib/ansible/modules/windows/win_acl.py
@@ -64,7 +64,9 @@ options:
default: none
rights:
description:
- - The rights/permissions that are to be allowed/denyed for the specified user or group for the given src file or directory. Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights Enumeration.
+ - The rights/permissions that are to be allowed/denied for the specified user or group for the given src file or directory.
+ Can be entered as a comma separated list (Ex. "Modify, Delete, ExecuteFile"). For more information on the choices see MSDN FileSystemRights
+ Enumeration.
required: yes
choices:
- AppendData
@@ -91,7 +93,8 @@ options:
default: none
inherit:
description:
- - Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on the choices see MSDN InheritanceFlags Enumeration.
+ - Inherit flags on the ACL rules. Can be specified as a comma separated list (Ex. "ContainerInherit, ObjectInherit"). For more information on
+ the choices see MSDN InheritanceFlags Enumeration.
required: no
choices:
- ContainerInherit
diff --git a/lib/ansible/modules/windows/win_acl_inheritance.py b/lib/ansible/modules/windows/win_acl_inheritance.py
index 0e1cc00cf7f..2c86d33ba4b 100644
--- a/lib/ansible/modules/windows/win_acl_inheritance.py
+++ b/lib/ansible/modules/windows/win_acl_inheritance.py
@@ -48,8 +48,10 @@ options:
default: absent
reorganize:
description:
- - For P(state) = I(absent), indicates if the inherited ACE's should be copied from the parent directory. This is necessary (in combination with removal) for a simple ACL instead of using multiple ACE deny entries.
- - For P(state) = I(present), indicates if the inherited ACE's should be deduplicated compared to the parent directory. This removes complexity of the ACL structure.
+      - For P(state) = I(absent), indicates if the inherited ACEs should be copied from the parent directory. This is necessary
+        (in combination with removal) for a simple ACL instead of using multiple ACE deny entries.
+      - For P(state) = I(present), indicates if the inherited ACEs should be deduplicated compared to the parent directory. This removes complexity
+        of the ACL structure.
required: false
choices:
- no
diff --git a/lib/ansible/modules/windows/win_command.py b/lib/ansible/modules/windows/win_command.py
index 483b053d5b5..a951a5d0e74 100644
--- a/lib/ansible/modules/windows/win_command.py
+++ b/lib/ansible/modules/windows/win_command.py
@@ -51,11 +51,12 @@ options:
description:
- set the specified path as the current working directory before executing a command
notes:
- - If you want to run a command through a shell (say you are using C(<),
- C(>), C(|), etc), you actually want the M(win_shell) module instead. The
- C(win_command) module is much more secure as it's not affected by the user's
- environment.
- - " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
+ - If you want to run a command through a shell (say you are using C(<),
+ C(>), C(|), etc), you actually want the M(win_shell) module instead. The
+ C(win_command) module is much more secure as it's not affected by the user's
+ environment.
+ - C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not
+ exist, use this.
author:
- Matt Davis
'''
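
A short sketch of the creates idiom mentioned in the notes above; the installer and marker file paths are hypothetical:

    - name: Run the installer only if its marker file does not exist yet
      win_command: C:\setup\install.exe /quiet
      args:
        creates: C:\ProgramData\myapp\installed.txt
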
diff --git a/lib/ansible/modules/windows/win_feature.py b/lib/ansible/modules/windows/win_feature.py
index 6b2045fc6e6..2c5a62f9a21 100644
--- a/lib/ansible/modules/windows/win_feature.py
+++ b/lib/ansible/modules/windows/win_feature.py
@@ -32,7 +32,8 @@ module: win_feature
version_added: "1.7"
short_description: Installs and uninstalls Windows Features on Windows Server
description:
- - Installs or uninstalls Windows Roles or Features on Windows Server. This module uses the Add/Remove-WindowsFeature Cmdlets on Windows 2008 and Install/Uninstall-WindowsFeature Cmdlets on Windows 2012, which are not available on client os machines.
+ - Installs or uninstalls Windows Roles or Features on Windows Server. This module uses the Add/Remove-WindowsFeature Cmdlets on Windows 2008
+      and Install/Uninstall-WindowsFeature Cmdlets on Windows 2012, which are not available on client OS machines.
options:
name:
description:
diff --git a/lib/ansible/modules/windows/win_lineinfile.py b/lib/ansible/modules/windows/win_lineinfile.py
index 98bfd3aadbb..1e757b30707 100644
--- a/lib/ansible/modules/windows/win_lineinfile.py
+++ b/lib/ansible/modules/windows/win_lineinfile.py
@@ -41,7 +41,10 @@ options:
regexp:
required: false
description:
- - "The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions; see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx)."
+ - >
+ The regular expression to look for in every line of the file. For C(state=present), the pattern to replace if found; only the last line found
+ will be replaced. For C(state=absent), the pattern of the line to remove. Uses .NET compatible regular expressions;
+ see U(https://msdn.microsoft.com/en-us/library/hs600312%28v=vs.110%29.aspx).
state:
required: false
choices: [ present, absent ]
@@ -51,25 +54,30 @@ options:
line:
required: false
description:
- - Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get expanded with the C(regexp) capture groups if the regexp matches.
+ - Required for C(state=present). The line to insert/replace into the file. If C(backrefs) is set, may contain backreferences that will get
+ expanded with the C(regexp) capture groups if the regexp matches.
backrefs:
required: false
default: "no"
choices: [ "yes", "no" ]
description:
- - Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp) matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp) doesn't match anywhere in the file, the file will be left unchanged.
+ - Used with C(state=present). If set, line can contain backreferences (both positional and named) that will get populated if the C(regexp)
+ matches. This flag changes the operation of the module slightly; C(insertbefore) and C(insertafter) will be ignored, and if the C(regexp)
+ doesn't match anywhere in the file, the file will be left unchanged.
- If the C(regexp) does match, the last matching line will be replaced by the expanded line parameter.
insertafter:
required: false
default: EOF
description:
- - Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is available; C(EOF) for inserting the line at the end of the file.
+ - Used with C(state=present). If specified, the line will be inserted after the last match of specified regular expression. A special value is
+ available; C(EOF) for inserting the line at the end of the file.
- If specified regular expression has no matches, EOF will be used instead. May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
required: false
description:
- - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available; C(BOF) for inserting the line at the beginning of the file.
+ - Used with C(state=present). If specified, the line will be inserted before the last match of specified regular expression. A value is available;
+ C(BOF) for inserting the line at the beginning of the file.
- If specified regular expression has no matches, the line will be inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
@@ -94,13 +102,19 @@ options:
required: false
default: "auto"
description:
- - Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
- - "An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx)."
- - This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a specific encoding, the default encoding (UTF-8, no BOM) will be used.
+ - Specifies the encoding of the source text file to operate on (and thus what the output encoding will be). The default of C(auto) will cause
+ the module to auto-detect the encoding of the source file and ensure that the modified file is written with the same encoding.
+ - >
+ An explicit encoding can be passed as a string that is a valid value to pass to the .NET framework System.Text.Encoding.GetEncoding() method - see
+ U(https://msdn.microsoft.com/en-us/library/system.text.encoding%28v=vs.110%29.aspx).
+ - This is mostly useful with C(create=yes) if you want to create a new file with a specific encoding. If C(create=yes) is specified without a
+ specific encoding, the default encoding (UTF-8, no BOM) will be used.
newline:
required: false
description:
- - "Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated line separator will be used for file output regardless of the original line separator that appears in the input file."
+ - >
+ Specifies the line separator style to use for the modified file. This defaults to the windows line separator (C(\r\n)). Note that the indicated
+ line separator will be used for file output regardless of the original line separator that appears in the input file.
choices: [ "windows", "unix" ]
default: "windows"
notes:
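
For illustration, a minimal win_lineinfile task combining regexp, line and insertafter as documented above; the file path and setting are hypothetical:

    - name: Ensure the port setting is present, replacing the last matching line if found
      win_lineinfile:
        dest: C:\myapp\config.ini
        regexp: '^port='
        line: 'port=8080'
        insertafter: '^\[server\]'
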
diff --git a/lib/ansible/modules/windows/win_nssm.py b/lib/ansible/modules/windows/win_nssm.py
index b9a99d35240..25da4c5258e 100644
--- a/lib/ansible/modules/windows/win_nssm.py
+++ b/lib/ansible/modules/windows/win_nssm.py
@@ -43,7 +43,8 @@ options:
state:
description:
- State of the service on the system
- - Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of ansible, so these should be implemented via the ansible command module
+ - Note that NSSM actions like "pause", "continue", "rotate" do not fit the declarative style of ansible, so these should be implemented via the
+ ansible command module
choices:
- present
- started
@@ -57,7 +58,9 @@ options:
- "Specify this whenever the service may need to be installed (state: present, started, stopped, restarted)"
- "Note that the application name must look like the following, if the directory includes spaces:"
- 'nssm install service "c:\\Program Files\\app.exe\\" "C:\\Path with spaces\\"'
- - "See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info: U(https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)"
+ - >
+ See commit 0b386fc1984ab74ee59b7bed14b7e8f57212c22b in the nssm.git project for more info:
+ U(https://git.nssm.cc/?p=nssm.git;a=commit;h=0b386fc1984ab74ee59b7bed14b7e8f57212c22b)
stdout_file:
description:
- Path to receive output
@@ -84,7 +87,8 @@ options:
- Password to be used for service startup
start_mode:
description:
- - If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it. C(disabled) means that the service will stay off, regardless if it is needed or not.
+ - If C(auto) is selected, the service will start at bootup. C(manual) means that the service will start only when another service needs it.
+        C(disabled) means that the service will stay off, regardless of whether it is needed or not.
default: auto
choices:
- auto
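
A minimal sketch of installing and starting a service with win_nssm, using the application and start_mode options documented above; the service name and executable path are hypothetical:

    - name: Install and start the application as a Windows service
      win_nssm:
        name: myapp
        application: C:\myapp\app.exe
        start_mode: auto
        state: started
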
diff --git a/lib/ansible/modules/windows/win_package.py b/lib/ansible/modules/windows/win_package.py
index fd0e26b0f8a..4c90500b7c3 100644
--- a/lib/ansible/modules/windows/win_package.py
+++ b/lib/ansible/modules/windows/win_package.py
@@ -34,7 +34,10 @@ author: Trond Hindenes
short_description: Installs/Uninstalls an installable package, either from local file system or url
description:
- Installs or uninstalls a package.
- - 'Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)'
+ - >
+ Optionally uses a product_id to check if the package needs installing. You can find product ids for installed programs in the windows registry
+ either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs
+ C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)
options:
path:
description:
@@ -48,7 +51,9 @@ options:
product_id:
description:
- Product id of the installed package (used for checking if already installed)
- - You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall) or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)'
+ - >
+ You can find product ids for installed programs in the windows registry either in C(HKLM:Software\Microsoft\Windows\CurrentVersion\Uninstall)
+ or for 32 bit programs C(HKLM:Software\Wow6432Node\Microsoft\Windows\CurrentVersion\Uninstall)
required: true
aliases: [productid]
arguments:
@@ -67,12 +72,14 @@ options:
aliases: [ensure]
user_name:
description:
- - Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
+ - Username of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package.
+ Also specify user_password for this to function properly.
default: null
required: false
user_password:
description:
- - Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
+ - Password of an account with access to the package if it's located on a file share. Only needed if the winrm user doesn't have access to the package.
+ Also specify user_name for this to function properly.
default: null
required: false
expected_return_code:
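
For illustration, a win_package task using a product_id taken from the registry locations described above; the share path, GUID and credentials are hypothetical:

    - name: Install the MSI only when its product id is not yet registered
      win_package:
        path: \\fileserver\share\myapp.msi
        product_id: '{12345678-90AB-CDEF-1234-567890ABCDEF}'
        arguments: /qn
        state: present
        user_name: DOMAIN\svc_install
        user_password: '{{ vault_install_password }}'
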
diff --git a/lib/ansible/modules/windows/win_path.py b/lib/ansible/modules/windows/win_path.py
index 6f706f30463..9785e06dab4 100644
--- a/lib/ansible/modules/windows/win_path.py
+++ b/lib/ansible/modules/windows/win_path.py
@@ -40,11 +40,13 @@ options:
elements:
description:
- A single path element, or a list of path elements (ie, directories) to add or remove.
- - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order in the resultant path value.
+ - When multiple elements are included in the list (and C(state) is C(present)), the elements are guaranteed to appear in the same relative order
+ in the resultant path value.
- Variable expansions (eg, C(%VARNAME%)) are allowed, and are stored unexpanded in the target path element.
- Any existing path elements not mentioned in C(elements) are always preserved in their current order.
- New path elements are appended to the path, and existing path elements may be moved closer to the end to satisfy the requested ordering.
- - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing backslashes in YAML require quotes.
+ - Paths are compared in a case-insensitive fashion, and trailing backslashes are ignored for comparison purposes. However, note that trailing
+ backslashes in YAML require quotes.
required: true
state:
description:
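
A short sketch of the element ordering and variable expansion behaviour described above; both directories are hypothetical:

    - name: Ensure two directories are on PATH in this relative order
      win_path:
        elements:
          - C:\tools\bin
          - '%SystemDrive%\myapp\bin'
        state: present
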
diff --git a/lib/ansible/modules/windows/win_regedit.py b/lib/ansible/modules/windows/win_regedit.py
index 6515ae828a3..81ef98b8453 100644
--- a/lib/ansible/modules/windows/win_regedit.py
+++ b/lib/ansible/modules/windows/win_regedit.py
@@ -50,7 +50,9 @@ options:
data:
description:
- Value of the registry entry C(name) in C(path).
- - Binary data should be expressed a yaml byte array or as comma separated hex values. An easy way to generate this is to run C(regedit.exe) and use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef). The C(hex:) prefix is optional.
+      - Binary data should be expressed as a yaml byte array or as comma separated hex values. An easy way to generate this is to run C(regedit.exe) and
+ use the I(Export) option to save the registry values to a file. In the exported file binary values will look like C(hex:be,ef,be,ef).
+ The C(hex:) prefix is optional.
type:
description:
- Registry value data type.
@@ -71,7 +73,8 @@ options:
- absent
default: present
notes:
-- Check-mode C(-C/--check) and diff output (-D/--diff) are supported, so that you can test every change against the active configuration before applying changes.
+- Check-mode C(-C/--check) and diff output (-D/--diff) are supported, so that you can test every change against the active configuration before
+ applying changes.
- Beware that some registry hives (HKEY_USERS in particular) do not allow to create new registry paths.
author: "Adam Keech (@smadam813), Josh Ludwig (@joshludwig)"
'''
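
For illustration, a minimal win_regedit task passing binary data as comma separated hex values, as described above; the key and value names are hypothetical:

    - name: Store a binary value exported from regedit.exe
      win_regedit:
        path: HKLM:\SOFTWARE\MyCompany
        name: example_blob
        type: binary
        data: hex:be,ef,be,ef
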
diff --git a/lib/ansible/modules/windows/win_regmerge.py b/lib/ansible/modules/windows/win_regmerge.py
index ca6b9f88755..25c7a1b104a 100644
--- a/lib/ansible/modules/windows/win_regmerge.py
+++ b/lib/ansible/modules/windows/win_regmerge.py
@@ -34,7 +34,8 @@ short_description: Merges the contents of a registry file into the windows regis
description:
- Wraps the reg.exe command to import the contents of a registry file.
- Suitable for use with registry files created using M(win_template).
- - Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not be merged.
+ - Windows registry files have a specific format and must be constructed correctly with carriage return and line feed line endings otherwise they will not
+ be merged.
   - Exported registry files often start with a Byte Order Mark which must be removed if the file is to be templated using M(win_template).
- Registry file format is described at U(https://support.microsoft.com/en-us/kb/310516)
- See also M(win_template), M(win_regedit)
@@ -46,7 +47,8 @@ options:
default: no default
compare_key:
description:
- - The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in HKLM or HKCU part of registry. Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
+ - The parent key to use when comparing the contents of the registry to the contents of the file. Needs to be in HKLM or HKCU part of registry.
+ Use a PS-Drive style path for example HKLM:\SOFTWARE not HKEY_LOCAL_MACHINE\SOFTWARE
If not supplied, or the registry key is not found, no comparison will be made, and the module will report changed.
required: false
default: no default
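
A minimal sketch combining the registry file and the PS-Drive style compare_key documented above; both paths are hypothetical:

    - name: Merge a templated registry file and compare under a specific key
      win_regmerge:
        path: C:\temp\firewall-settings.reg
        compare_key: 'HKLM:\SOFTWARE\Policies\MyCompany'
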
diff --git a/lib/ansible/modules/windows/win_robocopy.py b/lib/ansible/modules/windows/win_robocopy.py
index e584d6f222a..76028bb374b 100644
--- a/lib/ansible/modules/windows/win_robocopy.py
+++ b/lib/ansible/modules/windows/win_robocopy.py
@@ -32,7 +32,8 @@ module: win_robocopy
version_added: "2.2"
short_description: Synchronizes the contents of two directories using Robocopy.
description:
- - Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available on most modern Windows Systems.
+ - Synchronizes the contents of two directories on the remote machine. Under the hood this just calls out to RoboCopy, since that should be available
+ on most modern Windows Systems.
options:
src:
description:
@@ -52,7 +53,8 @@ options:
required: false
purge:
description:
- - Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is set, this will be ignored.
+ - Deletes any files/directories found in the destination that do not exist in the source (Toggles the `/purge` flag to RoboCopy). If "flags" is
+ set, this will be ignored.
choices:
- true
- false
@@ -65,7 +67,8 @@ options:
required: false
author: Corwin Brown (@blakfeld)
notes:
- - This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine, not from the master to the remote machine.
+ - This is not a complete port of the "synchronize" module. Unlike the "synchronize" module this only performs the sync/copy on the remote machine,
+ not from the master to the remote machine.
- This module does not currently support all Robocopy flags.
- Works on Windows 7, Windows 8, Windows Server 2k8, and Windows Server 2k12
'''
@@ -137,7 +140,7 @@ output:
description: The output of running the robocopy command.
returned: success
type: string
- sample: "-------------------------------------------------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n-------------------------------------------------------------------------------\n"
+ sample: "----------------------------------------\n ROBOCOPY :: Robust File Copy for Windows \n----------------------------------------\n"
msg:
     description: Output interpreted into a concise message.
returned: always
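
For illustration, a minimal win_robocopy task using the purge behaviour documented above; the directories are hypothetical:

    - name: Mirror a directory, deleting destination files missing from the source
      win_robocopy:
        src: C:\data\source
        dest: D:\data\mirror
        recurse: yes
        purge: yes
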
diff --git a/lib/ansible/modules/windows/win_say.py b/lib/ansible/modules/windows/win_say.py
index cf4d464998b..5cf59ead4fb 100644
--- a/lib/ansible/modules/windows/win_say.py
+++ b/lib/ansible/modules/windows/win_say.py
@@ -32,7 +32,8 @@ module: win_say
version_added: "2.3"
short_description: Text to speech module for Windows to speak messages and optionally play sounds
description:
- - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or headphones need to be attached to the windows target(s) for the speech to be audible.
+ - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or
+ headphones need to be attached to the windows target(s) for the speech to be audible.
options:
msg:
description:
@@ -41,12 +42,14 @@ options:
default: none
msg_file:
description:
- - Full path to a windows format text file containing the text to be spokend. Use either msg or msg_file. Optional so that you can use this module just to play sounds.
+      - Full path to a windows format text file containing the text to be spoken. Use either msg or msg_file. Optional so that you can use this module
+ just to play sounds.
required: false
default: none
voice:
description:
- - Which voice to use. See notes for how to discover installed voices. If the requested voice is not available the default voice will be used. Example voice names from Windows 10 are 'Microsoft Zira Desktop' and 'Microsoft Hazel Desktop'.
+ - Which voice to use. See notes for how to discover installed voices. If the requested voice is not available the default voice will be used.
+ Example voice names from Windows 10 are 'Microsoft Zira Desktop' and 'Microsoft Hazel Desktop'.
required: false
default: system default voice
speech_speed:
@@ -56,12 +59,14 @@ options:
default: 0
start_sound_path:
description:
- - Full path to a C(.wav) file containing a sound to play before the text is spoken. Useful on conference calls to alert other speakers that ansible has something to say.
+ - Full path to a C(.wav) file containing a sound to play before the text is spoken. Useful on conference calls to alert other speakers that ansible
+ has something to say.
required: false
default: null
end_sound_path:
description:
- - Full path to a C(.wav) file containing a sound to play after the text has been spoken. Useful on conference calls to alert other speakers that ansible has finished speaking.
+ - Full path to a C(.wav) file containing a sound to play after the text has been spoken. Useful on conference calls to alert other speakers that
+ ansible has finished speaking.
required: false
default: null
author: "Jon Hawkesworth (@jhawkesworth)"
@@ -112,4 +117,3 @@ voice_info:
type: string
sample: Could not load voice TestVoice, using system default voice
'''
-
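
A short sketch of the start_sound_path usage described above; the message is illustrative and the sound file is assumed to be a stock Windows media file:

    - name: Announce maintenance with a chime before the spoken message
      win_say:
        msg: Maintenance is starting, please save your work.
        start_sound_path: C:\Windows\Media\Windows Balloon.wav
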
diff --git a/lib/ansible/modules/windows/win_shell.py b/lib/ansible/modules/windows/win_shell.py
index 61183d18246..7c84c263977 100644
--- a/lib/ansible/modules/windows/win_shell.py
+++ b/lib/ansible/modules/windows/win_shell.py
@@ -50,14 +50,15 @@ options:
- set the specified path as the current working directory before executing a command
executable:
description:
- - change the shell used to execute the command (eg, C(cmd)). The target shell must accept a C(/c) parameter followed by the raw command line to be executed.
+ - change the shell used to execute the command (eg, C(cmd)). The target shell must accept a C(/c) parameter followed by the raw command line to be
+ executed.
notes:
- If you want to run an executable securely and predictably, it may be
better to use the M(win_command) module instead. Best practices when writing
playbooks will follow the trend of using M(win_command) unless C(win_shell) is
explicitly required. When running ad-hoc commands, use your best judgement.
- - WinRM will not return from a command execution until all child processes created have exited. Thus, it is not possible to use win_shell to spawn long-running child or background processes.
- Consider creating a Windows service for managing background processes.
+ - WinRM will not return from a command execution until all child processes created have exited. Thus, it is not possible to use win_shell to spawn
+ long-running child or background processes. Consider creating a Windows service for managing background processes.
author:
- Matt Davis
'''
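
For illustration, a win_shell task that genuinely needs shell features (a pipe) and switches the executable to cmd as documented above; the log path is hypothetical:

    - name: Search the application log for errors using cmd
      win_shell: type C:\logs\app.log | findstr /i error
      args:
        executable: cmd
      register: log_errors
      failed_when: false
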
diff --git a/lib/ansible/modules/windows/win_unzip.py b/lib/ansible/modules/windows/win_unzip.py
index 53919540772..f720d6c78dc 100644
--- a/lib/ansible/modules/windows/win_unzip.py
+++ b/lib/ansible/modules/windows/win_unzip.py
@@ -72,7 +72,9 @@ options:
required: no
default: null
notes:
-- For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX) has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
+- For extracting any compression types other than .zip, the PowerShellCommunityExtensions (PSCX) Module is required. This module (in conjunction with PSCX)
+ has the ability to recursively unzip files within the src zip file provided and also functionality for many other compression types. If the destination
+ directory does not exist, it will be created before unzipping the file. Specifying rm parameter will force removal of the src file after extraction.
author: Phil Schwartz
'''
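
A minimal sketch of extracting an archive and removing it afterwards with the rm parameter noted above; the paths are hypothetical:

    - name: Unzip a release archive and delete the source zip
      win_unzip:
        src: C:\temp\release.zip
        dest: C:\apps\release
        rm: true
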
diff --git a/lib/ansible/modules/windows/win_updates.py b/lib/ansible/modules/windows/win_updates.py
index 65e505d1886..fe154c217c3 100644
--- a/lib/ansible/modules/windows/win_updates.py
+++ b/lib/ansible/modules/windows/win_updates.py
@@ -70,7 +70,8 @@ notes:
- win_updates must be run by a user with membership in the local Administrators group
- win_updates will use the default update service configured for the machine (Windows Update, Microsoft Update, WSUS, etc)
- win_updates does not manage reboots, but will signal when a reboot is required with the reboot_required return value.
-- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of updates, system load, and update server load.
+- win_updates can take a significant amount of time to complete (hours, in some cases). Performance depends on many factors, including OS version, number of
+ updates, system load, and update server load.
'''
EXAMPLES = r'''
diff --git a/lib/ansible/modules/windows/win_uri.py b/lib/ansible/modules/windows/win_uri.py
index a1216b96887..6cfc2b61738 100644
--- a/lib/ansible/modules/windows/win_uri.py
+++ b/lib/ansible/modules/windows/win_uri.py
@@ -68,7 +68,9 @@ options:
- 'Key Value pairs for headers. Example "Host: www.somesite.com"'
use_basic_parsing:
description:
- - This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer Engine to parse a webpage. There's an edge-case where if a user hasn't run IE before, this will fail. The only advantage to using the Internet Explorer praser is that you can traverse the DOM in a powershell script. That isn't useful for Ansible, so by default we toggle 'UseBasicParsing'. However, you can toggle that off here.
+ - This module relies upon 'Invoke-WebRequest', which by default uses the Internet Explorer Engine to parse a webpage. There's an edge-case where if a
+        user hasn't run IE before, this will fail. The only advantage to using the Internet Explorer parser is that you can traverse the DOM in a
+ powershell script. That isn't useful for Ansible, so by default we toggle 'UseBasicParsing'. However, you can toggle that off here.
choices:
- True
- False
@@ -143,7 +145,7 @@ raw_content:
description: The raw content of the HTTP response.
returned: success
type: string
- sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nX-Frame-Options: SAMEORIGIN\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443"; ma=2592000; v="30,29,28,27,26,25",quic=":443"; ma=2...'
+ sample: 'HTTP/1.1 200 OK\nX-XSS-Protection: 1; mode=block\nAlternate-Protocol: 443:quic,p=1\nAlt-Svc: quic="www.google.com:443";'
headers:
description: The Headers of the response.
returned: success
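
For illustration, a minimal win_uri request relying on the default UseBasicParsing behaviour described above; the URL is hypothetical:

    - name: Check an internal health endpoint
      win_uri:
        url: http://intranet.example.com/health
        method: GET
        use_basic_parsing: yes
      register: health_check
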
diff --git a/lib/ansible/modules/windows/win_webpicmd.py b/lib/ansible/modules/windows/win_webpicmd.py
index 6ff76de1a7f..7758de6bd71 100644
--- a/lib/ansible/modules/windows/win_webpicmd.py
+++ b/lib/ansible/modules/windows/win_webpicmd.py
@@ -32,7 +32,8 @@ module: win_webpicmd
version_added: "2.0"
short_description: Installs packages using Web Platform Installer command-line
description:
- - Installs packages using Web Platform Installer command-line (http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release).
+ - Installs packages using Web Platform Installer command-line
+ (http://www.iis.net/learn/install/web-platform-installer/web-platform-installer-v4-command-line-webpicmdexe-rtw-release).
- Must be installed and present in PATH (see win_chocolatey module; 'webpicmd' is the package name, and you must install 'lessmsi' first too)
- Install IIS first (see win_feature module)
notes:
diff --git a/lib/ansible/parsing/mod_args.py b/lib/ansible/parsing/mod_args.py
index 73f659658c2..299554197ae 100644
--- a/lib/ansible/parsing/mod_args.py
+++ b/lib/ansible/parsing/mod_args.py
@@ -140,7 +140,8 @@ class ModuleArgsParser:
if templar._contains_vars(additional_args):
final_args['_variable_params'] = additional_args
else:
- raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style ('{{var_name}}')")
+ raise AnsibleParserError("Complex args containing variables cannot use bare variables, and must use the full variable style "
+ "('{{var_name}}')")
elif isinstance(additional_args, dict):
final_args.update(additional_args)
else:
@@ -301,14 +302,17 @@ class ModuleArgsParser:
obj=self._task_ds)
else:
- raise AnsibleParserError("no action detected in task. This often indicates a misspelled module name, or incorrect module path.", obj=self._task_ds)
+ raise AnsibleParserError("no action detected in task. This often indicates a misspelled module name, or incorrect module path.",
+ obj=self._task_ds)
elif args.get('_raw_params', '') != '' and action not in RAW_PARAM_MODULES:
templar = Templar(loader=None)
raw_params = args.pop('_raw_params')
if templar._contains_vars(raw_params):
args['_variable_params'] = raw_params
else:
- raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action, ", ".join(RAW_PARAM_MODULES)), obj=self._task_ds)
+ raise AnsibleParserError("this task '%s' has extra params, which is only allowed in the following modules: %s" % (action,
+ ", ".join(RAW_PARAM_MODULES)),
+ obj=self._task_ds)
# shell modules require special handling
(action, args) = self._handle_shell_weirdness(action, args)
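
Seen from the playbook side, the bare-variable check above accepts the full jinja2 variable style for complex args; a minimal sketch, where ping_args is a hypothetical variable holding a dictionary of module options:

    - name: Pass templated complex args using the full variable style
      ping:
      args: "{{ ping_args }}"
      # a bare 'args: ping_args' would raise the AnsibleParserError shown above
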
diff --git a/lib/ansible/playbook/attribute.py b/lib/ansible/playbook/attribute.py
index 986e85ce4d0..a338dc834fe 100644
--- a/lib/ansible/playbook/attribute.py
+++ b/lib/ansible/playbook/attribute.py
@@ -23,7 +23,8 @@ from copy import deepcopy
class Attribute:
- def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, class_type=None, always_post_validate=False, inherit=True):
+ def __init__(self, isa=None, private=False, default=None, required=False, listof=None, priority=0, class_type=None, always_post_validate=False,
+ inherit=True):
"""
:class:`Attribute` specifies constraints for attributes of objects which
derive from playbook data. The attributes of the object are basically
diff --git a/lib/ansible/playbook/role/__init__.py b/lib/ansible/playbook/role/__init__.py
index ab2a657d62e..5400a081192 100644
--- a/lib/ansible/playbook/role/__init__.py
+++ b/lib/ansible/playbook/role/__init__.py
@@ -162,7 +162,8 @@ class Role(Base, Become, Conditional, Taggable):
return r
except RuntimeError:
- raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles", obj=role_include._ds)
+ raise AnsibleError("A recursion loop was detected with the roles specified. Make sure child roles do not have dependencies on parent roles",
+ obj=role_include._ds)
def _load_role_data(self, role_include, parent_role=None):
self._role_name = role_include.role
@@ -213,7 +214,8 @@ class Role(Base, Become, Conditional, Taggable):
handler_data = self._load_role_yaml('handlers')
if handler_data:
try:
- self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader, variable_manager=self._variable_manager)
+ self._handler_blocks = load_list_of_blocks(handler_data, play=self._play, role=self, use_handlers=True, loader=self._loader,
+ variable_manager=self._variable_manager)
except AssertionError:
raise AnsibleParserError("The handlers/main.yml file for role '%s' must contain a list of tasks" % self._role_name , obj=handler_data)
diff --git a/lib/ansible/playbook/role/metadata.py b/lib/ansible/playbook/role/metadata.py
index 0a77849490f..a2009c6f4b0 100644
--- a/lib/ansible/playbook/role/metadata.py
+++ b/lib/ansible/playbook/role/metadata.py
@@ -88,7 +88,8 @@ class RoleMetadata(Base):
current_role_path = os.path.dirname(self._owner._role_path)
try:
- return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager, loader=self._loader)
+ return load_list_of_roles(roles, play=self._owner._play, current_role_path=current_role_path, variable_manager=self._variable_manager,
+ loader=self._loader)
except AssertionError:
raise AnsibleParserError("A malformed list of role dependencies was encountered.", obj=self._ds)
diff --git a/lib/ansible/plugins/action/__init__.py b/lib/ansible/plugins/action/__init__.py
index 0e902797cfd..8f31c4c28be 100644
--- a/lib/ansible/plugins/action/__init__.py
+++ b/lib/ansible/plugins/action/__init__.py
@@ -482,7 +482,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):
get_checksum=checksum,
checksum_algo='sha1',
)
- mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None), wrap_async=False)
+ mystat = self._execute_module(module_name='stat', module_args=module_args, task_vars=all_vars, tmp=tmp, delete_remote_tmp=(tmp is None),
+ wrap_async=False)
if mystat.get('failed'):
msg = mystat.get('module_stderr')
@@ -674,7 +675,8 @@ class ActionBase(with_metaclass(ABCMeta, object)):
if wrap_async:
# configure, upload, and chmod the async_wrapper module
- (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(), task_vars=task_vars)
+ (async_module_style, shebang, async_module_data, async_module_path) = self._configure_module(module_name='async_wrapper', module_args=dict(),
+ task_vars=task_vars)
async_module_remote_filename = self._connection._shell.get_remote_filename(async_module_path)
remote_async_module_path = self._connection._shell.join_path(tmp, async_module_remote_filename)
self._transfer_data(remote_async_module_path, async_module_data)
diff --git a/lib/ansible/plugins/action/set_fact.py b/lib/ansible/plugins/action/set_fact.py
index b16927b3f2a..7bd7ec6455b 100644
--- a/lib/ansible/plugins/action/set_fact.py
+++ b/lib/ansible/plugins/action/set_fact.py
@@ -41,7 +41,8 @@ class ActionModule(ActionBase):
if not isidentifier(k):
result['failed'] = True
- result['msg'] = "The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k
+ result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
+ "letters, numbers and underscores." % k)
return result
if isinstance(v, string_types) and v.lower() in ('true', 'false', 'yes', 'no'):
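
A short sketch of variable names that pass the identifier check above; all names are illustrative:

    - name: Set facts using valid identifiers
      set_fact:
        deploy_target: web01
        _retry_count: 3
      # a key such as '2nd-target' would fail, since names must start with a letter
      # or underscore and contain only letters, numbers and underscores
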
diff --git a/lib/ansible/plugins/action/set_stats.py b/lib/ansible/plugins/action/set_stats.py
index 05366a8a164..13f866539ec 100644
--- a/lib/ansible/plugins/action/set_stats.py
+++ b/lib/ansible/plugins/action/set_stats.py
@@ -62,7 +62,8 @@ class ActionModule(ActionBase):
if not isidentifier(k):
result['failed'] = True
- result['msg'] = "The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only letters, numbers and underscores." % k
+ result['msg'] = ("The variable name '%s' is not valid. Variables must start with a letter or underscore character, and contain only "
+ "letters, numbers and underscores." % k)
return result
stats['data'][k] = self._templar.template(v)
diff --git a/lib/ansible/plugins/action/synchronize.py b/lib/ansible/plugins/action/synchronize.py
index 61d78571ca7..04eef304d87 100644
--- a/lib/ansible/plugins/action/synchronize.py
+++ b/lib/ansible/plugins/action/synchronize.py
@@ -191,7 +191,8 @@ class ActionModule(ActionBase):
# else only works with delegate_to
if delegate_to is None and self._connection.transport not in ('ssh', 'paramiko', 'local', 'docker'):
result['failed'] = True
- result['msg'] = "synchronize uses rsync to function. rsync needs to connect to the remote host via ssh, docker client or a direct filesystem copy. This remote host is being accessed via %s instead so it cannot work." % self._connection.transport
+ result['msg'] = ("synchronize uses rsync to function. rsync needs to connect to the remote host via ssh, docker client or a direct filesystem "
+ "copy. This remote host is being accessed via %s instead so it cannot work." % self._connection.transport)
return result
use_ssh_args = _tmp_args.pop('use_ssh_args', None)
@@ -382,5 +383,6 @@ class ActionModule(ActionBase):
# Emit a warning about using python3 because synchronize is
# somewhat unique in running on localhost
result['exception'] = result['msg']
- result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this'
+ result['msg'] = ('SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. '
+ 'You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this')
return result
diff --git a/lib/ansible/plugins/callback/default.py b/lib/ansible/plugins/callback/default.py
index 779d3f4f153..64079caf711 100644
--- a/lib/ansible/plugins/callback/default.py
+++ b/lib/ansible/plugins/callback/default.py
@@ -54,7 +54,9 @@ class CallbackModule(CallbackBase):
else:
if delegated_vars:
- self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR)
+ self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_ERROR)
else:
self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR)
@@ -114,7 +116,9 @@ class CallbackModule(CallbackBase):
delegated_vars = result._result.get('_ansible_delegated_vars', None)
if delegated_vars:
- self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
+ self._display.display("fatal: [%s -> %s]: UNREACHABLE! => %s" % (result._host.get_name(), delegated_vars['ansible_host'],
+ self._dump_results(result._result)),
+ color=C.COLOR_UNREACHABLE)
else:
self._display.display("fatal: [%s]: UNREACHABLE! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_UNREACHABLE)
diff --git a/lib/ansible/plugins/callback/logentries.py b/lib/ansible/plugins/callback/logentries.py
index d12d248900e..83af727350c 100644
--- a/lib/ansible/plugins/callback/logentries.py
+++ b/lib/ansible/plugins/callback/logentries.py
@@ -254,7 +254,8 @@ class CallbackModule(CallbackBase):
self.token = os.getenv('LOGENTRIES_ANSIBLE_TOKEN')
if self.token is None:
self.disabled = True
- self._display.warning('Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment variable')
+ self._display.warning('Logentries token could not be loaded. The logentries token can be provided using the `LOGENTRIES_TOKEN` environment '
+ 'variable')
self.flatten = os.getenv('LOGENTRIES_FLATTEN')
if self.flatten is None:
diff --git a/lib/ansible/plugins/callback/oneline.py b/lib/ansible/plugins/callback/oneline.py
index 12ff2aeb650..4930edf7219 100644
--- a/lib/ansible/plugins/callback/oneline.py
+++ b/lib/ansible/plugins/callback/oneline.py
@@ -56,13 +56,15 @@ class CallbackModule(CallbackBase):
else:
self._display.display(msg, color=C.COLOR_ERROR)
- self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR)
+ self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')),
+ color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK)
else:
- self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK)
+ self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')),
+ color=C.COLOR_OK)
def v2_runner_on_unreachable(self, result):
diff --git a/lib/ansible/plugins/connection/accelerate.py b/lib/ansible/plugins/connection/accelerate.py
index 968304ffa5d..7dd578c2e52 100644
--- a/lib/ansible/plugins/connection/accelerate.py
+++ b/lib/ansible/plugins/connection/accelerate.py
@@ -69,7 +69,8 @@ class Connection(ConnectionBase):
tries = 3
self.conn = socket.socket()
self.conn.settimeout(C.ACCELERATE_CONNECT_TIMEOUT)
- display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr, self._play_context.accelerate_port), host=self._play_context.remote_addr)
+ display.vvvv("attempting connection to %s via the accelerated port %d" % (self._play_context.remote_addr, self._play_context.accelerate_port),
+ host=self._play_context.remote_addr)
while tries > 0:
try:
self.conn.connect((self._play_context.remote_addr,self._play_context.accelerate_port))
@@ -80,7 +81,8 @@ class Connection(ConnectionBase):
tries -= 1
if tries == 0:
display.vvv("Could not connect via the accelerated connection, exceeded # of tries", host=self._play_context.remote_addr)
- raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr, self._play_context.accelerate_port))
+ raise AnsibleConnectionFailure("Failed to connect to %s on the accelerated port %s" % (self._play_context.remote_addr,
+ self._play_context.accelerate_port))
elif wrong_user:
display.vvv("Restarting daemon with a different remote_user", host=self._play_context.remote_addr)
raise AnsibleError("The accelerated daemon was started on the remote with a different user")
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
index 790d1a22c14..d965dff1210 100644
--- a/lib/ansible/plugins/connection/paramiko_ssh.py
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -204,7 +204,8 @@ class Connection(ConnectionBase):
raise AnsibleError("paramiko is not installed")
port = self._play_context.port or 22
- display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr), host=self._play_context.remote_addr)
+ display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
+ host=self._play_context.remote_addr)
ssh = paramiko.SSHClient()
diff --git a/lib/ansible/plugins/connection/ssh.py b/lib/ansible/plugins/connection/ssh.py
index 6ad68957eeb..6fb2f412d84 100644
--- a/lib/ansible/plugins/connection/ssh.py
+++ b/lib/ansible/plugins/connection/ssh.py
@@ -657,11 +657,13 @@ class Connection(ConnectionBase):
if C.HOST_KEY_CHECKING:
if cmd[0] == b"sshpass" and p.returncode == 6:
- raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
+ raise AnsibleError('Using a SSH password instead of a key is not possible because Host Key checking is enabled and sshpass does not support '
+ 'this. Please add this host\'s fingerprint to your known_hosts file to manage this host.')
controlpersisterror = b'Bad configuration option: ControlPersist' in b_stderr or b'unknown configuration option: ControlPersist' in b_stderr
if p.returncode != 0 and controlpersisterror:
- raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" (or ssh_args in [ssh_connection] section of the config file) before running again')
+ raise AnsibleError('using -c ssh on certain older ssh versions may not support ControlPersist, set ANSIBLE_SSH_ARGS="" '
+ '(or ssh_args in [ssh_connection] section of the config file) before running again')
if p.returncode == 255 and in_data and checkrc:
raise AnsibleConnectionFailure('SSH Error: data could not be sent to remote host "%s". Make sure this host can be reached over ssh' % self.host)
diff --git a/lib/ansible/plugins/connection/winrm.py b/lib/ansible/plugins/connection/winrm.py
index 973b351a4a0..4157cf82a7e 100644
--- a/lib/ansible/plugins/connection/winrm.py
+++ b/lib/ansible/plugins/connection/winrm.py
@@ -260,7 +260,8 @@ class Connection(ConnectionBase):
stdin_push_failed = False
command_id = self.protocol.run_command(self.shell_id, to_bytes(command), map(to_bytes, args), console_mode_stdin=(stdin_iterator is None))
- # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that comes from this)
+ # TODO: try/except around this, so we can get/return the command result on a broken pipe or other failure (probably more useful than the 500 that
+ # comes from this)
try:
if stdin_iterator:
for (data, is_last) in stdin_iterator:
diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
index 8d495241aef..70095aa7241 100644
--- a/lib/ansible/plugins/lookup/first_found.py
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -188,5 +188,6 @@ class LookupModule(LookupBase):
if skip:
return []
else:
- raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no files are found")
+ raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
+ "files are found")
diff --git a/lib/ansible/plugins/shell/fish.py b/lib/ansible/plugins/shell/fish.py
index c8aa148b5c0..c5746782d32 100644
--- a/lib/ansible/plugins/shell/fish.py
+++ b/lib/ansible/plugins/shell/fish.py
@@ -84,10 +84,10 @@ class ShellModule(ShModule):
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = shlex_quote(path)
- test = "set rc flag; [ -r %(p)s ] %(shell_or)s set rc 2; [ -f %(p)s ] %(shell_or)s set rc 1; [ -d %(p)s ] %(shell_and)s set rc 3; %(i)s -V 2>/dev/null %(shell_or)s set rc 4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"$rc \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)
+ test = "set rc flag; [ -r %(p)s ] %(shell_or)s set rc 2; [ -f %(p)s ] %(shell_or)s set rc 1; [ -d %(p)s ] %(shell_and)s set rc 3; %(i)s -V 2>/dev/null %(shell_or)s set rc 4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"$rc \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
csums = [
- u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
- u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
+ u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
+ u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
]
cmd = (" %s " % self._SHELL_OR).join(csums)
diff --git a/lib/ansible/plugins/shell/sh.py b/lib/ansible/plugins/shell/sh.py
index a12c66c0520..e3d3f8f57f6 100644
--- a/lib/ansible/plugins/shell/sh.py
+++ b/lib/ansible/plugins/shell/sh.py
@@ -72,10 +72,10 @@ class ShellModule(ShellBase):
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = shlex_quote(path)
- test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR)
+ test = "rc=flag; [ -r %(p)s ] %(shell_or)s rc=2; [ -f %(p)s ] %(shell_or)s rc=1; [ -d %(p)s ] %(shell_and)s rc=3; %(i)s -V 2>/dev/null %(shell_or)s rc=4; [ x\"$rc\" != \"xflag\" ] %(shell_and)s echo \"${rc} \"%(p)s %(shell_and)s exit 0" % dict(p=shell_escaped_path, i=python_interp, shell_and=self._SHELL_AND, shell_or=self._SHELL_OR) # NOQA
csums = [
- u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
- u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
+ u"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python > 2.4 (including python3)
+ u"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # NOQA Python == 2.4
]
cmd = (" %s " % self._SHELL_OR).join(csums)
diff --git a/lib/ansible/plugins/strategy/__init__.py b/lib/ansible/plugins/strategy/__init__.py
index fd1b04e59d5..b0b876df60f 100644
--- a/lib/ansible/plugins/strategy/__init__.py
+++ b/lib/ansible/plugins/strategy/__init__.py
@@ -468,7 +468,8 @@ class StrategyBase:
# and if none were found, then we raise an error
if not found:
- msg = "The requested handler '%s' was not found in either the main handlers list nor in the listening handlers list" % handler_name
+                msg = ("The requested handler '%s' was not found in either the main handlers list or in the listening "
+ "handlers list" % handler_name)
if C.ERROR_ON_MISSING_HANDLER:
raise AnsibleError(msg)
else:
@@ -693,8 +694,9 @@ class StrategyBase:
tags = tags.split(',')
if len(tags) > 0:
if len(included_file._task.tags) > 0:
- raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
- obj=included_file._task._ds)
+ raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
+ "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
+ obj=included_file._task._ds)
display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
included_file._task.tags = tags
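
Seen from the playbook side, a sketch of the task-level tag form that avoids both the parser error and the deprecation warning above; the file and tag names are hypothetical:

    - include: monitoring.yml
      tags:
        - monitoring
    # specifying tags inside the include parameters (include: monitoring.yml tags=monitoring)
    # while also setting task-level tags would raise the error above
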
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
index 12a9197f679..f18962c7667 100644
--- a/lib/ansible/plugins/strategy/linear.py
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -96,7 +96,10 @@ class StrategyModule(StrategyBase):
num_rescue += 1
elif s.run_state == PlayIterator.ITERATING_ALWAYS:
num_always += 1
- display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups, num_tasks, num_rescue, num_always))
+ display.debug("done counting tasks in each state of execution:\n\tnum_setups: %s\n\tnum_tasks: %s\n\tnum_rescue: %s\n\tnum_always: %s" % (num_setups,
+ num_tasks,
+ num_rescue,
+ num_always))
def _advance_selected_hosts(hosts, cur_block, cur_state):
'''
diff --git a/lib/ansible/template/__init__.py b/lib/ansible/template/__init__.py
index ab9bc466ee4..61850f6734b 100644
--- a/lib/ansible/template/__init__.py
+++ b/lib/ansible/template/__init__.py
@@ -331,7 +331,8 @@ class Templar:
self._available_variables = variables
self._cached_result = {}
- def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None, convert_data=True, static_vars=[''], cache=True, bare_deprecated=True, disable_lookups=False):
+ def template(self, variable, convert_bare=False, preserve_trailing_newlines=True, escape_backslashes=True, fail_on_undefined=None, overrides=None,
+ convert_data=True, static_vars=[''], cache=True, bare_deprecated=True, disable_lookups=False):
'''
Templates (possibly recursively) any given data as input. If convert_bare is
set to True, the given data will be wrapped as a jinja2 variable ('{{foo}}')
@@ -372,7 +373,14 @@ class Templar:
sha1_hash = None
if cache:
variable_hash = sha1(text_type(variable).encode('utf-8'))
- options_hash = sha1((text_type(preserve_trailing_newlines) + text_type(escape_backslashes) + text_type(fail_on_undefined) + text_type(overrides)).encode('utf-8'))
+ options_hash = sha1(
+ (
+ text_type(preserve_trailing_newlines) +
+ text_type(escape_backslashes) +
+ text_type(fail_on_undefined) +
+ text_type(overrides)
+ ).encode('utf-8')
+ )
sha1_hash = variable_hash.hexdigest() + options_hash.hexdigest()
if cache and sha1_hash in self._cached_result:
result = self._cached_result[sha1_hash]
@@ -507,7 +515,8 @@ class Templar:
raise AnsibleUndefinedVariable(e)
except Exception as e:
if self._fail_on_lookup_errors:
- raise AnsibleError("An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, original message: %s" % (name, type(e), e))
+ raise AnsibleError("An unhandled exception occurred while running the lookup plugin '%s'. Error was a %s, "
+ "original message: %s" % (name, type(e), e))
ran = None
if ran:
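The reflowed options_hash above feeds the template result cache: the key concatenates a digest of the template text with a digest of the rendering options, so the same template rendered with different options does not collide. A minimal sketch of that key construction, assuming Python 3 where text_type is simply str (the cache_key name is illustrative, not the Templar API):

    from hashlib import sha1

    def cache_key(variable, preserve_trailing_newlines=True, escape_backslashes=True,
                  fail_on_undefined=None, overrides=None):
        # Hash the template text and the option flags separately, then join the digests.
        variable_hash = sha1(str(variable).encode('utf-8'))
        options_hash = sha1(
            (
                str(preserve_trailing_newlines) +
                str(escape_backslashes) +
                str(fail_on_undefined) +
                str(overrides)
            ).encode('utf-8')
        )
        return variable_hash.hexdigest() + options_hash.hexdigest()

    print(cache_key("{{ foo }}"))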
diff --git a/lib/ansible/utils/encrypt.py b/lib/ansible/utils/encrypt.py
index ae36879278a..ac6adeeb72a 100644
--- a/lib/ansible/utils/encrypt.py
+++ b/lib/ansible/utils/encrypt.py
@@ -122,7 +122,9 @@ def key_for_hostname(hostname):
raise AnsibleError('ACCELERATE_KEYS_DIR is not a directory.')
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_DIR_PERMS, 8):
- raise AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR, int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
+ raise AnsibleError('Incorrect permissions on the private key directory. Use `chmod 0%o %s` to correct this issue, and make sure any of the keys files '
+ 'contained within that directory are set to 0%o' % (int(C.ACCELERATE_KEYS_DIR_PERMS, 8), C.ACCELERATE_KEYS_DIR,
+ int(C.ACCELERATE_KEYS_FILE_PERMS, 8)))
key_path = os.path.join(key_path, hostname)
@@ -143,7 +145,8 @@ def key_for_hostname(hostname):
return key
if stat.S_IMODE(os.stat(key_path).st_mode) != int(C.ACCELERATE_KEYS_FILE_PERMS, 8):
- raise AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
+ raise AnsibleError('Incorrect permissions on the key file for this host. Use `chmod 0%o %s` to '
+ 'correct this issue.' % (int(C.ACCELERATE_KEYS_FILE_PERMS, 8), key_path))
fh = open(key_path)
key = AesKey.Read(fh.read())
fh.close()
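The two permission checks rewrapped above follow the same pattern: the configured mode is stored as an octal string, so it is parsed with int(value, 8) and compared against the S_IMODE bits of the path. A standalone sketch under that assumption; check_mode and key_dir are illustrative names, not Ansible constants:

    import os
    import stat
    import tempfile

    def check_mode(path, expected_perms="700"):
        # Compare only the permission bits of the path against the expected octal mode.
        actual = stat.S_IMODE(os.stat(path).st_mode)
        expected = int(expected_perms, 8)
        if actual != expected:
            raise RuntimeError('Incorrect permissions on %s: got 0%o, use `chmod 0%o %s` to correct this.'
                               % (path, actual, expected, path))

    key_dir = tempfile.mkdtemp()
    os.chmod(key_dir, 0o700)
    check_mode(key_dir, "700")    # passes
    os.chmod(key_dir, 0o755)
    # check_mode(key_dir, "700")  # would raise: got 0755, expected 0700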
diff --git a/lib/ansible/vars/__init__.py b/lib/ansible/vars/__init__.py
index 157a493d235..06d5edef4ce 100644
--- a/lib/ansible/vars/__init__.py
+++ b/lib/ansible/vars/__init__.py
@@ -331,7 +331,8 @@ class VariableManager:
raise AnsibleFileNotFound("vars file %s was not found" % vars_file_item)
except (UndefinedError, AnsibleUndefinedVariable):
if host is not None and self._fact_cache.get(host.name, dict()).get('module_setup') and task is not None:
- raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item, obj=vars_file_item)
+ raise AnsibleUndefinedVariable("an undefined variable was found when attempting to template the vars_files item '%s'" % vars_file_item,
+ obj=vars_file_item)
else:
# we do not have a full context here, and the missing variable could be
# because of that, so just show a warning and continue
@@ -378,7 +379,8 @@ class VariableManager:
if 'environment' not in all_vars:
all_vars['environment'] = task.environment
else:
- display.warning("The variable 'environment' appears to be used already, which is also used internally for environment variables set on the task/block/play. You should use a different variable name to avoid conflicts with this internal variable")
+ display.warning("The variable 'environment' appears to be used already, which is also used internally for environment variables set on the "
+ "task/block/play. You should use a different variable name to avoid conflicts with this internal variable")
# if we have a task and we're delegating to another host, figure out the
# variables for that host now so we don't have to rely on hostvars later
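The 'environment' warning rewrapped above guards an internal variable name: the value is injected only if the user has not already defined it, otherwise the user's value is kept and a warning is emitted. A hedged sketch of that guard, where inject_internal_var and the sample dict are made up for illustration:

    import warnings

    def inject_internal_var(all_vars, name, value):
        # Only set the internal key when the user has not claimed the name already.
        if name not in all_vars:
            all_vars[name] = value
        else:
            warnings.warn("The variable '%s' appears to be used already, which is also used internally; "
                          "use a different variable name to avoid conflicts." % name)
        return all_vars

    print(inject_internal_var({'foo': 1}, 'environment', {'PATH': '/usr/bin'}))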
diff --git a/test/sanity/pep8/legacy-files.txt b/test/sanity/pep8/legacy-files.txt
index a4168885095..e69de29bb2d 100644
--- a/test/sanity/pep8/legacy-files.txt
+++ b/test/sanity/pep8/legacy-files.txt
@@ -1,213 +0,0 @@
-lib/ansible/cli/__init__.py
-lib/ansible/cli/galaxy.py
-lib/ansible/cli/playbook.py
-lib/ansible/constants.py
-lib/ansible/errors/__init__.py
-lib/ansible/executor/play_iterator.py
-lib/ansible/executor/task_executor.py
-lib/ansible/galaxy/role.py
-lib/ansible/inventory/dir.py
-lib/ansible/inventory/script.py
-lib/ansible/module_utils/basic.py
-lib/ansible/module_utils/ec2.py
-lib/ansible/module_utils/facts.py
-lib/ansible/module_utils/mysql.py
-lib/ansible/modules/cloud/amazon/_ec2_vpc.py
-lib/ansible/modules/cloud/amazon/aws_kms.py
-lib/ansible/modules/cloud/amazon/cloudformation.py
-lib/ansible/modules/cloud/amazon/cloudformation_facts.py
-lib/ansible/modules/cloud/amazon/cloudtrail.py
-lib/ansible/modules/cloud/amazon/cloudwatchevent_rule.py
-lib/ansible/modules/cloud/amazon/dynamodb_table.py
-lib/ansible/modules/cloud/amazon/ec2_ami.py
-lib/ansible/modules/cloud/amazon/ec2_ami_find.py
-lib/ansible/modules/cloud/amazon/ec2_asg.py
-lib/ansible/modules/cloud/amazon/ec2_asg_facts.py
-lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py
-lib/ansible/modules/cloud/amazon/ec2_elb.py
-lib/ansible/modules/cloud/amazon/ec2_eni_facts.py
-lib/ansible/modules/cloud/amazon/ec2_lc.py
-lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
-lib/ansible/modules/cloud/amazon/ec2_remote_facts.py
-lib/ansible/modules/cloud/amazon/ec2_scaling_policy.py
-lib/ansible/modules/cloud/amazon/ec2_snapshot_facts.py
-lib/ansible/modules/cloud/amazon/ec2_tag.py
-lib/ansible/modules/cloud/amazon/ec2_vol_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_dhcp_options_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_net.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_net_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_route_table_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_subnet_facts.py
-lib/ansible/modules/cloud/amazon/ec2_vpc_vgw_facts.py
-lib/ansible/modules/cloud/amazon/ec2_win_password.py
-lib/ansible/modules/cloud/amazon/ecs_service.py
-lib/ansible/modules/cloud/amazon/ecs_service_facts.py
-lib/ansible/modules/cloud/amazon/ecs_taskdefinition.py
-lib/ansible/modules/cloud/amazon/elasticache.py
-lib/ansible/modules/cloud/amazon/iam_cert.py
-lib/ansible/modules/cloud/amazon/iam_policy.py
-lib/ansible/modules/cloud/amazon/iam_role.py
-lib/ansible/modules/cloud/amazon/lambda.py
-lib/ansible/modules/cloud/amazon/rds.py
-lib/ansible/modules/cloud/amazon/rds_param_group.py
-lib/ansible/modules/cloud/amazon/rds_subnet_group.py
-lib/ansible/modules/cloud/amazon/redshift.py
-lib/ansible/modules/cloud/amazon/route53.py
-lib/ansible/modules/cloud/amazon/s3.py
-lib/ansible/modules/cloud/amazon/s3_lifecycle.py
-lib/ansible/modules/cloud/amazon/s3_sync.py
-lib/ansible/modules/cloud/amazon/s3_website.py
-lib/ansible/modules/cloud/amazon/sts_assume_role.py
-lib/ansible/modules/cloud/amazon/sts_session_token.py
-lib/ansible/modules/cloud/azure/azure.py
-lib/ansible/modules/cloud/azure/azure_rm_deployment.py
-lib/ansible/modules/cloud/azure/azure_rm_networkinterface_facts.py
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py
-lib/ansible/modules/cloud/azure/azure_rm_securitygroup_facts.py
-lib/ansible/modules/cloud/azure/azure_rm_subnet.py
-lib/ansible/modules/cloud/azure/azure_rm_virtualmachine.py
-lib/ansible/modules/cloud/cloudscale/cloudscale_server.py
-lib/ansible/modules/cloud/cloudstack/cs_host.py
-lib/ansible/modules/cloud/cloudstack/cs_instance.py
-lib/ansible/modules/cloud/cloudstack/cs_iso.py
-lib/ansible/modules/cloud/digital_ocean/digital_ocean.py
-lib/ansible/modules/cloud/google/gc_storage.py
-lib/ansible/modules/cloud/google/gce_tag.py
-lib/ansible/modules/cloud/google/gcpubsub.py
-lib/ansible/modules/cloud/misc/ovirt.py
-lib/ansible/modules/cloud/misc/proxmox_kvm.py
-lib/ansible/modules/cloud/misc/serverless.py
-lib/ansible/modules/cloud/openstack/_nova_compute.py
-lib/ansible/modules/cloud/packet/packet_device.py
-lib/ansible/modules/cloud/packet/packet_sshkey.py
-lib/ansible/modules/cloud/profitbricks/profitbricks.py
-lib/ansible/modules/cloud/profitbricks/profitbricks_datacenter.py
-lib/ansible/modules/cloud/softlayer/sl_vm.py
-lib/ansible/modules/cloud/vmware/vmware_guest.py
-lib/ansible/modules/cloud/vmware/vsphere_guest.py
-lib/ansible/modules/cloud/webfaction/webfaction_app.py
-lib/ansible/modules/cloud/webfaction/webfaction_db.py
-lib/ansible/modules/cloud/webfaction/webfaction_domain.py
-lib/ansible/modules/cloud/webfaction/webfaction_mailbox.py
-lib/ansible/modules/cloud/webfaction/webfaction_site.py
-lib/ansible/modules/clustering/consul.py
-lib/ansible/modules/commands/script.py
-lib/ansible/modules/database/influxdb/influxdb_retention_policy.py
-lib/ansible/modules/database/mongodb/mongodb_user.py
-lib/ansible/modules/database/mssql/mssql_db.py
-lib/ansible/modules/database/mysql/mysql_db.py
-lib/ansible/modules/database/mysql/mysql_replication.py
-lib/ansible/modules/database/mysql/mysql_user.py
-lib/ansible/modules/database/mysql/mysql_variables.py
-lib/ansible/modules/database/postgresql/postgresql_db.py
-lib/ansible/modules/database/postgresql/postgresql_ext.py
-lib/ansible/modules/database/postgresql/postgresql_privs.py
-lib/ansible/modules/database/postgresql/postgresql_schema.py
-lib/ansible/modules/database/postgresql/postgresql_user.py
-lib/ansible/modules/files/acl.py
-lib/ansible/modules/files/archive.py
-lib/ansible/modules/files/copy.py
-lib/ansible/modules/files/synchronize.py
-lib/ansible/modules/files/tempfile.py
-lib/ansible/modules/files/unarchive.py
-lib/ansible/modules/messaging/rabbitmq_queue.py
-lib/ansible/modules/monitoring/datadog_monitor.py
-lib/ansible/modules/monitoring/logicmonitor.py
-lib/ansible/modules/monitoring/nagios.py
-lib/ansible/modules/monitoring/pagerduty_alert.py
-lib/ansible/modules/monitoring/zabbix_screen.py
-lib/ansible/modules/network/a10/a10_server.py
-lib/ansible/modules/network/a10/a10_service_group.py
-lib/ansible/modules/network/basics/slurp.py
-lib/ansible/modules/network/cloudflare_dns.py
-lib/ansible/modules/network/dnsimple.py
-lib/ansible/modules/network/dnsmadeeasy.py
-lib/ansible/modules/network/f5/bigip_facts.py
-lib/ansible/modules/network/f5/bigip_monitor_tcp.py
-lib/ansible/modules/network/f5/bigip_node.py
-lib/ansible/modules/network/f5/bigip_pool.py
-lib/ansible/modules/network/f5/bigip_pool_member.py
-lib/ansible/modules/network/f5/bigip_virtual_server.py
-lib/ansible/modules/network/haproxy.py
-lib/ansible/modules/network/nmcli.py
-lib/ansible/modules/network/panos/panos_nat_policy.py
-lib/ansible/modules/network/snmp_facts.py
-lib/ansible/modules/notification/hall.py
-lib/ansible/modules/notification/sendgrid.py
-lib/ansible/modules/notification/sns.py
-lib/ansible/modules/packaging/language/maven_artifact.py
-lib/ansible/modules/packaging/os/apt.py
-lib/ansible/modules/packaging/os/apt_key.py
-lib/ansible/modules/packaging/os/dnf.py
-lib/ansible/modules/packaging/os/homebrew.py
-lib/ansible/modules/packaging/os/openbsd_pkg.py
-lib/ansible/modules/packaging/os/opkg.py
-lib/ansible/modules/packaging/os/pacman.py
-lib/ansible/modules/packaging/os/rhn_register.py
-lib/ansible/modules/packaging/os/swdepot.py
-lib/ansible/modules/packaging/os/zypper.py
-lib/ansible/modules/source_control/github_hooks.py
-lib/ansible/modules/storage/netapp/netapp_e_amg.py
-lib/ansible/modules/storage/netapp/netapp_e_amg_sync.py
-lib/ansible/modules/storage/netapp/netapp_e_auth.py
-lib/ansible/modules/storage/netapp/netapp_e_host.py
-lib/ansible/modules/storage/netapp/netapp_e_hostgroup.py
-lib/ansible/modules/storage/netapp/netapp_e_snapshot_group.py
-lib/ansible/modules/storage/netapp/netapp_e_snapshot_volume.py
-lib/ansible/modules/storage/netapp/netapp_e_storage_system.py
-lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
-lib/ansible/modules/system/debconf.py
-lib/ansible/modules/system/firewalld.py
-lib/ansible/modules/system/known_hosts.py
-lib/ansible/modules/system/lvol.py
-lib/ansible/modules/system/pam_limits.py
-lib/ansible/modules/system/solaris_zone.py
-lib/ansible/modules/system/systemd.py
-lib/ansible/modules/utilities/helper/meta.py
-lib/ansible/modules/utilities/logic/include.py
-lib/ansible/modules/utilities/logic/include_role.py
-lib/ansible/modules/utilities/logic/include_vars.py
-lib/ansible/modules/utilities/logic/pause.py
-lib/ansible/modules/utilities/logic/set_fact.py
-lib/ansible/modules/utilities/logic/wait_for.py
-lib/ansible/modules/web_infrastructure/apache2_mod_proxy.py
-lib/ansible/modules/web_infrastructure/django_manage.py
-lib/ansible/modules/windows/win_acl.py
-lib/ansible/modules/windows/win_acl_inheritance.py
-lib/ansible/modules/windows/win_command.py
-lib/ansible/modules/windows/win_feature.py
-lib/ansible/modules/windows/win_lineinfile.py
-lib/ansible/modules/windows/win_nssm.py
-lib/ansible/modules/windows/win_package.py
-lib/ansible/modules/windows/win_path.py
-lib/ansible/modules/windows/win_regedit.py
-lib/ansible/modules/windows/win_regmerge.py
-lib/ansible/modules/windows/win_robocopy.py
-lib/ansible/modules/windows/win_say.py
-lib/ansible/modules/windows/win_shell.py
-lib/ansible/modules/windows/win_unzip.py
-lib/ansible/modules/windows/win_updates.py
-lib/ansible/modules/windows/win_uri.py
-lib/ansible/modules/windows/win_webpicmd.py
-lib/ansible/parsing/mod_args.py
-lib/ansible/playbook/attribute.py
-lib/ansible/playbook/role/__init__.py
-lib/ansible/playbook/role/metadata.py
-lib/ansible/plugins/action/set_fact.py
-lib/ansible/plugins/action/set_stats.py
-lib/ansible/plugins/action/synchronize.py
-lib/ansible/plugins/callback/default.py
-lib/ansible/plugins/callback/logentries.py
-lib/ansible/plugins/callback/oneline.py
-lib/ansible/plugins/connection/accelerate.py
-lib/ansible/plugins/connection/paramiko_ssh.py
-lib/ansible/plugins/connection/ssh.py
-lib/ansible/plugins/connection/winrm.py
-lib/ansible/plugins/lookup/first_found.py
-lib/ansible/plugins/shell/fish.py
-lib/ansible/plugins/shell/sh.py
-lib/ansible/plugins/strategy/__init__.py
-lib/ansible/plugins/strategy/linear.py
-lib/ansible/template/__init__.py
-lib/ansible/utils/encrypt.py
-lib/ansible/vars/__init__.py