diff --git a/contrib/inventory/apstra_aos.py b/contrib/inventory/apstra_aos.py
index 2b30b318113..e8752183cd3 100755
--- a/contrib/inventory/apstra_aos.py
+++ b/contrib/inventory/apstra_aos.py
@@ -466,35 +466,30 @@ class AosInventory(object):
         except:
             if 'AOS_SERVER' in os.environ.keys():
                 self.aos_server = os.environ['AOS_SERVER']
-            pass

         try:
             self.aos_server_port = config.get('aos', 'port')
         except:
             if 'AOS_PORT' in os.environ.keys():
                 self.aos_server_port = os.environ['AOS_PORT']
-            pass

         try:
             self.aos_username = config.get('aos', 'username')
         except:
             if 'AOS_USERNAME' in os.environ.keys():
                 self.aos_username = os.environ['AOS_USERNAME']
-            pass

         try:
             self.aos_password = config.get('aos', 'password')
         except:
             if 'AOS_PASSWORD' in os.environ.keys():
                 self.aos_password = os.environ['AOS_PASSWORD']
-            pass

         try:
             self.aos_blueprint = config.get('aos', 'blueprint')
         except:
             if 'AOS_BLUEPRINT' in os.environ.keys():
                 self.aos_blueprint = os.environ['AOS_BLUEPRINT']
-            pass

         try:
             if config.get('aos', 'blueprint_interface') in ['false', 'no']:
diff --git a/contrib/inventory/collins.py b/contrib/inventory/collins.py
index fb5efe57337..bcba11cc1cb 100755
--- a/contrib/inventory/collins.py
+++ b/contrib/inventory/collins.py
@@ -212,7 +212,7 @@ class CollinsInventory(object):
                 cur_page += 1
                 num_retries = 0
             except:
-                self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
+                self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
                 num_retries += 1

         return assets
@@ -281,7 +281,7 @@ class CollinsInventory(object):
         try:
             server_assets = self.find_assets()
         except:
-            self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
+            self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
             return False

         for asset in server_assets:
@@ -305,7 +305,7 @@ class CollinsInventory(object):
             if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
                 asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
             elif 'ADDRESSES' not in asset:
-                self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
+                self.log.warning("No IP addresses found for asset '%s', skipping", asset)
                 continue
             elif len(asset['ADDRESSES']) < ip_index + 1:
                 self.log.warning(
diff --git a/lib/ansible/config/manager.py b/lib/ansible/config/manager.py
index 2756d7cdc99..dd4be362c97 100644
--- a/lib/ansible/config/manager.py
+++ b/lib/ansible/config/manager.py
@@ -301,7 +301,7 @@ class ConfigManager(object):
                 except Exception as e:
                     sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
             elif ftype == 'yaml':
-                pass  # FIXME: implement, also , break down key from defs (. notation???)
+                # FIXME: implement, also , break down key from defs (. notation???)
                 origin = cfile
             '''
diff --git a/lib/ansible/executor/task_executor.py b/lib/ansible/executor/task_executor.py
index 0464b9a5159..4c6fa75d0f7 100644
--- a/lib/ansible/executor/task_executor.py
+++ b/lib/ansible/executor/task_executor.py
@@ -430,18 +430,18 @@ class TaskExecutor:
             except AnsibleError:
                 # loop error takes precedence
                 if self._loop_eval_error is not None:
-                    raise self._loop_eval_error
+                    raise self._loop_eval_error  # pylint: disable=raising-bad-type
                 # skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
                 if self._task.action not in ['include', 'include_tasks', 'include_role']:
                     raise

         # Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
         if self._loop_eval_error is not None:
-            raise self._loop_eval_error
+            raise self._loop_eval_error  # pylint: disable=raising-bad-type

         # if we ran into an error while setting up the PlayContext, raise it now
         if context_validation_error is not None:
-            raise context_validation_error
+            raise context_validation_error  # pylint: disable=raising-bad-type

         # if this task is a TaskInclude, we just return now with a success code so the
         # main thread can expand the task list for the given host
diff --git a/lib/ansible/module_utils/basic.py b/lib/ansible/module_utils/basic.py
index 67a26bc2e28..d4e4beaa387 100644
--- a/lib/ansible/module_utils/basic.py
+++ b/lib/ansible/module_utils/basic.py
@@ -729,8 +729,7 @@ def env_fallback(*args, **kwargs):
     for arg in args:
         if arg in os.environ:
             return os.environ[arg]
-    else:
-        raise AnsibleFallbackNotFound
+    raise AnsibleFallbackNotFound


 def _lenient_lowercase(lst):
diff --git a/lib/ansible/module_utils/ec2.py b/lib/ansible/module_utils/ec2.py
index d140922be14..a98cc518687 100644
--- a/lib/ansible/module_utils/ec2.py
+++ b/lib/ansible/module_utils/ec2.py
@@ -72,7 +72,7 @@ class AWSRetry(CloudRetry):
         return error.response['Error']['Code']

     @staticmethod
-    def found(response_code, catch_extra_error_codes):
+    def found(response_code, catch_extra_error_codes=None):
         # This list of failures is based on this API Reference
         # http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
         #
diff --git a/lib/ansible/modules/cloud/amazon/aws_s3.py b/lib/ansible/modules/cloud/amazon/aws_s3.py
index 2eb8b18d2ba..08f2a3ef6dc 100644
--- a/lib/ansible/modules/cloud/amazon/aws_s3.py
+++ b/lib/ansible/modules/cloud/amazon/aws_s3.py
@@ -434,13 +434,11 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
             if x >= retries:
                 module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
             # otherwise, try again, this may be a transient timeout.
-            pass
         except SSLError as e:  # will ClientError catch SSLError?
             # actually fail on last pass through the loop.
             if x >= retries:
                 module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
             # otherwise, try again, this may be a transient timeout.
-            pass


 def download_s3str(module, s3, bucket, obj, version=None, validate=True):
diff --git a/lib/ansible/modules/cloud/amazon/data_pipeline.py b/lib/ansible/modules/cloud/amazon/data_pipeline.py
index 2e03d7fb4ba..261f60bb4ac 100644
--- a/lib/ansible/modules/cloud/amazon/data_pipeline.py
+++ b/lib/ansible/modules/cloud/amazon/data_pipeline.py
@@ -200,8 +200,7 @@ def pipeline_id(client, name):
     for dp in pipelines['pipelineIdList']:
         if dp['name'] == name:
             return dp['id']
-    else:
-        raise DataPipelineNotFound
+    raise DataPipelineNotFound


 def pipeline_description(client, dp_id):
@@ -233,8 +232,7 @@ def pipeline_field(client, dp_id, field):
     for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
         if field_key['key'] == field:
             return field_key['stringValue']
-    else:
-        raise KeyError("Field key {0} not found!".format(field))
+    raise KeyError("Field key {0} not found!".format(field))


 def run_with_timeout(timeout, func, *func_args, **func_kwargs):
diff --git a/lib/ansible/modules/cloud/amazon/ec2_asg.py b/lib/ansible/modules/cloud/amazon/ec2_asg.py
index 1a49f895f40..46e1a92d45d 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_asg.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_asg.py
@@ -587,7 +587,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):

     for lb in as_group['LoadBalancerNames']:
         deregister_lb_instances(elb_connection, lb, instance_id)
-        log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
+        log.debug("De-registering %s from ELB %s", instance_id, lb)

     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and count > 0:
@@ -597,7 +597,7 @@
         for i in lb_instances['InstanceStates']:
             if i['InstanceId'] == instance_id and i['State'] == "InService":
                 count += 1
-                log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
+                log.debug("%s: %s, %s", i['InstanceId'], i['State'], i['Description'])
         time.sleep(10)

     if wait_timeout <= time.time():
@@ -614,7 +614,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
     for instance, settings in props['instance_facts'].items():
         if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
             instances.append(dict(InstanceId=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
     log.debug("ELB instance status:")
     lb_instances = list()
     for lb in as_group.get('LoadBalancerNames'):
@@ -635,7 +635,7 @@
         for i in lb_instances.get('InstanceStates'):
             if i['State'] == "InService":
                 healthy_instances.add(i['InstanceId'])
-                log.debug("ELB Health State {0}: {1}".format(i['InstanceId'], i['State']))
+                log.debug("ELB Health State %s: %s", i['InstanceId'], i['State'])

     return len(healthy_instances)

@@ -648,7 +648,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
     for instance, settings in props['instance_facts'].items():
         if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
             instances.append(dict(Id=instance))
-    log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
+    log.debug("ASG considers the following instances InService and Healthy: %s", instances)
     log.debug("Target Group instance status:")
     tg_instances = list()
     for tg in as_group.get('TargetGroupARNs'):
@@ -669,7 +669,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
         for i in tg_instances.get('TargetHealthDescriptions'):
             if i['TargetHealth']['State'] == "healthy":
                 healthy_instances.add(i['Target']['Id'])
-                log.debug("Target Group Health State {0}: {1}".format(i['Target']['Id'], i['TargetHealth']['State']))
+                log.debug("Target Group Health State %s: %s", i['Target']['Id'], i['TargetHealth']['State'])

     return len(healthy_instances)

@@ -695,12 +695,12 @@ def wait_for_elb(asg_connection, module, group_name):

         while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
             healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
-            log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
+            log.debug("ELB thinks %s instances are healthy.", healthy_instances)
             time.sleep(10)
         if wait_timeout <= time.time():
             # waiting took too long
             module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-        log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("Waiting complete. ELB thinks %s instances are healthy.", healthy_instances)


 def wait_for_target_group(asg_connection, module, group_name):
@@ -725,12 +725,12 @@ def wait_for_target_group(asg_connection, module, group_name):

         while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
             healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
-            log.debug("Target Group thinks {0} instances are healthy.".format(healthy_instances))
+            log.debug("Target Group thinks %s instances are healthy.", healthy_instances)
             time.sleep(10)
         if wait_timeout <= time.time():
             # waiting took too long
             module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
-        log.debug("Waiting complete. Target Group thinks {0} instances are healthy.".format(healthy_instances))
+        log.debug("Waiting complete. Target Group thinks %s instances are healthy.", healthy_instances)


 def suspend_processes(ec2_connection, as_group, module):
@@ -1042,7 +1042,7 @@ def get_chunks(l, n):

 def update_size(connection, group, max_size, min_size, dc):
     log.debug("setting ASG sizes")
-    log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
+    log.debug("minimum size: %s, desired_capacity: %s, max size: %s", min_size, dc, max_size)
     updated_group = dict()
     updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
     updated_group['MinSize'] = min_size
@@ -1083,7 +1083,7 @@ def replace(connection, module):

     # we don't want to spin up extra instances if not necessary
     if num_new_inst_needed < batch_size:
-        log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
+        log.debug("Overriding batch size to %s", num_new_inst_needed)
         batch_size = num_new_inst_needed

     if not old_instances:
@@ -1143,14 +1143,14 @@ def get_instances_by_lc(props, lc_check, initial_instances):
                 old_instances.append(i)

     else:
-        log.debug("Comparing initial instances with current: {0}".format(initial_instances))
+        log.debug("Comparing initial instances with current: %s", initial_instances)
         for i in props['instances']:
             if i not in initial_instances:
                 new_instances.append(i)
             else:
                 old_instances.append(i)

-    log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
-    log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
+    log.debug("New instances: %s, %s", len(new_instances), new_instances)
+    log.debug("Old instances: %s, %s", len(old_instances), old_instances)

     return new_instances, old_instances
@@ -1192,17 +1192,17 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
     # and they have a non-current launch config
     instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)

-    log.debug("new instances needed: {0}".format(num_new_inst_needed))
-    log.debug("new instances: {0}".format(new_instances))
-    log.debug("old instances: {0}".format(old_instances))
-    log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
+    log.debug("new instances needed: %s", num_new_inst_needed)
+    log.debug("new instances: %s", new_instances)
+    log.debug("old instances: %s", old_instances)
+    log.debug("batch instances: %s", ",".join(instances_to_terminate))

     if num_new_inst_needed == 0:
         decrement_capacity = True
         if as_group['MinSize'] != min_size:
             updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
             update_asg(connection, **updated_params)
-            log.debug("Updating minimum size back to original of {0}".format(min_size))
+            log.debug("Updating minimum size back to original of %s", min_size)
         # if are some leftover old instances, but we are already at capacity with new ones
         # we don't want to decrement capacity
         if leftovers:
@@ -1216,13 +1216,13 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
             instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
             decrement_capacity = False
             break_loop = False
-            log.debug("{0} new instances needed".format(num_new_inst_needed))
+            log.debug("%s new instances needed", num_new_inst_needed)

-    log.debug("decrementing capacity: {0}".format(decrement_capacity))
+    log.debug("decrementing capacity: %s", decrement_capacity)

     for instance_id in instances_to_terminate:
         elb_dreg(connection, module, group_name, instance_id)
-        log.debug("terminating instance: {0}".format(instance_id))
+        log.debug("terminating instance: %s", instance_id)
         terminate_asg_instance(connection, instance_id, decrement_capacity)

     # we wait to make sure the machines we marked as Unhealthy are
@@ -1248,7 +1248,7 @@ def wait_for_term_inst(connection, module, term_instances):
         for i in instances:
             lifecycle = instance_facts[i]['lifecycle_state']
             health = instance_facts[i]['health_status']
-            log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
+            log.debug("Instance %s has state of %s,%s", i, lifecycle, health)
             if lifecycle == 'Terminating' or health == 'Unhealthy':
                 count += 1
         time.sleep(10)
@@ -1263,18 +1263,18 @@ def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size
     # make sure we have the latest stats after that last loop.
     as_group = describe_autoscaling_groups(connection, group_name)[0]
     props = get_properties(as_group, module)
-    log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+    log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
     # now we make sure that we have enough instances in a viable state
     wait_timeout = time.time() + wait_timeout
     while wait_timeout > time.time() and desired_size > props[prop]:
-        log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
+        log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
         time.sleep(10)
         as_group = describe_autoscaling_groups(connection, group_name)[0]
         props = get_properties(as_group, module)
     if wait_timeout <= time.time():
         # waiting took too long
         module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
-    log.debug("Reached {0}: {1}".format(prop, desired_size))
+    log.debug("Reached %s: %s", prop, desired_size)

     return props
diff --git a/lib/ansible/modules/cloud/amazon/ec2_key.py b/lib/ansible/modules/cloud/amazon/ec2_key.py
index c79b0220bd6..756537522dc 100644
--- a/lib/ansible/modules/cloud/amazon/ec2_key.py
+++ b/lib/ansible/modules/cloud/amazon/ec2_key.py
@@ -219,7 +219,6 @@ def main():
                     module.fail_json(msg="timed out while waiting for the key to be re-created")

             changed = True
-            pass

         # if the key doesn't exist, create it now
         else:
diff --git a/lib/ansible/modules/cloud/azure/_azure.py b/lib/ansible/modules/cloud/azure/_azure.py
index 8c277ecadd5..bc40cb5d26b 100644
--- a/lib/ansible/modules/cloud/azure/_azure.py
+++ b/lib/ansible/modules/cloud/azure/_azure.py
@@ -613,7 +613,6 @@ class Wrapper(object):
             except AzureException as e:
                 if not str(e).lower().find("temporary redirect") == -1:
                     time.sleep(5)
-                    pass
                 else:
                     raise e
diff --git a/lib/ansible/modules/cloud/lxc/lxc_container.py b/lib/ansible/modules/cloud/lxc/lxc_container.py
index ae8b196f8af..343557f9c09 100644
--- a/lib/ansible/modules/cloud/lxc/lxc_container.py
+++ b/lib/ansible/modules/cloud/lxc/lxc_container.py
@@ -668,8 +668,7 @@ class LxcContainerManagement(object):
                 build_command.append(
                     '%s %s' % (key, value)
                 )
-        else:
-            return build_command
+        return build_command

     def _get_vars(self, variables):
         """Return a dict of all variables as found within the module.
@@ -689,8 +688,7 @@ class LxcContainerManagement(object):
             _var = self.module.params.get(k)
             if _var not in false_values:
                 return_dict[v] = _var
-        else:
-            return return_dict
+        return return_dict

     def _run_command(self, build_command, unsafe_shell=False):
         """Return information from running an Ansible Command.
@@ -975,16 +973,15 @@ class LxcContainerManagement(object):
                 time.sleep(1)
             else:
                 return True
-        else:
-            self.failure(
-                lxc_container=self._container_data(),
-                error='Failed to start container'
-                      ' [ %s ]' % self.container_name,
-                rc=1,
-                msg='The container [ %s ] failed to start. Check to lxc is'
-                    ' available and that the container is in a functional'
-                    ' state.' % self.container_name
-            )
+        self.failure(
+            lxc_container=self._container_data(),
+            error='Failed to start container'
+                  ' [ %s ]' % self.container_name,
+            rc=1,
+            msg='The container [ %s ] failed to start. Check that lxc is'
+                ' available and that the container is in a functional'
+                ' state.' % self.container_name
+        )

     def _check_archive(self):
         """Create a compressed archive of a container.
diff --git a/lib/ansible/modules/cloud/misc/rhevm.py b/lib/ansible/modules/cloud/misc/rhevm.py
index 341e26a582a..098ae76e0ec 100644
--- a/lib/ansible/modules/cloud/misc/rhevm.py
+++ b/lib/ansible/modules/cloud/misc/rhevm.py
@@ -1229,6 +1229,7 @@ class RHEV(object):
         self.__get_conn()
         return self.conn.set_VM_Host(vmname, vmhost)

+        # pylint: disable=unreachable
         VM = self.conn.get_VM(vmname)
         HOST = self.conn.get_Host(vmhost)
diff --git a/lib/ansible/modules/cloud/rackspace/rax_dns.py b/lib/ansible/modules/cloud/rackspace/rax_dns.py
index 2b26e0ca149..76ce9ec625b 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_dns.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_dns.py
@@ -122,7 +122,6 @@ def rax_dns(module, comment, email, name, state, ttl):
             domain = dns.find(name=name)
         except pyrax.exceptions.NotFound:
             domain = {}
-            pass
         except Exception as e:
             module.fail_json(msg='%s' % e.message)
diff --git a/lib/ansible/modules/cloud/rackspace/rax_dns_record.py b/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
index c56e78d4559..6c04d68c9d0 100644
--- a/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
+++ b/lib/ansible/modules/cloud/rackspace/rax_dns_record.py
@@ -270,7 +270,6 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
             record = domain.find_record(record_type, name=name, data=data)
         except pyrax.exceptions.DomainRecordNotFound as e:
             record = {}
-            pass
         except pyrax.exceptions.DomainRecordNotUnique as e:
             module.fail_json(msg='%s' % e.message)
diff --git a/lib/ansible/modules/clustering/kubernetes.py b/lib/ansible/modules/clustering/kubernetes.py
index 9f723702af0..b4a76cb38f3 100755
--- a/lib/ansible/modules/clustering/kubernetes.py
+++ b/lib/ansible/modules/clustering/kubernetes.py
@@ -246,6 +246,7 @@ USER_AGENT = "ansible-k8s-module/0.0.1"

 def decode_cert_data(module):
     return
+    # pylint: disable=unreachable
     d = module.params.get("certificate_authority_data")
     if d and not d.startswith("-----BEGIN"):
         module.params["certificate_authority_data"] = base64.b64decode(d)
diff --git a/lib/ansible/modules/monitoring/monit.py b/lib/ansible/modules/monitoring/monit.py
index 38a3c0c68ba..0364bc1e807 100644
--- a/lib/ansible/modules/monitoring/monit.py
+++ b/lib/ansible/modules/monitoring/monit.py
@@ -111,8 +111,7 @@ def main():

         if parts != '':
             return parts
-        else:
-            return ''
+        return ''

     def run_command(command):
         """Runs a monit command, and returns the new status."""
diff --git a/lib/ansible/modules/network/f5/bigip_configsync_action.py b/lib/ansible/modules/network/f5/bigip_configsync_action.py
index 2d20bca2c68..771da30c9c7 100644
--- a/lib/ansible/modules/network/f5/bigip_configsync_action.py
+++ b/lib/ansible/modules/network/f5/bigip_configsync_action.py
@@ -286,7 +286,6 @@ class ModuleManager(object):
             if status in ['Changes Pending']:
                 details = self._get_details_from_resource(resource)
                 self._validate_pending_status(details)
-                pass
             elif status in ['Awaiting Initial Sync', 'Not All Devices Synced']:
                 pass
             elif status == 'In Sync':
diff --git a/lib/ansible/modules/packaging/language/maven_artifact.py b/lib/ansible/modules/packaging/language/maven_artifact.py
index 8a3f23576f6..8136e3db25c 100644
--- a/lib/ansible/modules/packaging/language/maven_artifact.py
+++ b/lib/ansible/modules/packaging/language/maven_artifact.py
@@ -277,7 +277,7 @@ class MavenDownloader:
         if self.latest_version_found:
             return self.latest_version_found
         path = "/%s/maven-metadata.xml" % (artifact.path(False))
-        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+        xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
         v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
         if v:
             self.latest_version_found = v[0]
@@ -289,7 +289,7 @@ class MavenDownloader:

         if artifact.is_snapshot():
             path = "/%s/maven-metadata.xml" % (artifact.path())
-            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
+            xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
             timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
             buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
             for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_flashcache.py b/lib/ansible/modules/storage/netapp/netapp_e_flashcache.py
index cc50fd8efe1..e8ac7b22ddf 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_flashcache.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_flashcache.py
@@ -267,15 +267,15 @@ class NetAppESeriesFlashCache(object):
     @property
     def needs_more_disks(self):
         if len(self.cache_detail['driveRefs']) < self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s < requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
             return True

     @property
     def needs_less_disks(self):
         if len(self.cache_detail['driveRefs']) > self.disk_count:
-            self.debug("needs resize: current disk count %s < requested requested count %s" % (
-                len(self.cache_detail['driveRefs']), self.disk_count))
+            self.debug("needs resize: current disk count %s > requested count %s",
+                       len(self.cache_detail['driveRefs']), self.disk_count)
             return True

     @property
@@ -292,8 +292,8 @@ class NetAppESeriesFlashCache(object):
     @property
     def needs_more_capacity(self):
         if self.current_size_bytes < self.requested_size_bytes:
-            self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
-                self.current_size_bytes, self.requested_size_bytes))
+            self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
+                       self.current_size_bytes, self.requested_size_bytes)
             return True

     @property
@@ -405,7 +405,7 @@ def main():
     try:
         sp.apply()
     except Exception as e:
-        sp.debug("Exception in apply(): \n%s" % to_native(e))
+        sp.debug("Exception in apply(): \n%s", to_native(e))
         sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
                             exception=traceback.format_exc())
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
index dca9246016f..58d3a4723bf 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_storagepool.py
@@ -269,7 +269,7 @@ class NetAppESeriesStoragePool(object):
             min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]

         # filter clearly invalid/unavailable drives first
-        drives = select(lambda d: self._is_valid_drive(d), drives)
+        drives = select(self._is_valid_drive, drives)

         if interface_type:
             drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
@@ -390,7 +390,7 @@ class NetAppESeriesStoragePool(object):
                 msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
                     (self.ssid, str(err), self.state, rc))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)

        pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
@@ -514,7 +514,7 @@ class NetAppESeriesStoragePool(object):
         return needs_migration

     def migrate_raid_level(self):
-        self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
+        self.debug("migrating storage pool to raid level '%s'...", self.raid_level)
         sp_raid_migrate_req = dict(
             raidLevel=self.raid_level
         )
@@ -637,7 +637,7 @@ class NetAppESeriesStoragePool(object):
     def expand_storage_pool(self):
         drives_to_add = self.get_expansion_candidate_drives()

-        self.debug("adding %s drives to storage pool..." % len(drives_to_add))
+        self.debug("adding %s drives to storage pool...", len(drives_to_add))
         sp_expand_req = dict(
             drives=drives_to_add
         )
@@ -723,8 +723,8 @@ class NetAppESeriesStoragePool(object):

         if self.needs_raid_level_migration:
             self.debug(
-                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
-                    self.pool_detail['raidLevel'], self.raid_level))
+                "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'",
+                self.pool_detail['raidLevel'], self.raid_level)
             changed = True

         # if self.reserved_drive_count_differs:
@@ -813,7 +813,7 @@ def main():
         sp.apply()
     except Exception:
         e = get_exception()
-        sp.debug("Exception in apply(): \n%s" % format_exc(e))
+        sp.debug("Exception in apply(): \n%s", format_exc(e))
         raise
diff --git a/lib/ansible/modules/storage/netapp/netapp_e_volume.py b/lib/ansible/modules/storage/netapp/netapp_e_volume.py
index 36f8ccc4013..45620581844 100644
--- a/lib/ansible/modules/storage/netapp/netapp_e_volume.py
+++ b/lib/ansible/modules/storage/netapp/netapp_e_volume.py
@@ -235,7 +235,7 @@ class NetAppESeriesVolume(object):

             volumes.extend(thinvols)

-        self.debug("searching for volume '%s'" % volume_name)
+        self.debug("searching for volume '%s'", volume_name)

         volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)

         if volume_detail:
@@ -257,7 +257,7 @@ class NetAppESeriesVolume(object):
             self.module.fail_json(
                 msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]."
                     % (self.ssid, str(err)))

-        self.debug("searching for storage pool '%s'" % storage_pool_name)
+        self.debug("searching for storage pool '%s'", storage_pool_name)

         pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)

         if pool_detail:
@@ -277,7 +277,7 @@ class NetAppESeriesVolume(object):
             dataAssuranceEnabled=data_assurance_enabled,
         )

-        self.debug("creating volume '%s'" % name)
+        self.debug("creating volume '%s'", name)

         try:
             (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                  data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
@@ -302,7 +302,7 @@ class NetAppESeriesVolume(object):
             dataAssuranceEnabled=data_assurance_enabled,
         )

-        self.debug("creating thin-volume '%s'" % name)
+        self.debug("creating thin-volume '%s'", name)

         try:
             (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                  data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
@@ -318,7 +318,7 @@ class NetAppESeriesVolume(object):

     def delete_volume(self):
         # delete the volume
-        self.debug("deleting volume '%s'" % self.volume_detail['name'])
+        self.debug("deleting volume '%s'", self.volume_detail['name'])
         try:
             (rc, resp) = request(
                 self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
@@ -445,7 +445,7 @@ class NetAppESeriesVolume(object):
             action = resp['action']
             percent_complete = resp['percentComplete']

-            self.debug('expand action %s, %s complete...' % (action, percent_complete))
+            self.debug('expand action %s, %s complete...', action, percent_complete)

             if action == 'none':
                 self.debug('expand complete')
@@ -469,11 +469,8 @@ class NetAppESeriesVolume(object):
         elif self.state == 'present':
             # check requested volume size, see if expansion is necessary
             if self.volume_needs_expansion:
-                self.debug(
-                    "CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
-                                                                                             self.size_unit,
-                                                                                             self.volume_detail[
-                                                                                                 'capacity']))
+                self.debug("CHANGED: requested volume size %s%s is larger than current size %sb",
+                           self.size, self.size_unit, self.volume_detail['capacity'])
                 changed = True

             if self.volume_properties_changed:
@@ -543,7 +540,7 @@ def main():
         v.apply()
     except Exception:
         e = get_exception()
-        v.debug("Exception in apply(): \n%s" % format_exc(e))
+        v.debug("Exception in apply(): \n%s", format_exc(e))
         v.module.fail_json(msg="Module failed. Error [%s]."
                                % (str(e)))
diff --git a/lib/ansible/modules/storage/netapp/sf_volume_manager.py b/lib/ansible/modules/storage/netapp/sf_volume_manager.py
index 61aa7817934..d95c9a1dc9b 100644
--- a/lib/ansible/modules/storage/netapp/sf_volume_manager.py
+++ b/lib/ansible/modules/storage/netapp/sf_volume_manager.py
@@ -306,7 +306,6 @@ class SolidFireVolume(object):
         if changed:
             if self.module.check_mode:
                 result_message = "Check mode, skipping changes"
-                pass
             else:
                 if self.state == 'present':
                     if not volume_exists:
diff --git a/lib/ansible/modules/storage/purestorage/purefa_snap.py b/lib/ansible/modules/storage/purestorage/purefa_snap.py
index ed2ae457ef1..5d8ab3762f6 100644
--- a/lib/ansible/modules/storage/purestorage/purefa_snap.py
+++ b/lib/ansible/modules/storage/purestorage/purefa_snap.py
@@ -126,7 +126,6 @@ def get_snapshot(module, array):
         for s in array.get_volume(module.params['name'], snap='true'):
             if s['name'] == snapname:
                 return snapname
-                break
     except:
         return None
diff --git a/lib/ansible/modules/system/interfaces_file.py b/lib/ansible/modules/system/interfaces_file.py
index 0128d9bfcdf..dc0dfd881f2 100755
--- a/lib/ansible/modules/system/interfaces_file.py
+++ b/lib/ansible/modules/system/interfaces_file.py
@@ -306,7 +306,6 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
         module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)

     return changed, lines
-    pass


 def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):
diff --git a/lib/ansible/plugins/cache/__init__.py b/lib/ansible/plugins/cache/__init__.py
index 27bb0d3064c..7bc11226aa7 100644
--- a/lib/ansible/plugins/cache/__init__.py
+++ b/lib/ansible/plugins/cache/__init__.py
@@ -183,7 +183,6 @@ class BaseFileCacheModule(BaseCacheModule):
                 return False
             else:
                 display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
-                pass

     def delete(self, key):
         try:
diff --git a/lib/ansible/plugins/callback/syslog_json.py b/lib/ansible/plugins/callback/syslog_json.py
index 6385e65550a..3a4cdc80915 100644
--- a/lib/ansible/plugins/callback/syslog_json.py
+++ b/lib/ansible/plugins/callback/syslog_json.py
@@ -78,22 +78,22 @@ class CallbackModule(CallbackBase):
         self.hostname = socket.gethostname()

     def runner_on_failed(self, host, res, ignore_errors=False):
-        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))

     def runner_on_ok(self, host, res):
-        self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))

     def runner_on_skipped(self, host, item=None):
-        self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname, host, 'skipped'))
+        self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')

     def runner_on_unreachable(self, host, res):
-        self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))

     def runner_on_async_failed(self, host, res, jid):
-        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
+        self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))

     def playbook_on_import_for_host(self, host, imported_file):
-        self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname, host, imported_file))
+        self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)

     def playbook_on_not_import_for_host(self, host, missing_file):
-        self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname, host, missing_file))
+        self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)
diff --git a/lib/ansible/plugins/connection/paramiko_ssh.py b/lib/ansible/plugins/connection/paramiko_ssh.py
index 924addb6f5c..e6ecaac0556 100644
--- a/lib/ansible/plugins/connection/paramiko_ssh.py
+++ b/lib/ansible/plugins/connection/paramiko_ssh.py
@@ -502,7 +502,6 @@ class Connection(ConnectionBase):
                     # unable to save keys, including scenario when key was invalid
                     # and caught earlier
                     traceback.print_exc()
-                    pass
             fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)

         self.ssh.close()
diff --git a/lib/ansible/plugins/loader.py b/lib/ansible/plugins/loader.py
index cbceef12e19..1e1dc84a6c0 100644
--- a/lib/ansible/plugins/loader.py
+++ b/lib/ansible/plugins/loader.py
@@ -413,7 +413,7 @@ class PluginLoader:
         for i in self._get_paths():
             all_matches.extend(glob.glob(os.path.join(i, "*.py")))

-        for path in sorted(all_matches, key=lambda match: os.path.basename(match)):
+        for path in sorted(all_matches, key=os.path.basename):
             name = os.path.basename(os.path.splitext(path)[0])

             if '__init__' in name:
diff --git a/lib/ansible/plugins/lookup/first_found.py b/lib/ansible/plugins/lookup/first_found.py
index be4f7d145bf..ff7905daac3 100644
--- a/lib/ansible/plugins/lookup/first_found.py
+++ b/lib/ansible/plugins/lookup/first_found.py
@@ -185,9 +185,7 @@ class LookupModule(LookupBase):
             path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
             if path is not None:
                 return [path]
-            else:
-                if skip:
-                    return []
-                else:
-                    raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
-                                             "files are found")
+            if skip:
+                return []
+            raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
+                                     "files are found")
diff --git a/lib/ansible/plugins/strategy/free.py b/lib/ansible/plugins/strategy/free.py
index 7b79639efa5..9604892dc0a 100644
--- a/lib/ansible/plugins/strategy/free.py
+++ b/lib/ansible/plugins/strategy/free.py
@@ -127,7 +127,6 @@ class StrategyModule(StrategyBase):
                         # just ignore any errors during task name templating,
                         # we don't care if it just shows the raw name
                         display.debug("templating failed for some reason")
-                        pass

                     run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
                     if run_once:
diff --git a/lib/ansible/plugins/strategy/linear.py b/lib/ansible/plugins/strategy/linear.py
index 71d5448f8e6..3db70d2c1c4 100644
--- a/lib/ansible/plugins/strategy/linear.py
+++ b/lib/ansible/plugins/strategy/linear.py
@@ -266,7 +266,6 @@ class StrategyModule(StrategyBase):
                             # just ignore any errors during task name templating,
                             # we don't care if it just shows the raw name
                             display.debug("templating failed for some reason")
-                            pass
                         display.debug("here goes the callback...")
                         self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
                         task.name = saved_name
diff --git a/test/sanity/pylint/disable.txt b/test/sanity/pylint/disable.txt
index 82ee589a3c7..5e525cb3ab3 100644
--- a/test/sanity/pylint/disable.txt
+++ b/test/sanity/pylint/disable.txt
@@ -8,7 +8,6 @@ attribute-defined-outside-init
 bad-continuation
 bad-indentation
 bad-mcs-classmethod-argument
-bad-open-mode
 bad-whitespace
 bare-except
 blacklisted-name
@@ -34,8 +33,6 @@ invalid-encoded-data
 invalid-name
 line-too-long
 locally-disabled
-logging-format-interpolation
-logging-not-lazy
 method-hidden
 misplaced-comparison-constant
 missing-docstring
@@ -52,18 +49,14 @@ old-style-class
 pointless-statement
 pointless-string-statement
 protected-access
-raising-bad-type
 redefined-builtin
 redefined-outer-name
 redefined-variable-type
-redundant-unittest-assert
 reimported
 relative-import
-signature-differs
 simplifiable-if-statement
 super-init-not-called
 superfluous-parens
-suppressed-message
 too-few-public-methods
 too-many-ancestors
 too-many-arguments
@@ -80,10 +73,7 @@ too-many-statements
 undefined-loop-variable
 ungrouped-imports
 unidiomatic-typecheck
-unnecessary-lambda
-unnecessary-pass
 unneeded-not
-unreachable
 unsubscriptable-object
 unsupported-membership-test
 unused-argument
@@ -91,7 +81,6 @@ unused-import
 unused-variable
 unused-wildcard-import
 used-before-assignment
-useless-else-on-loop
 wildcard-import
 wrong-import-order
 wrong-import-position
diff --git a/test/units/module_utils/json_utils/test_filter_non_json_lines.py b/test/units/module_utils/json_utils/test_filter_non_json_lines.py
index a92e38298d2..d0287ae600f 100644
--- a/test/units/module_utils/json_utils/test_filter_non_json_lines.py
+++ b/test/units/module_utils/json_utils/test_filter_non_json_lines.py
@@ -87,6 +87,6 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
         for i in self.unparsable_cases:
             self.assertRaises(
                 ValueError,
-                lambda data: _filter_non_json_lines(data),
+                _filter_non_json_lines,
                 data=i
             )
diff --git a/test/units/modules/network/radware/test_vdirect_file.py b/test/units/modules/network/radware/test_vdirect_file.py
index 1b7c6158a8e..b17b3546723 100644
--- a/test/units/modules/network/radware/test_vdirect_file.py
+++ b/test/units/modules/network/radware/test_vdirect_file.py
@@ -106,7 +106,7 @@ class TestManager(unittest.TestCase):
             params = NONE_PARAMS.copy()
             del params['vdirect_ip']
             vdirect_file.VdirectFile(params)
-            self.assertFalse("KeyError was not thrown for missing parameter")
+            self.fail("KeyError was not thrown for missing parameter")
         except KeyError:
             assert True

@@ -134,7 +134,7 @@ class TestManager(unittest.TestCase):
         file = vdirect_file.VdirectFile(NONE_PARAMS)
         try:
             file.upload("missing_file.vm")
-            self.assertFalse("IOException was not thrown for missing file")
+            self.fail("IOException was not thrown for missing file")
         except IOError:
             assert True
diff --git a/test/units/parsing/vault/test_vault_editor.py b/test/units/parsing/vault/test_vault_editor.py
index 3c6e73b65db..a7318505e07 100644
--- a/test/units/parsing/vault/test_vault_editor.py
+++ b/test/units/parsing/vault/test_vault_editor.py
@@ -467,7 +467,6 @@ class TestVaultEditor(unittest.TestCase):
         try:
             ve.decrypt_file(v11_file.name)
         except errors.AnsibleError:
-            raise
             error_hit = True

         # verify decrypted content
@@ -493,7 +492,6 @@ class TestVaultEditor(unittest.TestCase):
         try:
             ve.rekey_file(v10_file.name, vault.match_encrypt_secret(new_secrets)[1])
         except errors.AnsibleError:
-            raise
             error_hit = True

         # verify decrypted content
@@ -510,7 +508,6 @@ class TestVaultEditor(unittest.TestCase):
         try:
             dec_data = vl.decrypt(fdata)
         except errors.AnsibleError:
-            raise
             error_hit = True

         os.unlink(v10_file.name)