Enable more pylint rules and fix reported issues. (#30539)

* Enable pylint unreachable test.
* Enable pylint suppressed-message test.
* Enable pylint redundant-unittest-assert test.
* Enable pylint bad-open-mode test.
* Enable pylint signature-differs test.
* Enable pylint unnecessary-pass test.
* Enable pylint unnecessary-lambda test.
* Enable pylint raising-bad-type test.
* Enable pylint logging-not-lazy test.
* Enable pylint logging-format-interpolation test.
* Enable pylint useless-else-on-loop test.
Branch: pull/30542/head
Author: Matt Clay (committed by GitHub)
Parent: 01563ccd5d
Commit: 7714dcd04e
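
Most of the rules above are enabled by deleting their names from the pylint ignore list touched near the end of this diff; intentional violations are then silenced with inline "# pylint: disable=..." comments (as in the TaskExecutor, RHEV and k8s hunks below). One of the enabled checks, bad-open-mode, has no hunk visible in this excerpt; a minimal sketch of what it catches, using a hypothetical path and function:

def read_scratch(path="/tmp/example.txt"):
    # bad-open-mode would flag open(path, "rw"): "rw" is not a mode open() accepts.
    # "r+" (read and update an existing file) is usually what was intended.
    with open(path, "r+") as handle:
        return handle.read()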

@ -466,35 +466,30 @@ class AosInventory(object):
except:
if 'AOS_SERVER' in os.environ.keys():
self.aos_server = os.environ['AOS_SERVER']
pass
try:
self.aos_server_port = config.get('aos', 'port')
except:
if 'AOS_PORT' in os.environ.keys():
self.aos_server_port = os.environ['AOS_PORT']
pass
try:
self.aos_username = config.get('aos', 'username')
except:
if 'AOS_USERNAME' in os.environ.keys():
self.aos_username = os.environ['AOS_USERNAME']
pass
try:
self.aos_password = config.get('aos', 'password')
except:
if 'AOS_PASSWORD' in os.environ.keys():
self.aos_password = os.environ['AOS_PASSWORD']
pass
try:
self.aos_blueprint = config.get('aos', 'blueprint')
except:
if 'AOS_BLUEPRINT' in os.environ.keys():
self.aos_blueprint = os.environ['AOS_BLUEPRINT']
pass
try:
if config.get('aos', 'blueprint_interface') in ['false', 'no']:

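The hunk above is an unnecessary-pass fix: "pass" is only required where a block would otherwise be empty, so the trailing "pass" after each environment-variable fallback is removed. A minimal sketch of the resulting shape, with a hypothetical helper standing in for the inventory class:

import os

def read_server(config, server=None):
    # unnecessary-pass: a "pass" after real statements adds nothing, so it is dropped.
    try:
        server = config.get('aos', 'server')
    except Exception:
        if 'AOS_SERVER' in os.environ:
            server = os.environ['AOS_SERVER']
    return server
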
@ -212,7 +212,7 @@ class CollinsInventory(object):
cur_page += 1
num_retries = 0
except:
self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
self.log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
num_retries += 1
return assets
@ -281,7 +281,7 @@ class CollinsInventory(object):
try:
server_assets = self.find_assets()
except:
self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
self.log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
return False
for asset in server_assets:
@ -305,7 +305,7 @@ class CollinsInventory(object):
if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
elif 'ADDRESSES' not in asset:
self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
self.log.warning("No IP addresses found for asset '%s', skipping", asset)
continue
elif len(asset['ADDRESSES']) < ip_index + 1:
self.log.warning(

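The Collins changes are logging-not-lazy fixes: the arguments are passed to the logger instead of being %-formatted into the message up front, so interpolation happens only when the record is actually emitted. A minimal sketch with a hypothetical wrapper function:

import logging
import traceback

log = logging.getLogger(__name__)

def find_assets(collins):
    try:
        return collins.find_assets()
    except Exception:
        # Lazy form: the logging module interpolates %s only if the record is emitted.
        log.error("Error while locating assets from Collins:\n%s", traceback.format_exc())
        return []
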
@ -301,7 +301,7 @@ class ConfigManager(object):
except Exception as e:
sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e)))
elif ftype == 'yaml':
pass # FIXME: implement, also , break down key from defs (. notation???)
# FIXME: implement, also , break down key from defs (. notation???)
origin = cfile
'''

@ -430,18 +430,18 @@ class TaskExecutor:
except AnsibleError:
# loop error takes precedence
if self._loop_eval_error is not None:
raise self._loop_eval_error
raise self._loop_eval_error # pylint: disable=raising-bad-type
# skip conditional exception in the case of includes as the vars needed might not be available except in the included tasks or due to tags
if self._task.action not in ['include', 'include_tasks', 'include_role']:
raise
# Not skipping, if we had loop error raised earlier we need to raise it now to halt the execution of this task
if self._loop_eval_error is not None:
raise self._loop_eval_error
raise self._loop_eval_error # pylint: disable=raising-bad-type
# if we ran into an error while setting up the PlayContext, raise it now
if context_validation_error is not None:
raise context_validation_error
raise context_validation_error # pylint: disable=raising-bad-type
# if this task is a TaskInclude, we just return now with a success code so the
# main thread can expand the task list for the given host
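
raising-bad-type fires when pylint cannot prove that the raised object is an exception; _loop_eval_error defaults to None, so the already-guarded raise statements are suppressed inline rather than weakening the rule globally. A minimal sketch of the pattern, with a hypothetical class:

class Executor:
    def __init__(self):
        self._loop_eval_error = None  # assigned an exception instance if loop evaluation fails

    def run(self):
        if self._loop_eval_error is not None:
            # The guard guarantees an exception instance, but pylint only sees the None default.
            raise self._loop_eval_error  # pylint: disable=raising-bad-type
        return 'ok'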

@ -729,7 +729,6 @@ def env_fallback(*args, **kwargs):
for arg in args:
if arg in os.environ:
return os.environ[arg]
else:
raise AnsibleFallbackNotFound
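
useless-else-on-loop: an "else" clause on a loop only has meaning when the loop body contains "break"; without one the clause always runs, so it is dropped and its body dedented. The env_fallback shape after the fix, sketched as a standalone function (the exception class is defined elsewhere in Ansible):

import os

class AnsibleFallbackNotFound(Exception):
    """Stand-in for the exception defined elsewhere in Ansible."""

def env_fallback(*args, **kwargs):
    for arg in args:
        if arg in os.environ:
            return os.environ[arg]
    # The loop has no break, so the removed "else:" always ran; a plain raise is equivalent.
    raise AnsibleFallbackNotFound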

@ -72,7 +72,7 @@ class AWSRetry(CloudRetry):
return error.response['Error']['Code']
@staticmethod
def found(response_code, catch_extra_error_codes):
def found(response_code, catch_extra_error_codes=None):
# This list of failures is based on this API Reference
# http://docs.aws.amazon.com/AWSEC2/latest/APIReference/errors-overview.html
#
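
signature-differs flags an overriding method whose parameter list no longer matches the method it overrides; giving found() the same catch_extra_error_codes=None default as the base class removes the mismatch. A minimal sketch with hypothetical stand-in classes:

class CloudRetryBase:
    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        return False

class AWSRetryLike(CloudRetryBase):
    @staticmethod
    def found(response_code, catch_extra_error_codes=None):
        # Same parameter list (including the default) as the base method,
        # so signature-differs no longer fires.
        codes = ['RequestLimitExceeded', 'Throttling']
        if catch_extra_error_codes:
            codes.extend(catch_extra_error_codes)
        return response_code in codes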

@ -434,13 +434,11 @@ def download_s3file(module, s3, bucket, obj, dest, retries, version=None):
if x >= retries:
module.fail_json(msg="Failed while downloading %s." % obj, exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
# otherwise, try again, this may be a transient timeout.
pass
except SSLError as e: # will ClientError catch SSLError?
# actually fail on last pass through the loop.
if x >= retries:
module.fail_json(msg="s3 download failed: %s." % e, exception=traceback.format_exc())
# otherwise, try again, this may be a transient timeout.
pass
def download_s3str(module, s3, bucket, obj, version=None, validate=True):

@ -200,7 +200,6 @@ def pipeline_id(client, name):
for dp in pipelines['pipelineIdList']:
if dp['name'] == name:
return dp['id']
else:
raise DataPipelineNotFound
@ -233,7 +232,6 @@ def pipeline_field(client, dp_id, field):
for field_key in dp_description['pipelineDescriptionList'][0]['fields']:
if field_key['key'] == field:
return field_key['stringValue']
else:
raise KeyError("Field key {0} not found!".format(field))

@ -587,7 +587,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
for lb in as_group['LoadBalancerNames']:
deregister_lb_instances(elb_connection, lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))
log.debug("De-registering %s from ELB %s", instance_id, lb)
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and count > 0:
@ -597,7 +597,7 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
for i in lb_instances['InstanceStates']:
if i['InstanceId'] == instance_id and i['State'] == "InService":
count += 1
log.debug("{0}: {1}, {2}".format(i['InstanceId'], i['State'], i['Description']))
log.debug("%s: %s, %s", i['InstanceId'], i['State'], i['Description'])
time.sleep(10)
if wait_timeout <= time.time():
@ -614,7 +614,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(InstanceId=instance))
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ASG considers the following instances InService and Healthy: %s", instances)
log.debug("ELB instance status:")
lb_instances = list()
for lb in as_group.get('LoadBalancerNames'):
@ -635,7 +635,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
for i in lb_instances.get('InstanceStates'):
if i['State'] == "InService":
healthy_instances.add(i['InstanceId'])
log.debug("ELB Health State {0}: {1}".format(i['InstanceId'], i['State']))
log.debug("ELB Health State %s: %s", i['InstanceId'], i['State'])
return len(healthy_instances)
@ -648,7 +648,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
for instance, settings in props['instance_facts'].items():
if settings['lifecycle_state'] == 'InService' and settings['health_status'] == 'Healthy':
instances.append(dict(Id=instance))
log.debug("ASG considers the following instances InService and Healthy: {0}".format(instances))
log.debug("ASG considers the following instances InService and Healthy: %s", instances)
log.debug("Target Group instance status:")
tg_instances = list()
for tg in as_group.get('TargetGroupARNs'):
@ -669,7 +669,7 @@ def tg_healthy(asg_connection, elbv2_connection, module, group_name):
for i in tg_instances.get('TargetHealthDescriptions'):
if i['TargetHealth']['State'] == "healthy":
healthy_instances.add(i['Target']['Id'])
log.debug("Target Group Health State {0}: {1}".format(i['Target']['Id'], i['TargetHealth']['State']))
log.debug("Target Group Health State %s: %s", i['Target']['Id'], i['TargetHealth']['State'])
return len(healthy_instances)
@ -695,12 +695,12 @@ def wait_for_elb(asg_connection, module, group_name):
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = elb_healthy(asg_connection, elb_connection, module, group_name)
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
log.debug("ELB thinks %s instances are healthy.", healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))
log.debug("Waiting complete. ELB thinks %s instances are healthy.", healthy_instances)
def wait_for_target_group(asg_connection, module, group_name):
@ -725,12 +725,12 @@ def wait_for_target_group(asg_connection, module, group_name):
while healthy_instances < as_group.get('MinSize') and wait_timeout > time.time():
healthy_instances = tg_healthy(asg_connection, elbv2_connection, module, group_name)
log.debug("Target Group thinks {0} instances are healthy.".format(healthy_instances))
log.debug("Target Group thinks %s instances are healthy.", healthy_instances)
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. Target Group thinks {0} instances are healthy.".format(healthy_instances))
log.debug("Waiting complete. Target Group thinks %s instances are healthy.", healthy_instances)
def suspend_processes(ec2_connection, as_group, module):
@ -1042,7 +1042,7 @@ def get_chunks(l, n):
def update_size(connection, group, max_size, min_size, dc):
log.debug("setting ASG sizes")
log.debug("minimum size: {0}, desired_capacity: {1}, max size: {2}".format(min_size, dc, max_size))
log.debug("minimum size: %s, desired_capacity: %s, max size: %s", min_size, dc, max_size)
updated_group = dict()
updated_group['AutoScalingGroupName'] = group['AutoScalingGroupName']
updated_group['MinSize'] = min_size
@ -1083,7 +1083,7 @@ def replace(connection, module):
# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
log.debug("Overriding batch size to %s", num_new_inst_needed)
batch_size = num_new_inst_needed
if not old_instances:
@ -1143,14 +1143,14 @@ def get_instances_by_lc(props, lc_check, initial_instances):
old_instances.append(i)
else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
log.debug("Comparing initial instances with current: %s", initial_instances)
for i in props['instances']:
if i not in initial_instances:
new_instances.append(i)
else:
old_instances.append(i)
log.debug("New instances: {0}, {1}".format(len(new_instances), new_instances))
log.debug("Old instances: {0}, {1}".format(len(old_instances), old_instances))
log.debug("New instances: %s, %s", len(new_instances), new_instances)
log.debug("Old instances: %s, %s", len(old_instances), old_instances)
return new_instances, old_instances
@ -1192,17 +1192,17 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
# and they have a non-current launch config
instances_to_terminate = list_purgeable_instances(props, lc_check, replace_instances, initial_instances)
log.debug("new instances needed: {0}".format(num_new_inst_needed))
log.debug("new instances: {0}".format(new_instances))
log.debug("old instances: {0}".format(old_instances))
log.debug("batch instances: {0}".format(",".join(instances_to_terminate)))
log.debug("new instances needed: %s", num_new_inst_needed)
log.debug("new instances: %s", new_instances)
log.debug("old instances: %s", old_instances)
log.debug("batch instances: %s", ",".join(instances_to_terminate))
if num_new_inst_needed == 0:
decrement_capacity = True
if as_group['MinSize'] != min_size:
updated_params = dict(AutoScalingGroupName=as_group['AutoScalingGroupName'], MinSize=min_size)
update_asg(connection, **updated_params)
log.debug("Updating minimum size back to original of {0}".format(min_size))
log.debug("Updating minimum size back to original of %s", min_size)
# if are some leftover old instances, but we are already at capacity with new ones
# we don't want to decrement capacity
if leftovers:
@ -1216,13 +1216,13 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
instances_to_terminate = instances_to_terminate[:num_new_inst_needed]
decrement_capacity = False
break_loop = False
log.debug("{0} new instances needed".format(num_new_inst_needed))
log.debug("%s new instances needed", num_new_inst_needed)
log.debug("decrementing capacity: {0}".format(decrement_capacity))
log.debug("decrementing capacity: %s", decrement_capacity)
for instance_id in instances_to_terminate:
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
log.debug("terminating instance: %s", instance_id)
terminate_asg_instance(connection, instance_id, decrement_capacity)
# we wait to make sure the machines we marked as Unhealthy are
@ -1248,7 +1248,7 @@ def wait_for_term_inst(connection, module, term_instances):
for i in instances:
lifecycle = instance_facts[i]['lifecycle_state']
health = instance_facts[i]['health_status']
log.debug("Instance {0} has state of {1},{2}".format(i, lifecycle, health))
log.debug("Instance %s has state of %s,%s", i, lifecycle, health)
if lifecycle == 'Terminating' or health == 'Unhealthy':
count += 1
time.sleep(10)
@ -1263,18 +1263,18 @@ def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size
# make sure we have the latest stats after that last loop.
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
# now we make sure that we have enough instances in a viable state
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and desired_size > props[prop]:
log.debug("Waiting for {0} = {1}, currently {2}".format(prop, desired_size, props[prop]))
log.debug("Waiting for %s = %s, currently %s", prop, desired_size, props[prop])
time.sleep(10)
as_group = describe_autoscaling_groups(connection, group_name)[0]
props = get_properties(as_group, module)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg="Waited too long for new instances to become viable. %s" % time.asctime())
log.debug("Reached {0}: {1}".format(prop, desired_size))
log.debug("Reached %s: %s", prop, desired_size)
return props
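
The ec2_asg hunks address logging-format-interpolation, the str.format() counterpart of logging-not-lazy: arguments move out of the message string and into the logging call, which defers interpolation to the logging module. A minimal sketch with a hypothetical helper:

import logging

log = logging.getLogger('ec2_asg')

def report_health(healthy_instances, min_size):
    # Flagged:   log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
    # Preferred: let the logger interpolate %s only when the record is emitted.
    log.debug("ELB thinks %s instances are healthy.", healthy_instances)
    return healthy_instances >= min_size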

@ -219,7 +219,6 @@ def main():
module.fail_json(msg="timed out while waiting for the key to be re-created")
changed = True
pass
# if the key doesn't exist, create it now
else:

@ -613,7 +613,6 @@ class Wrapper(object):
except AzureException as e:
if not str(e).lower().find("temporary redirect") == -1:
time.sleep(5)
pass
else:
raise e

@ -668,7 +668,6 @@ class LxcContainerManagement(object):
build_command.append(
'%s %s' % (key, value)
)
else:
return build_command
def _get_vars(self, variables):
@ -689,7 +688,6 @@ class LxcContainerManagement(object):
_var = self.module.params.get(k)
if _var not in false_values:
return_dict[v] = _var
else:
return return_dict
def _run_command(self, build_command, unsafe_shell=False):
@ -975,7 +973,6 @@ class LxcContainerManagement(object):
time.sleep(1)
else:
return True
else:
self.failure(
lxc_container=self._container_data(),
error='Failed to start container'

@ -1229,6 +1229,7 @@ class RHEV(object):
self.__get_conn()
return self.conn.set_VM_Host(vmname, vmhost)
# pylint: disable=unreachable
VM = self.conn.get_VM(vmname)
HOST = self.conn.get_Host(vmhost)

@ -122,7 +122,6 @@ def rax_dns(module, comment, email, name, state, ttl):
domain = dns.find(name=name)
except pyrax.exceptions.NotFound:
domain = {}
pass
except Exception as e:
module.fail_json(msg='%s' % e.message)

@ -270,7 +270,6 @@ def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound as e:
record = {}
pass
except pyrax.exceptions.DomainRecordNotUnique as e:
module.fail_json(msg='%s' % e.message)

@ -246,6 +246,7 @@ USER_AGENT = "ansible-k8s-module/0.0.1"
def decode_cert_data(module):
return
# pylint: disable=unreachable
d = module.params.get("certificate_authority_data")
if d and not d.startswith("-----BEGIN"):
module.params["certificate_authority_data"] = base64.b64decode(d)
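
The unreachable check flags statements after an unconditional return. In both modules above the dead block is kept on purpose (the early return disables the code path without deleting it), so the warning is suppressed on the first unreachable line instead. A minimal sketch modelled on the k8s helper:

import base64

def decode_cert_data(params):
    return  # the code path is intentionally disabled; the body is kept for reference
    # pylint: disable=unreachable
    data = params.get("certificate_authority_data")
    if data and not data.startswith("-----BEGIN"):
        params["certificate_authority_data"] = base64.b64decode(data)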

@ -111,7 +111,6 @@ def main():
if parts != '':
return parts
else:
return ''
def run_command(command):

@ -286,7 +286,6 @@ class ModuleManager(object):
if status in ['Changes Pending']:
details = self._get_details_from_resource(resource)
self._validate_pending_status(details)
pass
elif status in ['Awaiting Initial Sync', 'Not All Devices Synced']:
pass
elif status == 'In Sync':

@ -277,7 +277,7 @@ class MavenDownloader:
if self.latest_version_found:
return self.latest_version_found
path = "/%s/maven-metadata.xml" % (artifact.path(False))
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
v = xml.xpath("/metadata/versioning/versions/version[last()]/text()")
if v:
self.latest_version_found = v[0]
@ -289,7 +289,7 @@ class MavenDownloader:
if artifact.is_snapshot():
path = "/%s/maven-metadata.xml" % (artifact.path())
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", lambda r: etree.parse(r))
xml = self._request(self.base + path, "Failed to download maven-metadata.xml", etree.parse)
timestamp = xml.xpath("/metadata/versioning/snapshot/timestamp/text()")[0]
buildNumber = xml.xpath("/metadata/versioning/snapshot/buildNumber/text()")[0]
for snapshotArtifact in xml.xpath("/metadata/versioning/snapshotVersions/snapshotVersion"):

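unnecessary-lambda flags a lambda whose body is a single call that just forwards its argument; the wrapped callable can be passed directly, as with etree.parse here and os.path.basename in the plugin loader further down. A minimal sketch:

import os

paths = ['/plugins/b.py', '/plugins/a.py']

flagged = sorted(paths, key=lambda match: os.path.basename(match))  # unnecessary-lambda
preferred = sorted(paths, key=os.path.basename)                     # identical result, no wrapper
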
@ -267,15 +267,15 @@ class NetAppESeriesFlashCache(object):
@property
def needs_more_disks(self):
if len(self.cache_detail['driveRefs']) < self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s" % (
len(self.cache_detail['driveRefs']), self.disk_count))
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
def needs_less_disks(self):
if len(self.cache_detail['driveRefs']) > self.disk_count:
self.debug("needs resize: current disk count %s < requested requested count %s" % (
len(self.cache_detail['driveRefs']), self.disk_count))
self.debug("needs resize: current disk count %s < requested requested count %s",
len(self.cache_detail['driveRefs']), self.disk_count)
return True
@property
@ -292,8 +292,8 @@ class NetAppESeriesFlashCache(object):
@property
def needs_more_capacity(self):
if self.current_size_bytes < self.requested_size_bytes:
self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
self.current_size_bytes, self.requested_size_bytes))
self.debug("needs resize: current capacity %sb is less than requested minimum %sb",
self.current_size_bytes, self.requested_size_bytes)
return True
@property
@ -405,7 +405,7 @@ def main():
try:
sp.apply()
except Exception as e:
sp.debug("Exception in apply(): \n%s" % to_native(e))
sp.debug("Exception in apply(): \n%s", to_native(e))
sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % to_native(e),
exception=traceback.format_exc())

@ -269,7 +269,7 @@ class NetAppESeriesStoragePool(object):
min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]
# filter clearly invalid/unavailable drives first
drives = select(lambda d: self._is_valid_drive(d), drives)
drives = select(self._is_valid_drive, drives)
if interface_type:
drives = select(lambda d: d['phyDriveType'] == interface_type, drives)
@ -390,7 +390,7 @@ class NetAppESeriesStoragePool(object):
msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
(self.ssid, str(err), self.state, rc))
self.debug("searching for storage pool '%s'" % storage_pool_name)
self.debug("searching for storage pool '%s'", storage_pool_name)
pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)
@ -514,7 +514,7 @@ class NetAppESeriesStoragePool(object):
return needs_migration
def migrate_raid_level(self):
self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
self.debug("migrating storage pool to raid level '%s'...", self.raid_level)
sp_raid_migrate_req = dict(
raidLevel=self.raid_level
)
@ -637,7 +637,7 @@ class NetAppESeriesStoragePool(object):
def expand_storage_pool(self):
drives_to_add = self.get_expansion_candidate_drives()
self.debug("adding %s drives to storage pool..." % len(drives_to_add))
self.debug("adding %s drives to storage pool...", len(drives_to_add))
sp_expand_req = dict(
drives=drives_to_add
)
@ -723,8 +723,8 @@ class NetAppESeriesStoragePool(object):
if self.needs_raid_level_migration:
self.debug(
"CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
self.pool_detail['raidLevel'], self.raid_level))
"CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'",
self.pool_detail['raidLevel'], self.raid_level)
changed = True
# if self.reserved_drive_count_differs:
@ -813,7 +813,7 @@ def main():
sp.apply()
except Exception:
e = get_exception()
sp.debug("Exception in apply(): \n%s" % format_exc(e))
sp.debug("Exception in apply(): \n%s", format_exc(e))
raise

@ -235,7 +235,7 @@ class NetAppESeriesVolume(object):
volumes.extend(thinvols)
self.debug("searching for volume '%s'" % volume_name)
self.debug("searching for volume '%s'", volume_name)
volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)
if volume_detail:
@ -257,7 +257,7 @@ class NetAppESeriesVolume(object):
self.module.fail_json(
msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))
self.debug("searching for storage pool '%s'" % storage_pool_name)
self.debug("searching for storage pool '%s'", storage_pool_name)
pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)
if pool_detail:
@ -277,7 +277,7 @@ class NetAppESeriesVolume(object):
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating volume '%s'" % name)
self.debug("creating volume '%s'", name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
data=json.dumps(volume_add_req), headers=HEADERS, method='POST',
@ -302,7 +302,7 @@ class NetAppESeriesVolume(object):
dataAssuranceEnabled=data_assurance_enabled,
)
self.debug("creating thin-volume '%s'" % name)
self.debug("creating thin-volume '%s'", name)
try:
(rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
data=json.dumps(thin_volume_add_req), headers=HEADERS, method='POST',
@ -318,7 +318,7 @@ class NetAppESeriesVolume(object):
def delete_volume(self):
# delete the volume
self.debug("deleting volume '%s'" % self.volume_detail['name'])
self.debug("deleting volume '%s'", self.volume_detail['name'])
try:
(rc, resp) = request(
self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
@ -445,7 +445,7 @@ class NetAppESeriesVolume(object):
action = resp['action']
percent_complete = resp['percentComplete']
self.debug('expand action %s, %s complete...' % (action, percent_complete))
self.debug('expand action %s, %s complete...', action, percent_complete)
if action == 'none':
self.debug('expand complete')
@ -469,11 +469,8 @@ class NetAppESeriesVolume(object):
elif self.state == 'present':
# check requested volume size, see if expansion is necessary
if self.volume_needs_expansion:
self.debug(
"CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
self.size_unit,
self.volume_detail[
'capacity']))
self.debug("CHANGED: requested volume size %s%s is larger than current size %sb",
self.size, self.size_unit, self.volume_detail['capacity'])
changed = True
if self.volume_properties_changed:
@ -543,7 +540,7 @@ def main():
v.apply()
except Exception:
e = get_exception()
v.debug("Exception in apply(): \n%s" % format_exc(e))
v.debug("Exception in apply(): \n%s", format_exc(e))
v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))

@ -306,7 +306,6 @@ class SolidFireVolume(object):
if changed:
if self.module.check_mode:
result_message = "Check mode, skipping changes"
pass
else:
if self.state == 'present':
if not volume_exists:

@ -126,7 +126,6 @@ def get_snapshot(module, array):
for s in array.get_volume(module.params['name'], snap='true'):
if s['name'] == snapname:
return snapname
break
except:
return None

@ -306,7 +306,6 @@ def setInterfaceOption(module, lines, iface, option, raw_value, state):
module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
return changed, lines
pass
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):

@ -183,7 +183,6 @@ class BaseFileCacheModule(BaseCacheModule):
return False
else:
display.warning("error in '%s' cache plugin while trying to stat %s : %s" % (self.plugin_name, cachefile, to_bytes(e)))
pass
def delete(self, key):
try:

@ -78,22 +78,22 @@ class CallbackModule(CallbackBase):
self.hostname = socket.gethostname()
def runner_on_failed(self, host, res, ignore_errors=False):
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_ok(self, host, res):
self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
self.logger.info('%s ansible-command: task execution OK; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_skipped(self, host, item=None):
self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s' % (self.hostname, host, 'skipped'))
self.logger.info('%s ansible-command: task execution SKIPPED; host: %s; message: %s', self.hostname, host, 'skipped')
def runner_on_unreachable(self, host, res):
self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
self.logger.error('%s ansible-command: task execution UNREACHABLE; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def runner_on_async_failed(self, host, res, jid):
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s' % (self.hostname, host, self._dump_results(res)))
self.logger.error('%s ansible-command: task execution FAILED; host: %s; message: %s', self.hostname, host, self._dump_results(res))
def playbook_on_import_for_host(self, host, imported_file):
self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s' % (self.hostname, host, imported_file))
self.logger.info('%s ansible-command: playbook IMPORTED; host: %s; message: imported file %s', self.hostname, host, imported_file)
def playbook_on_not_import_for_host(self, host, missing_file):
self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s' % (self.hostname, host, missing_file))
self.logger.info('%s ansible-command: playbook NOT IMPORTED; host: %s; message: missing file %s', self.hostname, host, missing_file)

@ -502,7 +502,6 @@ class Connection(ConnectionBase):
# unable to save keys, including scenario when key was invalid
# and caught earlier
traceback.print_exc()
pass
fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
self.ssh.close()

@ -413,7 +413,7 @@ class PluginLoader:
for i in self._get_paths():
all_matches.extend(glob.glob(os.path.join(i, "*.py")))
for path in sorted(all_matches, key=lambda match: os.path.basename(match)):
for path in sorted(all_matches, key=os.path.basename):
name = os.path.basename(os.path.splitext(path)[0])
if '__init__' in name:

@ -185,9 +185,7 @@ class LookupModule(LookupBase):
path = self.find_file_in_search_path(variables, subdir, fn, ignore_missing=True)
if path is not None:
return [path]
else:
if skip:
return []
else:
raise AnsibleLookupError("No file was found when using with_first_found. Use the 'skip: true' option to allow this task to be skipped if no "
"files are found")

@ -127,7 +127,6 @@ class StrategyModule(StrategyBase):
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if run_once:

@ -266,7 +266,6 @@ class StrategyModule(StrategyBase):
# just ignore any errors during task name templating,
# we don't care if it just shows the raw name
display.debug("templating failed for some reason")
pass
display.debug("here goes the callback...")
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
task.name = saved_name

@ -8,7 +8,6 @@ attribute-defined-outside-init
bad-continuation
bad-indentation
bad-mcs-classmethod-argument
bad-open-mode
bad-whitespace
bare-except
blacklisted-name
@ -34,8 +33,6 @@ invalid-encoded-data
invalid-name
line-too-long
locally-disabled
logging-format-interpolation
logging-not-lazy
method-hidden
misplaced-comparison-constant
missing-docstring
@ -52,18 +49,14 @@ old-style-class
pointless-statement
pointless-string-statement
protected-access
raising-bad-type
redefined-builtin
redefined-outer-name
redefined-variable-type
redundant-unittest-assert
reimported
relative-import
signature-differs
simplifiable-if-statement
super-init-not-called
superfluous-parens
suppressed-message
too-few-public-methods
too-many-ancestors
too-many-arguments
@ -80,10 +73,7 @@ too-many-statements
undefined-loop-variable
ungrouped-imports
unidiomatic-typecheck
unnecessary-lambda
unnecessary-pass
unneeded-not
unreachable
unsubscriptable-object
unsupported-membership-test
unused-argument
@ -91,7 +81,6 @@ unused-import
unused-variable
unused-wildcard-import
used-before-assignment
useless-else-on-loop
wildcard-import
wrong-import-order
wrong-import-position

@ -87,6 +87,6 @@ class TestAnsibleModuleExitJson(unittest.TestCase):
for i in self.unparsable_cases:
self.assertRaises(
ValueError,
lambda data: _filter_non_json_lines(data),
_filter_non_json_lines,
data=i
)

@ -106,7 +106,7 @@ class TestManager(unittest.TestCase):
params = NONE_PARAMS.copy()
del params['vdirect_ip']
vdirect_file.VdirectFile(params)
self.assertFalse("KeyError was not thrown for missing parameter")
self.fail("KeyError was not thrown for missing parameter")
except KeyError:
assert True
@ -134,7 +134,7 @@ class TestManager(unittest.TestCase):
file = vdirect_file.VdirectFile(NONE_PARAMS)
try:
file.upload("missing_file.vm")
self.assertFalse("IOException was not thrown for missing file")
self.fail("IOException was not thrown for missing file")
except IOError:
assert True
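
redundant-unittest-assert flags assertTrue/assertFalse calls on a constant, such as assertFalse("some message"): they assert nothing the test computed. Where the intent was "the expected exception never happened", self.fail() states it directly. A minimal sketch with hypothetical test data:

import unittest

class MissingParameterTest(unittest.TestCase):
    def test_missing_key_raises(self):
        params = {}  # hypothetical parameters with the required key absent
        try:
            _ = params['vdirect_ip']
            self.fail("KeyError was not thrown for missing parameter")
        except KeyError:
            pass

self.assertRaises(KeyError, ...) would be tighter still, but the commit keeps the existing try/except shape and only swaps the constant assertion for self.fail().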

@ -467,7 +467,6 @@ class TestVaultEditor(unittest.TestCase):
try:
ve.decrypt_file(v11_file.name)
except errors.AnsibleError:
raise
error_hit = True
# verify decrypted content
@ -493,7 +492,6 @@ class TestVaultEditor(unittest.TestCase):
try:
ve.rekey_file(v10_file.name, vault.match_encrypt_secret(new_secrets)[1])
except errors.AnsibleError:
raise
error_hit = True
# verify decrypted content
@ -510,7 +508,6 @@ class TestVaultEditor(unittest.TestCase):
try:
dec_data = vl.decrypt(fdata)
except errors.AnsibleError:
raise
error_hit = True
os.unlink(v10_file.name)
