Cleaning up diffs after extras modules merge

pull/18777/head
James Cammarata 8 years ago committed by Matt Clay
parent 2cfa25c930
commit c65ba07d2c

@ -31,11 +31,6 @@ options:
description:
- the source region that AMI should be copied from
required: true
region:
description:
- the destination region that AMI should be copied to
required: true
aliases: ['aws_region', 'ec2_region', 'dest_region']
source_image_id:
description:
- the id of the image in source region that should be copied
@ -80,7 +75,9 @@ options:
default: null
author: Amir Moulavi <amir.moulavi@gmail.com>
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -34,7 +34,9 @@ options:
required: false
default: null
aliases: ['elb_ids', 'ec2_elbs']
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -202,7 +202,7 @@ interface:
mac_address:
description: interface's physical address
type: string
sample: "06:9a:27:6a:6f:99"
sample: "00:00:5E:00:53:23"
owner_id:
description: aws account id
type: string

@ -91,13 +91,6 @@ def list_ec2_snapshots_boto3(connection, module):
module.exit_json(**snaked_network_interfaces_result)
def get_error_message(xml_string):
root = ET.fromstring(xml_string)
for message in root.findall('.//Message'):
return message.text
def get_eni_info(interface):
# Private addresses
@ -138,15 +131,13 @@ def get_eni_info(interface):
def list_eni(connection, module):
eni_id = module.params.get("eni_id")
filters = module.params.get("filters")
interface_dict_array = []
try:
all_eni = connection.get_all_network_interfaces(filters=filters)
except BotoServerError as e:
module.fail_json(msg=get_error_message(e.args[2]))
module.fail_json(msg=e.message)
for interface in all_eni:
interface_dict_array.append(get_eni_info(interface))

@ -23,7 +23,7 @@ module: ec2_vol_facts
short_description: Gather facts about ec2 volumes in AWS
description:
- Gather facts about ec2 volumes in AWS
version_added: "2.0"
version_added: "2.1"
author: "Rob White (@wimnat)"
options:
filters:

@ -31,19 +31,15 @@ options:
- The VPC ID for the VPC in which to manage the Internet Gateway.
required: true
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
state:
description:
- Create or terminate the IGW
required: false
default: present
choices: [ 'present', 'absent' ]
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -24,7 +24,7 @@ module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.1"
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:

@ -70,8 +70,9 @@ options:
description:
- "VPC ID of the VPC in which to create the route table."
required: true
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -31,14 +31,9 @@ options:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeRouteTables.html) for possible filters.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -24,7 +24,7 @@ short_description: Manage subnets in AWS virtual private clouds
description:
- Manage subnets in AWS virtual private clouds
version_added: "2.0"
author: Robert Estelle, @erydo
author: Robert Estelle (@erydo)
options:
az:
description:
@ -33,7 +33,7 @@ options:
default: null
cidr:
description:
- "The CIDR block for the subnet. E.g. 10.0.0.0/16. Only required when state=present."
- "The CIDR block for the subnet. E.g. 192.0.2.0/24. Only required when state=present."
required: false
default: null
tags:
@ -53,8 +53,9 @@ options:
- "VPC ID of the VPC in which to create the subnet."
required: false
default: null
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
@ -159,7 +160,7 @@ def get_resource_tags(vpc_conn, resource_id):
vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run):
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
try:
cur_tags = get_resource_tags(vpc_conn, resource_id)
if cur_tags == tags:
@ -167,11 +168,11 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, dry_run):
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only:
vpc_conn.delete_tags(resource_id, to_delete, dry_run=dry_run)
vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
if to_add:
vpc_conn.create_tags(resource_id, to_add, dry_run=dry_run)
vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
latest_tags = get_resource_tags(vpc_conn, resource_id)
return {'changed': True, 'tags': latest_tags}
@ -203,13 +204,6 @@ def ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags, check_mode):
subnet.tags = tags
changed = True
if tags is not None:
tag_result = ensure_tags(vpc_conn, subnet.id, tags, add_only=True,
dry_run=check_mode)
tags = tag_result['tags']
changed = changed or tag_result['changed']
else:
tags = get_resource_tags(vpc_conn, subnet.id)
subnet_info = get_subnet_info(subnet)
return {
@ -222,11 +216,9 @@ def ensure_subnet_absent(vpc_conn, vpc_id, cidr, check_mode):
subnet = get_matching_subnet(vpc_conn, vpc_id, cidr)
if subnet is None:
return {'changed': False}
elif check_mode:
return {'changed': True}
try:
vpc_conn.delete_subnet(subnet.id)
vpc_conn.delete_subnet(subnet.id, dry_run=check_mode)
return {'changed': True}
except EC2ResponseError as e:
raise AnsibleVPCSubnetDeletionException(
@ -244,16 +236,14 @@ def main():
tags = dict(default=None, required=False, type='dict', aliases=['resource_tags']),
vpc_id = dict(default=None, required=True)
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_BOTO:
module.fail_json(msg='boto is required for this module')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if not region:
module.fail_json(msg='Region must be specified')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
@ -267,14 +257,14 @@ def main():
tags = module.params.get('tags')
cidr = module.params.get('cidr')
az = module.params.get('az')
state = module.params.get('state', 'present')
state = module.params.get('state')
try:
if state == 'present':
result = ensure_subnet_present(vpc_conn, vpc_id, cidr, az, tags,
result = ensure_subnet_present(connection, vpc_id, cidr, az, tags,
check_mode=module.check_mode)
elif state == 'absent':
result = ensure_subnet_absent(vpc_conn, vpc_id, cidr,
result = ensure_subnet_absent(connection, vpc_id, cidr,
check_mode=module.check_mode)
except AnsibleVPCSubnetException as e:
module.fail_json(msg=str(e))

@ -31,14 +31,9 @@ options:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See U(http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html) for possible filters.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. See U(http://docs.aws.amazon.com/general/latest/gr/rande.html#ec2_region)
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -40,12 +40,6 @@ options:
- The passphrase for the instance key pair. The key must use DES or 3DES encryption for this module to decrypt it. You can use openssl to convert your password protected keys if they do not use DES or 3DES. ex) openssl rsa -in current_key -out new_key -des3.
required: false
default: null
region:
description:
- The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
default: null
aliases: [ 'aws_region', 'ec2_region' ]
wait:
version_added: "2.0"
description:
@ -60,7 +54,9 @@ options:
required: false
default: 120
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -24,7 +24,8 @@ module: ecs_taskdefinition
short_description: register a task definition in ecs
description:
- Creates or terminates task definitions
version_added: "1.9"
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, boto, botocore, boto3 ]
options:
state:

@ -54,10 +54,10 @@ options:
required: false
next_marker:
description:
- Some requests such as list_command: hosted_zones will return a maximum
- "Some requests such as list_command: hosted_zones will return a maximum
number of entries - EG 100. If the number of entries exceeds this maximum
another request can be sent using the NextMarker entry from the first response
to get the next page of results
to get the next page of results"
required: false
delegation_set_id:
description:
@ -65,8 +65,8 @@ options:
required: false
start_record_name:
description:
- The first name in the lexicographic ordering of domain names that you want
the list_command: record_sets to start listing from
- "The first name in the lexicographic ordering of domain names that you want
the list_command: record_sets to start listing from"
required: false
type:
description:
@ -89,9 +89,9 @@ options:
required: false
hosted_zone_method:
description:
- This is used in conjunction with query: hosted_zone.
- "This is used in conjunction with query: hosted_zone.
It allows for listing details, counts or tags of various
hosted zone details.
hosted zone details."
required: false
choices: [
'details',
@ -103,9 +103,9 @@ options:
default: 'list'
health_check_method:
description:
- This is used in conjunction with query: health_check.
- "This is used in conjunction with query: health_check.
It allows for listing details, counts or tags of various
health check details.
health check details."
required: false
choices: [
'list',

@ -93,7 +93,9 @@ options:
default: 3
choices: [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
author: "zimbatm (@zimbatm)"
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -50,7 +50,9 @@ options:
- Comment associated with the zone
required: false
default: ''
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
author: "Christopher Troup (@minichate)"
'''

@ -42,11 +42,6 @@ options:
- The JSON policy as a string.
required: false
default: null
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
required: false
default: null
s3_url:
description:
      - S3 URL endpoint for usage with Ceph, Eucalyptus, fakes3, etc. Otherwise assumes AWS
@ -79,8 +74,9 @@ options:
required: false
default: null
choices: [ 'yes', 'no' ]
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -50,11 +50,6 @@ options:
- "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
required: false
default: null
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
required: false
default: null
rule_id:
description:
- "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
@ -89,8 +84,9 @@ options:
- "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
required: false
default: null
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -30,11 +30,6 @@ options:
description:
- "Name of the s3 bucket."
required: true
region:
description:
- "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
required: false
default: null
state:
description:
- "Enable or disable logging."
@ -51,8 +46,9 @@ options:
- "The prefix that should be prepended to the generated log files written to the target_bucket."
required: false
default: ""
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -63,11 +63,11 @@ options:
default: []
purge_subscriptions:
description:
- Whether to purge any subscriptions not listed here. NOTE: AWS does not
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
allow you to purge any PendingConfirmation subscriptions, so if any
exist and would be purged, they are silently skipped. This means that
somebody could come back later and confirm the subscription. Sorry.
Blame Amazon.
Blame Amazon."
required: False
default: True
extends_documentation_fragment: aws

@ -62,7 +62,9 @@ options:
default: null
notes:
- In order to use the assumed role in a following playbook task you must pass the access_key, access_secret and access_token
extends_documentation_fragment: aws
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''

@ -239,7 +239,7 @@ EXAMPLES = '''
imageOffer: "UbuntuServer"
OSDiskName: "osdiskforlinuxsimple"
nicName: "myVMNic"
addressPrefix: "10.0.0.0/16"
addressPrefix: "192.0.2.0/24"
subnetName: "Subnet"
subnetPrefix: "10.0.0.0/24"
storageAccountType: "Standard_LRS"

@ -85,7 +85,7 @@ notes:
- To use this module, it is required to set the below environment variables which enables access to the
Centurylink Cloud
- CLC_V2_API_USERNAME, the account login id for the centurylink cloud
- CLC_V2_API_PASSWORD, the account passwod for the centurylink cloud
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
@ -302,7 +302,7 @@ class ClcAlertPolicy:
"""
Ensures that the alert policy is present
:return: (changed, policy)
canged: A flag representing if anything is modified
changed: A flag representing if anything is modified
policy: the created/updated alert policy
"""
changed = False
@ -327,7 +327,7 @@ class ClcAlertPolicy:
"""
Ensures that the alert policy is absent
:return: (changed, None)
canged: A flag representing if anything is modified
changed: A flag representing if anything is modified
"""
changed = False
p = self.module.params
@ -349,10 +349,10 @@ class ClcAlertPolicy:
def _ensure_alert_policy_is_updated(self, alert_policy):
"""
Ensures the aliert policy is updated if anything is changed in the alert policy configuration
:param alert_policy: the targetalert policy
Ensures the alert policy is updated if anything is changed in the alert policy configuration
:param alert_policy: the target alert policy
:return: (changed, policy)
canged: A flag representing if anything is modified
changed: A flag representing if anything is modified
policy: the updated the alert policy
"""
changed = False

@ -28,7 +28,7 @@ module: clc_group
short_description: Create/delete Server Groups at Centurylink Cloud
description:
    - Create or delete Server Groups at Centurylink Cloud
version_added: 1.0
version_added: "2.0"
options:
name:
description:
@ -70,8 +70,8 @@ notes:
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN: the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS: the account alias associated with the centurylink cloud
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

@ -28,7 +28,7 @@ module: clc_publicip
short_description: Add and Delete public ips on servers in CenturyLink Cloud.
description:
- An Ansible module to add or delete public ip addresses on an existing server or servers in CenturyLink Cloud.
version_added: 1.0
version_added: "2.0"
options:
protocol:
description:
@ -70,8 +70,8 @@ notes:
- CLC_V2_API_PASSWORD, the account password for the centurylink cloud
- Alternatively, the module accepts the API token and account alias. The API token can be generated using the
CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
- CLC_V2_API_TOKEN: the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS: the account alias associated with the centurylink cloud
- CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
- CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
- Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''

@ -374,7 +374,7 @@ def main():
result = acs_acc.get_result(account)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -26,7 +26,8 @@ DOCUMENTATION = '''
---
module: cs_affinitygroup
short_description: Manages affinity groups on Apache CloudStack based clouds.
description: Create and remove affinity groups.
description:
- Create and remove affinity groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
@ -60,6 +61,11 @@ options:
- Account the affinity group is related to.
required: false
default: null
project:
description:
- Name of the project the affinity group is related to.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
@ -104,6 +110,21 @@ affinity_type:
returned: success
type: string
sample: host anti-affinity
project:
description: Name of project the affinity group is related to.
returned: success
type: string
sample: Production
domain:
description: Domain the affinity group is related to.
returned: success
type: string
sample: example domain
account:
description: Account the affinity group is related to.
returned: success
type: string
sample: example account
'''
# import cloudstack common
@ -223,7 +244,7 @@ def main():
result = acs_ag.get_result(affinity_group)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -242,8 +242,8 @@ def main():
path = dict(required=True),
state = dict(choices=['present', 'absent'], default='present'),
network_domain = dict(default=None),
clean_up = dict(choices=BOOLEANS, default=False),
poll_async = dict(choices=BOOLEANS, default=True),
clean_up = dict(type='bool', default=False),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -263,7 +263,7 @@ def main():
result = acs_dom.get_result(domain)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -26,7 +26,8 @@ DOCUMENTATION = '''
---
module: cs_firewall
short_description: Manages firewall rules on Apache CloudStack based clouds.
description: Creates and removes firewall rules.
description:
- Creates and removes firewall rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
@ -102,6 +103,12 @@ options:
- Name of the project the firewall rule is related to.
required: false
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
@ -378,10 +385,11 @@ def main():
start_port = dict(type='int', aliases=['port'], default=None),
end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
@ -414,7 +422,7 @@ def main():
result = acs_fw.get_result(fw_rule)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -53,7 +53,6 @@ options:
state:
description:
- State of the instance.
- C(restored) added in version 2.1.
required: false
default: 'present'
choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ]
@ -212,7 +211,7 @@ options:
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- If you want to delete all tags, set a empty list e.g. C(tags: []).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
aliases: [ 'tag' ]
@ -899,12 +898,15 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
def restore_instance(self):
instance = self.get_instance()
if not instance:
instance = self.deploy_instance()
return instance
self.result['changed'] = True
        # in check mode instance may not be instantiated
if instance:
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
args['virtualmachineid'] = instance['id']
res = self.cs.restoreVirtualMachine(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
@ -963,7 +965,7 @@ def main():
user_data = dict(default=None),
zone = dict(default=None),
ssh_key = dict(default=None),
force = dict(choices=BOOLEANS, default=False),
force = dict(type='bool', default=False),
tags = dict(type='list', aliases=[ 'tag' ], default=None),
vpc = dict(default=None),
poll_async = dict(type='bool', default=True),
@ -998,6 +1000,7 @@ def main():
instance = acs_instance.expunge_instance()
elif state in ['restored']:
acs_instance.present_instance()
instance = acs_instance.restore_instance()
elif state in ['present', 'deployed']:
@ -1020,7 +1023,7 @@ def main():
result = acs_instance.get_result(instance)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -194,7 +194,7 @@ def main():
result = acs_ig.get_result(instance_group)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -70,7 +70,7 @@ options:
default: null
zone:
description:
- Name of the zone in which the virtual machine is in.
- Name of the zone in which the IP address is in.
- If not set, default zone is used.
required: false
default: null
@ -88,7 +88,7 @@ EXAMPLES = '''
module: cs_ip_address
network: My Network
register: ip_address
when: create_instance|changed
when: instance.public_ip is undefined
# Disassociate an IP address
- local_action:
@ -210,7 +210,7 @@ def main():
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -233,7 +233,7 @@ def main():
result = acs_ip_address.get_result(ip_address)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -304,10 +304,10 @@ def main():
account = dict(default=None),
project = dict(default=None),
checksum = dict(default=None),
is_ready = dict(choices=BOOLEANS, default=False),
bootable = dict(choices=BOOLEANS, default=True),
is_featured = dict(choices=BOOLEANS, default=False),
is_dynamically_scalable = dict(choices=BOOLEANS, default=False),
is_ready = dict(type='bool', default=False),
bootable = dict(type='bool', default=True),
is_featured = dict(type='bool', default=False),
is_dynamically_scalable = dict(type='bool', default=False),
state = dict(choices=['present', 'absent'], default='present'),
))
@ -328,7 +328,7 @@ def main():
result = acs_iso.get_result(iso)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -348,12 +348,12 @@ def main():
ip_address = dict(required=True, aliases=['public_ip']),
cidr = dict(default=None),
project = dict(default=None),
open_firewall = dict(choices=BOOLEANS, default=False),
open_firewall = dict(type='bool', default=False),
tags = dict(type='list', aliases=['tag'], default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -373,7 +373,7 @@ def main():
result = acs_lb_rule.get_result(rule)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -333,7 +333,7 @@ def main():
domain = dict(default=None),
project = dict(default=None),
account = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -353,7 +353,7 @@ def main():
result = acs_lb_rule_member.get_result(rule)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -517,14 +517,14 @@ def main():
vlan = dict(default=None),
vpc = dict(default=None),
isolated_pvlan = dict(default=None),
clean_up = dict(type='bool', choices=BOOLEANS, default=False),
clean_up = dict(type='bool', default=False),
network_domain = dict(default=None),
state = dict(choices=['present', 'absent', 'restarted' ], default='present'),
acl_type = dict(choices=['account', 'domain'], default='account'),
project = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
@ -553,7 +553,7 @@ def main():
result = acs_network.get_result(network)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -53,21 +53,21 @@ options:
required: false
default: 'tcp'
choices: [ 'tcp', 'udp' ]
public_port
public_port:
description:
- Start public port for this rule.
required: true
public_end_port
public_end_port:
description:
- End public port for this rule.
- If not specified equal C(public_port).
required: false
default: null
private_port
private_port:
description:
- Start private port for this rule.
required: true
private_end_port
private_end_port:
description:
- End private port for this rule.
- If not specified equal C(private_port).
@ -350,14 +350,14 @@ def main():
private_port = dict(type='int', required=True),
private_end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
open_firewall = dict(choices=BOOLEANS, default=False),
open_firewall = dict(type='bool', default=False),
vm_guest_ip = dict(default=None),
vm = dict(default=None),
zone = dict(default=None),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -376,7 +376,7 @@ def main():
result = acs_pf.get_result(pf_rule)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -300,7 +300,7 @@ def main():
result = acs_project.get_result(project)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -23,9 +23,11 @@ ANSIBLE_METADATA = {'status': ['stableinterface'],
'version': '1.0'}
DOCUMENTATION = '''
---
module: cs_securitygroup
short_description: Manages security groups on Apache CloudStack based clouds.
description: Create and remove security groups.
description:
- Create and remove security groups.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
@ -210,7 +212,7 @@ def main():
result = acs_sg.get_result(sg)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -26,7 +26,8 @@ DOCUMENTATION = '''
---
module: cs_securitygroup_rule
short_description: Manages security group rules on Apache CloudStack based clouds.
description: Add and remove security group rules.
description:
- Add and remove security group rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
@ -383,7 +384,7 @@ def main():
end_port = dict(type='int', default=None),
state = dict(choices=['present', 'absent'], default='present'),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
@ -413,7 +414,7 @@ def main():
result = acs_sg_rule.get_result(sg_rule)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -244,7 +244,7 @@ def main():
result = acs_sshkey.get_result(ssh_key)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -251,7 +251,7 @@ def main():
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -271,7 +271,7 @@ def main():
result = acs_static_nat.get_result(ip_address)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -605,19 +605,19 @@ def main():
vm = dict(default=None),
snapshot = dict(default=None),
os_type = dict(default=None),
is_ready = dict(type='bool', choices=BOOLEANS, default=False),
is_public = dict(type='bool', choices=BOOLEANS, default=True),
is_featured = dict(type='bool', choices=BOOLEANS, default=False),
is_dynamically_scalable = dict(type='bool', choices=BOOLEANS, default=False),
is_extractable = dict(type='bool', choices=BOOLEANS, default=False),
is_routing = dict(type='bool', choices=BOOLEANS, default=False),
is_ready = dict(type='bool', default=False),
is_public = dict(type='bool', default=True),
is_featured = dict(type='bool', default=False),
is_dynamically_scalable = dict(type='bool', default=False),
is_extractable = dict(type='bool', default=False),
is_routing = dict(type='bool', default=False),
checksum = dict(default=None),
template_filter = dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
hypervisor = dict(choices=CS_HYPERVISORS, default=None),
requires_hvm = dict(type='bool', default=False),
password_enabled = dict(type='bool', default=False),
template_tag = dict(default=None),
sshkey_enabled = dict(type='bool', choices=BOOLEANS, default=False),
sshkey_enabled = dict(type='bool', default=False),
format = dict(choices=['QCOW2', 'RAW', 'VHD', 'OVA'], default=None),
details = dict(default=None),
bits = dict(type='int', choices=[ 32, 64 ], default=64),
@ -628,7 +628,7 @@ def main():
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -661,7 +661,7 @@ def main():
result = acs_tpl.get_result(tpl)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -413,7 +413,7 @@ def main():
last_name = dict(default=None),
password = dict(default=None, no_log=True),
timezone = dict(default=None),
poll_async = dict(choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
module = AnsibleModule(
@ -444,7 +444,7 @@ def main():
result = acs_acc.get_result(user)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -26,7 +26,8 @@ DOCUMENTATION = '''
---
module: cs_vmsnapshot
short_description: Manages VM snapshots on Apache CloudStack based clouds.
description: Create, remove and revert VM from snapshots.
description:
- Create, remove and revert VM from snapshots.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
@ -260,12 +261,12 @@ def main():
vm = dict(required=True),
description = dict(default=None),
zone = dict(default=None),
snapshot_memory = dict(choices=BOOLEANS, default=False),
snapshot_memory = dict(type='bool', default=False),
state = dict(choices=['present', 'absent', 'revert'], default='present'),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
poll_async = dict(type='bool', choices=BOOLEANS, default=True),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
@ -292,7 +293,7 @@ def main():
result = acs_vmsnapshot.get_result(snapshot)
except CloudStackException, e:
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)

@ -258,10 +258,8 @@ class AnsibleCloudStackVolume(AnsibleCloudStack):
if not disk_offering:
return None
args = {}
args['domainid'] = self.get_domain(key='id')
disk_offerings = self.cs.listDiskOfferings(**args)
# Do not add domain filter for disk offering listing.
disk_offerings = self.cs.listDiskOfferings()
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:

@ -199,8 +199,8 @@ EXAMPLES = '''
record_data:
- '192.0.2.23'
- '10.4.5.6'
- '10.7.8.9'
- '192.168.5.10'
- '198.51.100.5'
- '203.0.113.10'
# Change the value of an existing record with multiple record_data.
- gcdns_record:

@ -260,9 +260,9 @@ ovirt:
url: https://ovirt.example.com
hostname: testansible
domain: ansible.local
ip: 192.168.1.100
ip: 192.0.2.100
netmask: 255.255.255.0
gateway: 192.168.1.1
gateway: 192.0.2.1
rootpw: bigsecret
'''

@ -175,9 +175,9 @@ vm:
"size": 40
}
],
"eth0": "00:1b:4a:1f:de:f4",
"eth1": "00:1b:4a:1f:de:f5",
"eth2": "00:1b:4a:1f:de:f6",
"eth0": "00:00:5E:00:53:00",
"eth1": "00:00:5E:00:53:01",
"eth2": "00:00:5E:00:53:02",
"exists": true,
"failed": false,
"ifaces": [

@ -234,12 +234,6 @@ class LibvirtConnection(object):
self.module = module
cmd = "uname -r"
rc, stdout, stderr = self.module.run_command(cmd)
if "xen" in stdout:
conn = libvirt.open(None)
else:
conn = libvirt.open(uri)
if not conn:
@ -253,14 +247,12 @@ class LibvirtConnection(object):
results = []
# Get active entries
entries = self.conn.listStoragePools()
for name in entries:
for name in self.conn.listStoragePools():
entry = self.conn.storagePoolLookupByName(name)
results.append(entry)
# Get inactive entries
entries = self.conn.listDefinedStoragePools()
for name in entries:
for name in self.conn.listDefinedStoragePools():
entry = self.conn.storagePoolLookupByName(name)
results.append(entry)
@ -445,24 +437,18 @@ class VirtStoragePool(object):
return self.conn.find_entry(entryid)
def list_pools(self, state=None):
entries = self.conn.find_entry(-1)
results = []
for x in entries:
try:
for entry in self.conn.find_entry(-1):
if state:
entrystate = self.conn.get_status2(x)
if entrystate == state:
results.append(x.name())
if state == self.conn.get_status2(entry):
results.append(entry.name())
else:
results.append(x.name())
except:
pass
results.append(entry.name())
return results
def state(self):
entries = self.list_pools()
results = []
for entry in entries:
for entry in self.list_pools():
state_blurb = self.conn.get_status(entry)
results.append("%s %s" % (entry,state_blurb))
return results
@ -509,13 +495,12 @@ class VirtStoragePool(object):
def refresh(self, entryid):
return self.conn.refresh(entryid)
def info(self, facts_mode='info'):
return self.facts(facts_mode)
def info(self):
return self.facts(facts_mode='info')
def facts(self, facts_mode='facts'):
entries = self.list_pools()
results = dict()
for entry in entries:
for entry in self.list_pools():
results[entry] = dict()
if self.conn.find_entry(entry):
data = self.conn.get_info(entry)

@ -166,7 +166,7 @@ openstack_ports:
description: The MAC address.
returned: success
type: string
sample: "fa:16:30:5f:10:f1"
sample: "00:00:5E:00:53:42"
name:
description: The port name.
returned: success

@ -155,9 +155,9 @@ def main():
choices=['none', 'http', 'icmp', 'oco']),
state=dict(default='present', choices=['present', 'absent']),
endpoint=dict(required=True),
application_key=dict(required=True),
application_secret=dict(required=True),
consumer_key=dict(required=True),
application_key=dict(required=True, no_log=True),
application_secret=dict(required=True, no_log=True),
consumer_key=dict(required=True, no_log=True),
timeout=dict(default=120, type='int')
)
)

@ -72,8 +72,8 @@ EXAMPLES = '''
state: present
label: my_entity
named_ip_addresses:
web_box: 192.168.0.10
db_box: 192.168.0.11
web_box: 192.0.2.4
db_box: 192.0.2.5
meta:
hurf: durf
register: the_entity

@ -2,17 +2,21 @@
# Copyright (c) 2015 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# This file is part of Ansible
#
# http://www.apache.org/licenses/LICENSE-2.0
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
@ -28,7 +32,6 @@ version_added: "2.0"
author: Peter Sprygada (@privateip)
options:
fw_rules:
version_added: "2.0"
description:
- A list of firewall rules to be added to the gateway, Please see examples on valid entries
required: True
@ -50,12 +53,12 @@ EXAMPLES = '''
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.168.2.11
dest_ip: 192.0.2.23
- description: "ben testing 2"
source_ip: 192.168.2.100
source_ip: 192.0.2.50
source_port: "Any"
dest_port: "22"
dest_ip: 192.168.2.13
dest_ip: 192.0.2.101
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"

@ -57,8 +57,8 @@ EXAMPLES = '''
state: 'present'
nat_rules:
- rule_type: SNAT
original_ip: 192.168.2.10
translated_ip: 107.189.95.208
original_ip: 192.0.2.42
translated_ip: 203.0.113.23
#example for a DNAT
- hosts: localhost
@ -70,9 +70,9 @@ EXAMPLES = '''
state: 'present'
nat_rules:
- rule_type: DNAT
original_ip: 107.189.95.208
original_ip: 203.0.113.23
original_port: 22
translated_ip: 192.168.2.10
translated_ip: 192.0.2.42
translated_port: 22
'''

@ -139,7 +139,7 @@ Example from Ansible playbook
- name: create the VM
vmware_guest:
validate_certs: False
hostname: 192.168.1.209
hostname: 192.0.2.44
username: administrator@vsphere.local
password: vmware
name: testvm_2
@ -159,7 +159,7 @@ Example from Ansible playbook
osid: centos64guest
scsi: paravirtual
datacenter: datacenter1
esxi_hostname: 192.168.1.117
esxi_hostname: 192.0.2.117
template: template_el7
wait_for_ip_address: yes
register: deploy
@ -536,11 +536,8 @@ class PyVmomiHelper(object):
if current_state == expected_state:
result['changed'] = False
result['failed'] = False
else:
task = None
try:
if expected_state == 'poweredoff':
task = vm.PowerOff()
@ -597,9 +594,6 @@ class PyVmomiHelper(object):
mac = device.macAddress
ips = list(device.ipAddress)
netDict[mac] = ips
#facts['network'] = {}
#facts['network']['ipaddress_v4'] = None
#facts['network']['ipaddress_v6'] = None
for k,v in netDict.iteritems():
for ipaddress in v:
if ipaddress:
@ -609,7 +603,6 @@ class PyVmomiHelper(object):
facts['ipv4'] = ipaddress
for idx,entry in enumerate(vm.config.hardware.device):
if not hasattr(entry, 'macAddress'):
continue
@ -624,7 +617,6 @@ class PyVmomiHelper(object):
}
facts['hw_interfaces'].append('eth'+str(idx))
#import epdb; epdb.st()
return facts

@ -51,6 +51,12 @@ options:
- The port to connect to
required: false
default: 27017
login_database:
version_added: "2.0"
description:
- The database where login credentials are stored
required: false
default: null
replica_set:
version_added: "1.6"
description:
@ -336,6 +342,7 @@ def main():
login_password=dict(default=None),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
login_database=dict(default=None),
replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
name=dict(required=True, aliases=['user']),
@ -396,7 +403,7 @@ def main():
module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided')
if login_user is not None and login_password is not None:
client.admin.authenticate(login_user, login_password)
client.admin.authenticate(login_user, login_password, source=login_database)
elif LooseVersion(PyMongoVersion) >= LooseVersion('3.0'):
if db_name != "admin":
module.fail_json(msg='The localhost login exception only allows the first admin account to be created')

@ -102,7 +102,7 @@ notes:
this needs to be in the redis.conf in the masterauth variable
requirements: [ redis ]
author: Xabier Larrakoetxea
author: "Xabier Larrakoetxea (@slok)"
'''
EXAMPLES = '''

@ -79,7 +79,7 @@ options:
master_ssl:
description:
- same as mysql variable
possible values: 0,1
choices: [ 0, 1 ]
master_ssl_ca:
description:
- same as mysql variable
@ -96,7 +96,7 @@ options:
description:
- same as mysql variable
master_auto_position:
descrtiption:
description:
- does the host uses GTID based replication or not
required: false
default: null

@ -69,7 +69,7 @@ notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: Daniel Schep
author: "Daniel Schep (@dschep)"
'''
EXAMPLES = '''

@ -113,7 +113,7 @@ notes:
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author: Jens Depuydt
author: "Jens Depuydt (@jensdepuydt)"
'''
EXAMPLES = '''

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = """
---
module: vertica_configuration
version_added: '1.0'
version_added: '2.0'
short_description: Updates Vertica configuration parameters.
description:
- Updates Vertica configuration parameters.
@ -71,7 +71,7 @@ notes:
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: Dariusz Owczarek
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = """
---
module: vertica_facts
version_added: '1.0'
version_added: '2.0'
short_description: Gathers Vertica database facts.
description:
- Gathers Vertica database facts.
@ -63,7 +63,7 @@ notes:
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: Dariusz Owczarek
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = """
---
module: vertica_role
version_added: '1.0'
version_added: '2.0'
short_description: Adds or removes Vertica database roles and assigns roles to them.
description:
- Adds or removes Vertica database role and, optionally, assign other roles.
@ -79,7 +79,7 @@ notes:
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: Dariusz Owczarek
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = """
---
module: vertica_schema
version_added: '1.0'
version_added: '2.0'
short_description: Adds or removes Vertica database schema and roles.
description:
- Adds or removes Vertica database schema and, optionally, roles
@ -95,7 +95,7 @@ notes:
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: Dariusz Owczarek
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = """
---
module: vertica_user
version_added: '1.0'
version_added: '2.0'
short_description: Adds or removes Vertica database users and assigns roles.
description:
- Adds or removes Vertica database user and, optionally, assigns roles.
@ -111,7 +111,7 @@ notes:
and both C(ErrorMessagesPath = /opt/vertica/lib64) and C(DriverManagerEncoding = UTF-16)
to be added to the C(Driver) section of either C(/etc/vertica.ini) or C($HOME/.vertica.ini).
requirements: [ 'unixODBC', 'pyodbc' ]
author: Dariusz Owczarek
author: "Dariusz Owczarek (@dareko)"
"""
EXAMPLES = """

@ -120,7 +120,7 @@ EXAMPLES = r"""
dest: /etc/network/interfaces
block: |
iface eth0 inet static
address 192.168.0.1
address 192.0.2.23
netmask 255.255.255.0
- name: insert/update HTML surrounded by custom markers after <body> line

@ -1,10 +1,30 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rabbitmq_exchange
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
@ -56,14 +76,14 @@ options:
required: false
choices: [ "yes", "no" ]
default: yes
exchangeType:
exchange_type:
description:
- type for the exchange
required: false
choices: [ "fanout", "direct", "headers", "topic" ]
aliases: [ "type" ]
default: direct
autoDelete:
auto_delete:
description:
- if the exchange should delete itself after all queues/exchanges unbound from it
required: false
@ -104,7 +124,7 @@ def main():
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
@ -121,7 +141,7 @@ def main():
module.params['login_host'],
module.params['login_port'],
urllib.quote(module.params['vhost'],''),
module.params['name']
urllib.quote(module.params['name'],'')
)
# Check if exchange already exists
@ -145,12 +165,12 @@ def main():
change_required = exchange_exists
# Check if attributes change on existing exchange
if not changeRequired and r.status_code==200 and module.params['state'] == 'present':
if not change_required and r.status_code==200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['autoDelete'] and
response['auto_delete'] == module.params['auto_delete'] and
response['internal'] == module.params['internal'] and
response['type'] == module.params['exchangeType']
response['type'] == module.params['exchange_type']
):
module.fail_json(
msg = "RabbitMQ RESTAPI doesn't support attribute changes for existing exchanges"
@ -159,15 +179,14 @@ def main():
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= changeRequired,
result = "Success",
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if changeRequired:
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
@ -175,9 +194,9 @@ def main():
headers = { "content-type": "application/json"},
data = json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['autoDelete'],
"auto_delete": module.params['auto_delete'],
"internal": module.params['internal'],
"type": module.params['exchangeType'],
"type": module.params['exchange_type'],
"arguments": module.params['arguments']
})
)
@ -187,7 +206,6 @@ def main():
if r.status_code == 204:
module.exit_json(
changed = True,
result = "Success",
name = module.params['name']
)
else:
@ -200,7 +218,6 @@ def main():
else:
module.exit_json(
changed = False,
result = "Success",
name = module.params['name']
)

@ -1,10 +1,30 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Manuel Sousa <manuel.sousa@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: rabbitmq_queue
author: "Manuel Sousa (@manuel-sousa)"
version_added: "2.0"
@ -56,13 +76,13 @@ options:
required: false
choices: [ "yes", "no" ]
default: yes
autoDelete:
auto_delete:
description:
- if the queue should delete itself after all queues/queues unbound from it
required: false
choices: [ "yes", "no" ]
default: no
messageTTL:
message_ttl:
description:
- How long a message can live in queue before it is discarded (milliseconds)
required: False
@ -119,7 +139,7 @@ def main():
state = dict(default='present', choices=['present', 'absent'], type='str'),
name = dict(required=True, type='str'),
login_user = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str'),
login_password = dict(default='guest', type='str', no_log=True),
login_host = dict(default='localhost', type='str'),
login_port = dict(default='15672', type='str'),
vhost = dict(default='/', type='str'),
@ -146,10 +166,10 @@ def main():
r = requests.get( url, auth=(module.params['login_user'],module.params['login_password']))
if r.status_code==200:
queueExists = True
queue_exists = True
response = r.json()
elif r.status_code==404:
queueExists = False
queue_exists = False
response = r.text
else:
module.fail_json(
@ -157,27 +177,35 @@ def main():
details = r.text
)
changeRequired = not queueExists if module.params['state']=='present' else queueExists
if module.params['state']=='present':
change_required = not queue_exists
else:
change_required = queue_exists
# Check if attributes change on existing queue
if not changeRequired and r.status_code==200 and module.params['state'] == 'present':
if not change_required and r.status_code==200 and module.params['state'] == 'present':
if not (
response['durable'] == module.params['durable'] and
response['auto_delete'] == module.params['autoDelete'] and
response['auto_delete'] == module.params['auto_delete'] and
(
response['arguments']['x-message-ttl'] == module.params['messageTTL'] if 'x-message-ttl' in response['arguments'] else module.params['messageTTL'] is None
( 'x-message-ttl' in response['arguments'] and response['arguments']['x-message-ttl'] == module.params['message_ttl'] ) or
( 'x-message-ttl' not in response['arguments'] and module.params['message_ttl'] is None )
) and
(
response['arguments']['x-expires'] == module.params['autoExpire'] if 'x-expires' in response['arguments'] else module.params['autoExpire'] is None
( 'x-expires' in response['arguments'] and response['arguments']['x-expires'] == module.params['auto_expires'] ) or
( 'x-expires' not in response['arguments'] and module.params['auto_expires'] is None )
) and
(
response['arguments']['x-max-length'] == module.params['maxLength'] if 'x-max-length' in response['arguments'] else module.params['maxLength'] is None
( 'x-max-length' in response['arguments'] and response['arguments']['x-max-length'] == module.params['max_length'] ) or
( 'x-max-length' not in response['arguments'] and module.params['max_length'] is None )
) and
(
response['arguments']['x-dead-letter-exchange'] == module.params['deadLetterExchange'] if 'x-dead-letter-exchange' in response['arguments'] else module.params['deadLetterExchange'] is None
( 'x-dead-letter-exchange' in response['arguments'] and response['arguments']['x-dead-letter-exchange'] == module.params['dead_letter_exchange'] ) or
( 'x-dead-letter-exchange' not in response['arguments'] and module.params['dead_letter_exchange'] is None )
) and
(
response['arguments']['x-dead-letter-routing-key'] == module.params['deadLetterRoutingKey'] if 'x-dead-letter-routing-key' in response['arguments'] else module.params['deadLetterRoutingKey'] is None
( 'x-dead-letter-routing-key' in response['arguments'] and response['arguments']['x-dead-letter-routing-key'] == module.params['dead_letter_routing_key'] ) or
( 'x-dead-letter-routing-key' not in response['arguments'] and module.params['dead_letter_routing_key'] is None )
)
):
module.fail_json(
@ -187,11 +215,11 @@ def main():
# Copy parameters to arguments as used by RabbitMQ
for k,v in {
'messageTTL': 'x-message-ttl',
'autoExpire': 'x-expires',
'maxLength': 'x-max-length',
'deadLetterExchange': 'x-dead-letter-exchange',
'deadLetterRoutingKey': 'x-dead-letter-routing-key'
'message_ttl': 'x-message-ttl',
'auto_expires': 'x-expires',
'max_length': 'x-max-length',
'dead_letter_exchange': 'x-dead-letter-exchange',
'dead_letter_routing_key': 'x-dead-letter-routing-key'
}.items():
if module.params[k]:
module.params['arguments'][v] = module.params[k]
@ -199,15 +227,14 @@ def main():
# Exit if check_mode
if module.check_mode:
module.exit_json(
changed= changeRequired,
result = "Success",
changed= change_required,
name = module.params['name'],
details = response,
arguments = module.params['arguments']
)
# Do changes
if changeRequired:
if change_required:
if module.params['state'] == 'present':
r = requests.put(
url,
@ -215,7 +242,7 @@ def main():
headers = { "content-type": "application/json"},
data = json.dumps({
"durable": module.params['durable'],
"auto_delete": module.params['autoDelete'],
"auto_delete": module.params['auto_delete'],
"arguments": module.params['arguments']
})
)
@ -225,7 +252,6 @@ def main():
if r.status_code == 204:
module.exit_json(
changed = True,
result = "Success",
name = module.params['name']
)
else:
@ -238,7 +264,6 @@ def main():
else:
module.exit_json(
changed = False,
result = "Success",
name = module.params['name']
)

@ -23,7 +23,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = '''
---
module: bigpanda
author: BigPanda
author: "Hagai Kariti (@hkariti)"
short_description: Notify BigPanda about deployments
version_added: "1.8"
description:

@ -28,7 +28,7 @@ module: circonus_annotation
short_description: create an annotation in circonus
description:
- Create an annotation event with a given category, title and description. Optionally start, end or durations can be provided
author: Nick Harring
author: "Nick Harring (@NickatEpic)"
version_added: 2.0
requirements:
- urllib3

@ -58,6 +58,7 @@ options:
required: false
default: Ansible
comment:
version_added: "2.0"
description:
- Comment for C(downtime) action.
required: false

@ -26,7 +26,9 @@ short_description: Pause/unpause Pingdom alerts
description:
- This module will let you pause/unpause Pingdom alerts
version_added: "1.2"
author: Justin Johns
author:
- "Dylan Silva (@thaumos)"
- "Justin Johns"
requirements:
- "This pingdom python library: https://github.com/mbabineau/pingdom-python"
options:
@ -132,7 +134,7 @@ def main():
)
if not HAS_PINGDOM:
module.fail_json(msg="Missing requried pingdom module (check docs)")
module.fail_json(msg="Missing required pingdom module (check docs)")
checkid = module.params['checkid']
state = module.params['state']

@ -26,7 +26,7 @@ short_description: Send code deploy and annotation events to stackdriver
description:
- Send code deploy and annotation events to Stackdriver
version_added: "1.6"
author: Ben Whaley
author: "Ben Whaley (@bwhaley)"
options:
key:
description:

@ -25,7 +25,7 @@ module: uptimerobot
short_description: Pause and start Uptime Robot monitoring
description:
- This module will let you start and pause Uptime Robot Monitoring
author: Nate Kingsley
author: "Nate Kingsley (@nate-kingsley)"
version_added: "1.9"
requirements:
- Valid Uptime Robot API Key

@ -30,7 +30,9 @@ short_description: Zabbix host macro creates/updates/deletes
description:
- manages Zabbix host macros, it can create, update or delete them.
version_added: "2.0"
author: Dean Hailin Song
author:
- "(@cave)"
- Dean Hailin Song
requirements:
- "python >= 2.6"
- zabbix-api

@ -197,7 +197,7 @@ record:
description: the record content (details depend on record type)
returned: success
type: string
sample: 192.168.100.20
sample: 192.0.2.91
created_on:
description: the record creation date
returned: success

@ -97,7 +97,7 @@ options:
default: null
requirements: [ dnsimple ]
author: Alex Coomans
author: "Alex Coomans (@drcapulet)"
'''
EXAMPLES = '''

@ -66,7 +66,7 @@ EXAMPLES = '''
- name: Set NTP server
bigip_device_ntp:
ntp_servers:
- "192.168.10.12"
- "192.0.2.23"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
@ -88,7 +88,7 @@ ntp_servers:
description: The NTP servers that were set on the device
returned: changed
type: list
sample: ["192.168.10.10", "172.27.10.10"]
sample: ["192.0.2.23", "192.0.2.42"]
timezone:
description: The timezone that was set on the device
returned: changed

@ -73,7 +73,7 @@ EXAMPLES = '''
- name: Enable virtual server
local_action: >
bigip_gtm_virtual_server
server=192.168.0.1
server=192.0.2.1
user=admin
password=mysecret
virtual_server_name=myname

@ -61,7 +61,7 @@ EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.168.0.1
server=192.0.2.1
user=admin
password=mysecret
lb_method=round_robin

@ -197,7 +197,7 @@ address:
description: The address for the Self IP
returned: created
type: string
sample: "192.168.10.10"
sample: "192.0.2.10"
name:
description: The name of the Self IP
returned:

@ -114,7 +114,7 @@ mac:
description: MAC address to use for VNIC
returned: if mac is specified
type: string
sample: "00:aa:bc:fe:11:22"
sample: "00:00:5E:00:53:42"
vlan:
description: VLAN to use for VNIC
returned: success

@ -29,7 +29,7 @@ short_description: get details reported by lldp
description:
- Reads data out of lldpctl
options: {}
author: Andy Hill
author: "Andy Hill (@andyhky)"
notes:
- Requires lldpd running and lldp enabled on switches
'''

@ -77,16 +77,16 @@ options:
required: False
default: None
description:
- 'The IPv4 address to this interface using this format ie: "192.168.1.24/24"'
- 'The IPv4 address to this interface using this format ie: "192.0.2.24/24"'
gw4:
required: False
description:
- 'The IPv4 gateway for this interface using this format ie: "192.168.100.1"'
- 'The IPv4 gateway for this interface using this format ie: "192.0.2.1"'
dns4:
required: False
default: None
description:
- 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["8.8.8.8 8.8.4.4"]'
- 'A list of upto 3 dns servers, ipv4 format e.g. To add two IPv4 DNS server addresses: ["192.0.2.53", "198.51.100.53"]'
ip6:
required: False
default: None
@ -232,9 +232,9 @@ The following examples are working examples that I have run in the field. I foll
```yml
---
#devops_os_define_network
storage_gw: "192.168.0.254"
external_gw: "10.10.0.254"
tenant_gw: "172.100.0.254"
storage_gw: "192.0.2.254"
external_gw: "198.51.100.254"
tenant_gw: "203.0.113.254"
#Team vars
nmcli_team:
@ -312,9 +312,9 @@ nmcli_ethernet:
### host_vars
```yml
---
storage_ip: "192.168.160.21/23"
external_ip: "10.10.152.21/21"
tenant_ip: "192.168.200.21/23"
storage_ip: "192.0.2.91/23"
external_ip: "198.51.100.23/21"
tenant_ip: "203.0.113.77/23"
```
@ -920,8 +920,8 @@ class Nmcli(object):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for creating ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('add')
cmd.append('type')
@ -957,8 +957,8 @@ class Nmcli(object):
cmd=[self.module.get_bin_path('nmcli', True)]
# format for modifying ethernet interface
# To add an Ethernet connection with static IP configuration, issue a command as follows
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.168.100.100/24 gw4=192.168.100.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.168.100.100/24 gw4 192.168.100.1
# - nmcli: name=add conn_name=my-eth1 ifname=eth1 type=ethernet ip4=192.0.2.100/24 gw4=192.0.2.1 state=present
# nmcli con add con-name my-eth1 ifname eth1 type ethernet ip4 192.0.2.100/24 gw4 192.0.2.1
cmd.append('con')
cmd.append('mod')
cmd.append(self.conn_name)

@ -96,7 +96,7 @@ EXAMPLES = '''
state: present
set: Interface vlan10
# Assign interface id server1-vifeth6 and mac address 52:54:00:30:6d:11
# Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23
# to port vifeth6 and setup port to be managed by a controller.
- openvswitch_port:
bridge: br-int

@ -27,13 +27,13 @@ DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
- The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms.
version_added: 1.6
author: Billy Kimble <basslines@gmail.com>
- "The M(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
room_token:
description:
- Room token provided to you by setting up the Ansible room integration on U(https://hall.com)
- "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
required: true
msg:
description:
@ -45,7 +45,7 @@ options:
required: true
picture:
description:
- The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
- "The full URL to the image you wish to use for the Icon of the message. Defaults to U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)"
required: false
"""

@ -76,7 +76,7 @@ options:
description:
- API url if using a self-hosted hipchat server. For hipchat api version 2 use C(/v2) path in URI
required: false
default: 'https://api.hipchat.com/v1/rooms/message'
default: 'https://api.hipchat.com/v1'
version_added: 1.6.0
@ -101,7 +101,6 @@ EXAMPLES = '''
# HipChat module specific support methods.
#
MSG_URI = "https://api.hipchat.com/v1/rooms/message"
import urllib
try:
import json
@ -195,7 +194,7 @@ def main():
msg_format=dict(default="text", choices=["text", "html"]),
notify=dict(default=True, type='bool'),
validate_certs=dict(default='yes', type='bool'),
api=dict(default=MSG_URI),
api=dict(default=DEFAULT_URI),
),
supports_check_mode=True
)

@ -67,11 +67,11 @@ options:
"light_blue", "pink", "gray", "light_gray"]
channel:
description:
- Channel name
- Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.
required: true
nick_to:
description:
- A list of nicknames to send the message to. When both channel and nick_to are defined, the message will be sent to both of them.
- A list of nicknames to send the message to. One of nick_to or channel needs to be set. When both are defined, the message will be sent to both of them.
required: false
default: null
version_added: "2.0"

@ -115,7 +115,7 @@ requirements: [ mosquitto ]
notes:
- This module requires a connection to an MQTT broker such as Mosquitto
U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
author: Jan-Piet Mens
author: "Jan-Piet Mens (@jpmens)"
'''
EXAMPLES = '''

@ -41,7 +41,9 @@ options:
What voice to use
required: false
requirements: [ say ]
author: Michael DeHaan
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''

@ -22,7 +22,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = '''
---
author: Willy Barro
author: "Willy Barro (@willybarro)"
requirements: [ pushbullet.py ]
module: pushbullet
short_description: Sends notifications to Pushbullet

@ -32,7 +32,7 @@ module: elasticsearch_plugin
short_description: Manage Elasticsearch plugins
description:
- Manages Elasticsearch plugins.
version_added: ""
version_added: "2.0"
author: Mathew Davies (@ThePixelDeveloper)
options:
name:
@ -52,7 +52,7 @@ options:
default: None
timeout:
description:
- Timeout setting: 30s, 1m, 1h...
- "Timeout setting: 30s, 1m, 1h..."
required: False
default: 1m
plugin_bin:

@ -27,7 +27,7 @@ ANSIBLE_METADATA = {'status': ['stableinterface'],
DOCUMENTATION = '''
---
module: dnf
version_added: historical
version_added: 1.9
short_description: Manages packages with the I(dnf) package manager
description:
- Installs, upgrades, removes, and lists packages and groups with the I(dnf) package manager.
@ -36,7 +36,6 @@ options:
description:
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: dnf -y update. You can also pass a url or a local path to a rpm file."
required: true
version_added: "1.8"
default: null
aliases: []
@ -44,7 +43,6 @@ options:
description:
- Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples.
required: false
version_added: "1.8"
default: null
state:
@ -52,7 +50,6 @@ options:
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
required: false
choices: [ "present", "latest", "absent" ]
version_added: "1.8"
default: "present"
enablerepo:
@ -61,7 +58,6 @@ options:
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "1.8"
default: null
aliases: []
@ -71,7 +67,6 @@ options:
These repos will not persist beyond the transaction.
When specifying multiple repos, separate them with a ",".
required: false
version_added: "1.8"
default: null
aliases: []
@ -79,7 +74,6 @@ options:
description:
- The remote dnf configuration file to use for the transaction.
required: false
version_added: "1.8"
default: null
aliases: []
@ -88,7 +82,6 @@ options:
- Whether to disable the GPG checking of signatures of packages being
installed. Has an effect only if state is I(present) or I(latest).
required: false
version_added: "1.8"
default: "no"
choices: ["yes", "no"]
aliases: []

@ -46,7 +46,7 @@ options:
aliases: ['pkg', 'package', 'formula']
path:
description:
- ':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system.
- "':' separated list of paths to search for 'brew' executable. Since a package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing a different set of packages in an alternative location in the system."
required: false
default: '/usr/local/bin'
state:

@ -25,7 +25,7 @@ ANSIBLE_METADATA = {'status': ['preview'],
DOCUMENTATION = '''
---
module: macports
author: Jimmy Tang
author: "Jimmy Tang (@jcftang)"
short_description: Package manager for MacPorts
description:
- Manages MacPorts packages

@ -25,6 +25,7 @@ DOCUMENTATION = '''
module: pkg5
author: "Peter Oliver (@mavit)"
short_description: Manages packages with the Solaris 11 Image Packaging System
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
notes:

@ -25,6 +25,7 @@ DOCUMENTATION = '''
module: pkg5_publisher
author: "Peter Oliver (@mavit)"
short_description: Manages Solaris 11 Image Packaging System publishers
version_added: 1.9
description:
- IPS packages are the native packages in Solaris 11 and higher.
- This modules will configure which publishers a client will download IPS

@ -278,9 +278,8 @@ def annotate_packages(module, pkgng_path, packages, annotation, dir_arg):
for package in packages:
for _annotation in annotations:
annotate_c += ( 1 if operation[_annotation['operation']](
module, pkgng_path, package,
_annotation['tag'], _annotation['value'], rootdir_arg) else 0 )
if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)

@ -55,7 +55,7 @@ options:
choices: [ true, false ]
author: Kim Nørgaard (@KimNorgaard)
requirements: [ "Slackware" >= 12.2 ]
requirements: [ "Slackware >= 12.2" ]
'''
EXAMPLES = '''

Some files were not shown because too many files have changed in this diff Show More

Loading…
Cancel
Save