Merge remote-tracking branch 'upstream/devel' into devel

Juho-Mikko Pellinen
commit d85ad1087f

@ -14,3 +14,4 @@ script:
- python2.4 -m compileall -fq cloud/amazon/_ec2_ami_search.py cloud/amazon/ec2_facts.py
- python2.6 -m compileall -fq .
- python2.7 -m compileall -fq .
#- ./test-docs.sh core

@ -51,6 +51,7 @@ options:
template:
description:
- The local path of the cloudformation template. This parameter is mutually exclusive with 'template_url'. Either one of them is required if "state" parameter is "present".
Must give full path to the file, relative to the working directory. If using roles this may look like "roles/cloudformation/files/cloudformation-example.json"
required: false
default: null
aliases: []
@ -115,6 +116,22 @@ EXAMPLES = '''
tags:
Stack: "ansible-cloudformation"
# Basic role example
- name: launch ansible cloudformation example
cloudformation:
stack_name: "ansible-cloudformation"
state: "present"
region: "us-east-1"
disable_rollback: true
template: "roles/cloudformation/files/cloudformation-example.json"
template_parameters:
KeyName: "jmartin"
DiskType: "ephemeral"
InstanceType: "m1.small"
ClusterSize: 3
tags:
Stack: "ansible-cloudformation"
# Removal example
- name: tear down old deployment
cloudformation:

@ -76,6 +76,14 @@ options:
required: false
default: null
aliases: []
spot_type:
version_added: "2.0"
description:
- Type of spot request; one of "one-time" or "persistent". Defaults to "one-time" if not supplied.
required: false
default: "one-time"
choices: [ "one-time", "persistent" ]
aliases: []
image:
description:
- I(ami) ID to use for the instance
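The new spot_type parameter only takes effect together with spot_price. A minimal playbook sketch of a persistent spot request; the image, region and other values are illustrative placeholders:

- ec2:
    spot_price: "0.05"
    spot_type: persistent
    image: ami-123456
    instance_type: m1.small
    region: us-east-1
    count: 2
    wait: yes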
@ -208,7 +216,7 @@ options:
volumes:
version_added: "1.5"
description:
- "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict."
required: false
default: null
aliases: []
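For reference, a hedged example of the volumes list described above; the device names and sizes are placeholders, and the exact dict keys (device_name, device_type, volume_size, snapshot, encrypted) should be checked against the module docs of this release:

- ec2:
    image: ami-123456
    instance_type: m1.small
    region: us-east-1
    volumes:
    - device_name: /dev/sdb
      device_type: gp2
      volume_size: 100
      encrypted: true
    - device_name: /dev/sdc
      snapshot: snap-abcdef12
      volume_size: 10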
@ -783,6 +791,7 @@ def create_instances(module, ec2, vpc, override_count=None):
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
spot_type = module.params.get('spot_type')
image = module.params.get('image')
if override_count:
count = override_count
@ -976,6 +985,7 @@ def create_instances(module, ec2, vpc, override_count=None):
params.update(dict(
count = count_remaining,
type = spot_type,
))
res = ec2.request_spot_instances(spot_price, **params)
@ -1220,6 +1230,7 @@ def main():
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
spot_price = dict(),
spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),

@ -43,7 +43,7 @@ options:
launch_config_name:
description:
- Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
required: true
min_size:
description:
- Minimum number of instances in group

@ -384,9 +384,33 @@ class ElbManager(object):
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id,
'lb_cookie_policy': lb_cookie_policy,
'app_cookie_policy': app_cookie_policy,
'instances': [instance.id for instance in check_elb.instances],
'out_of_service_count': 0,
'in_service_count': 0,
'unknown_instance_state_count': 0
}
# status of instances behind the ELB
if info['instances']:
info['instance_health'] = [ dict(
instance_id = instance_state.instance_id,
reason_code = instance_state.reason_code,
state = instance_state.state
) for instance_state in self.elb_conn.describe_instance_health(self.name)]
else:
info['instance_health'] = []
# instance state counts: InService or OutOfService
if info['instance_health']:
for instance_state in info['instance_health']:
if instance_state['state'] == "InService":
info['in_service_count'] += 1
elif instance_state['state'] == "OutOfService":
info['out_of_service_count'] += 1
else:
info['unknown_instance_state_count'] += 1
if check_elb.health_check:
info['health_check'] = {
'target': check_elb.health_check.target,
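The added keys expose instance health to playbooks. A sketch of consuming them, assuming this is the ec2_elb_lb module, that the load balancer already exists, and that its facts come back under a registered elb key:

- ec2_elb_lb:
    name: "my-elb"
    state: present
  register: elb_result
- debug:
    msg: "{{ elb_result.elb.in_service_count }} of {{ elb_result.elb.instances | length }} instances in service"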

@ -116,6 +116,18 @@ options:
default: false
aliases: []
version_added: "1.8"
classic_link_vpc_id:
description:
- Id of ClassicLink enabled VPC
required: false
default: null
version_added: "2.0"
classic_link_vpc_security_groups:
description:
- A list of security group ids with which to associate the ClassicLink VPC instances.
required: false
default: null
version_added: "2.0"
extends_documentation_fragment: aws
"""
@ -184,6 +196,8 @@ def create_launch_config(connection, module):
ramdisk_id = module.params.get('ramdisk_id')
instance_profile_name = module.params.get('instance_profile_name')
ebs_optimized = module.params.get('ebs_optimized')
classic_link_vpc_id = module.params.get('classic_link_vpc_id')
classic_link_vpc_security_groups = module.params.get('classic_link_vpc_security_groups')
bdm = BlockDeviceMapping()
if volumes:
@ -206,10 +220,12 @@ def create_launch_config(connection, module):
kernel_id=kernel_id,
spot_price=spot_price,
instance_monitoring=instance_monitoring,
associate_public_ip_address=assign_public_ip,
ramdisk_id=ramdisk_id,
instance_profile_name=instance_profile_name,
ebs_optimized=ebs_optimized,
classic_link_vpc_security_groups=classic_link_vpc_security_groups,
classic_link_vpc_id=classic_link_vpc_id,
)
launch_configs = connection.get_all_launch_configurations(names=[name])
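A hedged task wiring the new ClassicLink options into ec2_lc; the ids are placeholders:

- ec2_lc:
    name: my-launch-config
    image_id: ami-123456
    instance_type: t2.micro
    classic_link_vpc_id: vpc-abcdef12
    classic_link_vpc_security_groups:
      - sg-12345678
      - sg-87654321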
@ -225,7 +241,8 @@ def create_launch_config(connection, module):
module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
image_id=result.image_id, arn=result.launch_configuration_arn,
security_groups=result.security_groups, instance_type=result.instance_type,
result=result)
def delete_launch_config(connection, module):
@ -257,7 +274,9 @@ def main():
ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(type='bool'),
classic_link_vpc_security_groups=dict(type='list'),
classic_link_vpc_id=dict(type='str')
)
)

@ -0,0 +1,344 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: ec2_vpc_net
short_description: configure AWS virtual private clouds
description:
- Create or terminate AWS virtual private clouds. This module has a dependency on python-boto.
version_added: "2.0"
options:
name:
description:
- The name to give your VPC. This is used in combination with the cidr_block parameter to determine if a VPC already exists.
required: yes
cidr_block:
description:
- The CIDR of the VPC
required: yes
aliases: []
tenancy:
description:
- Whether tenancy should be default or dedicated. This cannot be changed after the VPC has been created.
required: false
default: default
dns_support:
description:
- Whether to enable AWS DNS support.
required: false
default: true
dns_hostnames:
description:
- Whether to enable AWS hostname support.
required: false
default: true
dhcp_id:
description:
- The id of the DHCP options to use for this VPC
default: null
required: false
tags:
description:
- The tags you want attached to the VPC. This is independent of the name value; note that if you pass a 'Name' key it will override the VPC's Name tag if they differ.
default: null
required: false
state:
description:
- The state of the VPC. Either absent or present.
default: present
required: false
multi_ok:
description:
- By default the module will not create another VPC if there is another VPC with the same name and CIDR block. Specify this as true if you want duplicate VPCs created.
default: false
required: false
author: Jonathan Davila
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Create a VPC with dedicated tenancy and a couple of tags
- ec2_vpc_net:
name: Module_dev2
cidr_block: 170.10.0.0/16
region: us-east-1
tags:
new_vpc: ec2_vpc_module
this: works22
tenancy: dedicated
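# Remove the VPC again; a sketch using the documented state option with the values from above
- ec2_vpc_net:
    name: Module_dev2
    cidr_block: 170.10.0.0/16
    region: us-east-1
    state: absent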
'''
import time
import sys
try:
import boto
import boto.ec2
import boto.vpc
from boto.exception import EC2ResponseError
HAS_BOTO=True
except ImportError:
HAS_BOTO=False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def vpc_exists(module, vpc, name, cidr_block, multi):
"""Returns True or False regarding the existence of a VPC. When supplied
with a CIDR, it checks for matching tags to determine a match; otherwise
it assumes the VPC does not exist and returns False.
"""
exists=False
matched_vpc=None
try:
matching_vpcs=vpc.get_all_vpcs(filters={'tag:Name' : name, 'cidr-block' : cidr_block})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if len(matching_vpcs) == 1 and not multi:
exists=True
# extract the VPC id from boto's "VPC:vpc-xxxxxxxx" repr
matched_vpc=str(matching_vpcs).split(':')[1].split(']')[0]
elif len(matching_vpcs) > 1 and not multi:
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway, please pass True to the multi_ok param.' % len(matching_vpcs))
return exists, matched_vpc
def vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags):
"""This returns True or False. Intended to run after vpc_exists.
It checks all the characteristics of the parameters passed and compares them
to the active VPC. If any discrepancy is found, it returns True, meaning that
the VPC needs to be updated in order to match the specified state in the params.
"""
update_dhcp=False
update_tags=False
dhcp_match=False
try:
dhcp_list=vpc.get_all_dhcp_options()
if dhcp_id is not None:
has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id})
for opts in dhcp_list:
if (str(opts).split(':')[1] == dhcp_id) or has_default:
dhcp_match=True
break
else:
pass
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# has_default is only defined when a dhcp_id was supplied, so guard on that first
if dhcp_id is not None and (not dhcp_match or (has_default and dhcp_id != 'default')):
update_dhcp=True
if dns_hostnames and dns_support == False:
module.fail_json(msg='In order to enable DNS Hostnames you must have DNS support enabled')
else:
# Note: Boto doesn't currently provide an interface to ec2-describe-vpc-attribute
# which is needed in order to detect the current status of DNS options. For now we just update
# the attribute each time; it is not used as a change indicator.
try:
vpc.modify_vpc_attribute(vpc_id, enable_dns_support=dns_support)
vpc.modify_vpc_attribute(vpc_id, enable_dns_hostnames=dns_hostnames)
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if tags:
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id}))
if not set(tags.items()).issubset(set(current_tags.items())):
update_tags=True
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
return update_dhcp, update_tags
def update_vpc_tags(module, vpc, vpc_id, tags, name):
tags.update({'Name': name})
try:
vpc.create_tags(vpc_id, tags)
updated_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id}))
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
return updated_tags
def update_dhcp_opts(module, vpc, vpc_id, dhcp_id):
try:
vpc.associate_dhcp_options(dhcp_id, vpc_id)
dhcp_list=vpc.get_all_dhcp_options()
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
# return the matching options set if found, otherwise fall back to the requested id
for opts in dhcp_list:
if opts == dhcp_id:
return opts
return dhcp_id
def main():
argument_spec=ec2_argument_spec()
argument_spec.update(dict(
name=dict(type='str', default=None, required=True),
cidr_block=dict(type='str', default=None, required=True),
tenancy=dict(choices=['default', 'dedicated'], default='default'),
dns_support=dict(type='bool', default=True),
dns_hostnames=dict(type='bool', default=True),
dhcp_opts_id=dict(type='str', default=None, required=False),
tags=dict(type='dict', required=False, default=None),
state=dict(choices=['present', 'absent'], default='present'),
region=dict(type='str', required=True),
multi_ok=dict(type='bool', default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='Boto is required for this module')
name=module.params.get('name')
cidr_block=module.params.get('cidr_block')
tenancy=module.params.get('tenancy')
dns_support=module.params.get('dns_support')
dns_hostnames=module.params.get('dns_hostnames')
dhcp_id=module.params.get('dhcp_opts_id')
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')
changed=False
new_dhcp_opts=None
new_tags=None
update_dhcp=False
update_tags=False
region, ec2_url, aws_connect_kwargs=get_aws_connection_info(module)
try:
vpc=boto.vpc.connect_to_region(
region,
**aws_connect_kwargs
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
already_exists, vpc_id=vpc_exists(module, vpc, name, cidr_block, multi)
if already_exists:
update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags)
if update_dhcp or update_tags:
changed=True
try:
e_tags=dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_id}))
dhcp_list=vpc.get_all_dhcp_options()
has_default=vpc.get_all_vpcs(filters={'dhcp-options-id' : 'default', 'vpc-id' : vpc_id})
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
dhcp_opts=None
try:
for opts in dhcp_list:
if vpc.get_all_vpcs(filters={'dhcp-options-id' : opts, 'vpc-id' : vpc_id}):
dhcp_opts=opts
break
else:
pass
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
if not dhcp_opts and has_default:
dhcp_opts='default'
if state == 'present':
if not changed and already_exists:
module.exit_json(changed=changed, vpc_id=vpc_id)
elif changed:
if update_dhcp:
dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id)
if update_tags:
e_tags=update_vpc_tags(module, vpc, vpc_id, tags, name)
module.exit_json(changed=changed, name=name, dhcp_options_id=dhcp_opts, tags=e_tags)
if not already_exists:
try:
vpc_id=str(vpc.create_vpc(cidr_block, instance_tenancy=tenancy)).split(':')[1]
vpc.create_tags(vpc_id, dict(Name=name))
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)
update_dhcp, update_tags=vpc_needs_update(module, vpc, vpc_id, dns_support, dns_hostnames, dhcp_id, tags)
if update_dhcp:
new_dhcp_opts=update_dhcp_opts(module, vpc, vpc_id, dhcp_id)
if update_tags:
new_tags=update_vpc_tags(module, vpc, vpc_id, tags, name)
module.exit_json(changed=True, name=name, vpc_id=vpc_id, dhcp_options=new_dhcp_opts, tags=new_tags)
elif state == 'absent':
if already_exists:
changed=True
try:
vpc.delete_vpc(vpc_id)
module.exit_json(changed=changed, vpc_id=vpc_id)
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_rt modules to ensure the other components are absent." % e_msg)
else:
module.exit_json(msg="VPC is absent")
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()

@ -146,6 +146,7 @@ import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -390,7 +391,7 @@ def create_group(module=None, iam=None, name=None, path=None):
return name, changed
def delete_group(module=None, iam=None, name=None):
changed = False
try:
iam.delete_group(name)
@ -508,7 +509,7 @@ def main():
groups=dict(type='list', default=None, required=False),
state=dict(
default=None, required=True, choices=['present', 'absent', 'update']),
password=dict(default=None, required=False, no_log=True),
update_password=dict(default='always', required=False, choices=['always', 'on_create']),
access_key_state=dict(default=None, required=False, choices=[
'active', 'inactive', 'create', 'remove',
@ -565,13 +566,10 @@ def main():
module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, "
"please specify present or absent")
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
@ -664,7 +662,7 @@ def main():
group_exists = name in orig_group_list
if state == 'present' and not group_exists:
new_group, changed = create_group(iam=iam, name=name, path=path)
module.exit_json(changed=changed, group_name=new_group)
elif state in ['present', 'update'] and group_exists:
changed, updated_name, updated_path, cur_path = update_group(
@ -692,7 +690,7 @@ def main():
changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" % name)
elif state == 'absent':
if name in orig_group_list:
removed_group, changed = delete_group(iam=iam, name=name)
module.exit_json(changed=changed, delete_group=removed_group)
else:
module.exit_json(changed=changed, msg="Group already absent")

@ -0,0 +1,294 @@
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: iam_cert
short_description: Manage server certificates for use on ELBs and CloudFront
description:
- Allows for the management of server certificates
version_added: "2.0"
options:
name:
description:
- Name of certificate to add, update or remove.
required: true
aliases: []
new_name:
description:
- When present, this will update the name of the cert with the value passed here.
required: false
aliases: []
new_path:
description:
- When present, this will update the path of the cert with the value passed here.
required: false
aliases: []
state:
description:
- Whether to create or delete the certificate. When C(present) is specified it will attempt an update if new_path or new_name is specified.
required: true
default: null
choices: [ "present", "absent" ]
aliases: []
path:
description:
- When creating or updating, specify the desired path of the certificate
required: false
default: "/"
aliases: []
cert_chain:
description:
- The path to the CA certificate chain in PEM encoded format.
required: false
default: null
aliases: []
cert:
description:
- The path to the certificate body in PEM encoded format.
required: false
aliases: []
key:
description:
- The path to the private key of the certificate in PEM encoded format.
dup_ok:
description:
- By default the module will not upload a certificate that is already uploaded into AWS. If set to True, it will upload the certificate as long as the name is unique.
required: false
default: False
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
requirements: [ "boto" ]
author: Jonathan I. Davila
extends_documentation_fragment: aws
'''
EXAMPLES = '''
# Basic server certificate upload
tasks:
- name: Upload Certificate
iam_cert:
name: very_ssl
state: present
cert: somecert.pem
key: privcertkey
cert_chain: myverytrustedchain
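# Rename an uploaded certificate; a sketch using the documented new_name option
- name: Rename Certificate
  iam_cert:
    name: very_ssl
    new_name: even_more_ssl
    state: present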
'''
import json
import sys
try:
import boto
import boto.iam
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
error = err.error_message
elif hasattr(err, 'message'):
error = err.message
else:
error = '%s: %s' % (Exception, err)
return error
def cert_meta(iam, name):
# fetch the certificate once instead of five separate API round-trips
cert = iam.get_server_certificate(name).get_server_certificate_result.server_certificate
meta = cert.server_certificate_metadata
opath = meta.path
ocert = cert.certificate_body
ocert_id = meta.server_certificate_id
upload_date = meta.upload_date
exp = meta.expiration
return opath, ocert, ocert_id, upload_date, exp
def dup_check(module, iam, name, new_name, cert, orig_cert_names, orig_cert_bodies, dup_ok):
update=False
if any(ct in orig_cert_names for ct in [name, new_name]):
for i_name in [name, new_name]:
if i_name is None:
continue
if cert is not None:
try:
c_index=orig_cert_names.index(i_name)
# list.index raises ValueError when the name is not present
except ValueError:
continue
else:
if orig_cert_bodies[c_index] == cert:
update=True
break
elif orig_cert_bodies[c_index] != cert:
module.fail_json(changed=False, msg='A cert with the name %s already exists and'
' has a different certificate body associated'
' with it. Certificates cannot have the same name' % i_name)
else:
update=True
break
elif cert in orig_cert_bodies and not dup_ok:
for crt_name, crt_body in zip(orig_cert_names, orig_cert_bodies):
if crt_body == cert:
module.fail_json(changed=False, msg='This certificate already'
' exists under the name %s' % crt_name)
return update
def cert_action(module, iam, name, cpath, new_name, new_path, state,
cert, key, chain, orig_cert_names, orig_cert_bodies, dup_ok):
if state == 'present':
update = dup_check(module, iam, name, new_name, cert, orig_cert_names,
orig_cert_bodies, dup_ok)
if update:
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
changed=True
if new_name and new_path:
iam.update_server_cert(name, new_cert_name=new_name, new_path=new_path)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif new_name and not new_path:
iam.update_server_cert(name, new_cert_name=new_name)
module.exit_json(changed=changed, original_name=name, new_name=new_name,
cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif not new_name and new_path:
iam.update_server_cert(name, new_path=new_path)
module.exit_json(changed=changed, name=new_name,
original_path=opath, new_path=new_path, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
else:
changed=False
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp,
msg='No new path or name specified. No changes made')
else:
changed=True
iam.upload_server_cert(name, cert, key, cert_chain=chain, path=cpath)
opath, ocert, ocert_id, upload_date, exp = cert_meta(iam, name)
module.exit_json(changed=changed, name=name, cert_path=opath, cert_body=ocert,
upload_date=upload_date, expiration_date=exp)
elif state == 'absent':
if name in orig_cert_names:
changed=True
iam.delete_server_cert(name)
module.exit_json(changed=changed, deleted_cert=name)
else:
changed=False
module.exit_json(changed=changed, msg='Certificate with the name %s already absent' % name)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
state=dict(
default=None, required=True, choices=['present', 'absent']),
name=dict(default=None, required=False),
cert=dict(default=None, required=False),
key=dict(default=None, required=False),
cert_chain=dict(default=None, required=False),
new_name=dict(default=None, required=False),
path=dict(default='/', required=False),
new_path=dict(default=None, required=False),
dup_ok=dict(default=False, required=False, choices=[False, True])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[],
)
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
try:
iam = boto.iam.connection.IAMConnection(
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
state = module.params.get('state')
name = module.params.get('name')
path = module.params.get('path')
new_name = module.params.get('new_name')
new_path = module.params.get('new_path')
cert_chain = module.params.get('cert_chain')
dup_ok = module.params.get('dup_ok')
if state == 'present':
cert = open(module.params.get('cert'), 'r').read().rstrip()
key = open(module.params.get('key'), 'r').read().rstrip()
if cert_chain is not None:
cert_chain = open(module.params.get('cert_chain'), 'r').read()
else:
# nothing to read when deleting; also clear cert_chain (the variable actually used below)
key = cert = cert_chain = None
orig_certs = [ctb['server_certificate_name'] for ctb in \
iam.get_all_server_certs().\
list_server_certificates_result.\
server_certificate_metadata_list]
orig_bodies = [iam.get_server_certificate(thing).\
get_server_certificate_result.\
certificate_body \
for thing in orig_certs]
if new_name == name:
new_name = None
if new_path == path:
new_path = None
changed = False
try:
cert_action(module, iam, name, path, new_name, new_path, state,
cert, key, cert_chain, orig_certs, orig_bodies, dup_ok)
except boto.exception.BotoServerError, err:
module.fail_json(changed=changed, msg=str(err), debug=[cert,key])
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()

@ -40,7 +40,12 @@ options:
aliases: []
policy_document:
description:
- The path to the properly JSON-formatted policy file (mutually exclusive with C(policy_json))
required: false
aliases: []
policy_json:
description:
- A properly JSON-formatted policy as a string (mutually exclusive with C(policy_document); see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 for how to use it properly)
required: false
aliases: []
state:
@ -109,16 +114,29 @@ task:
state: present
with_items: new_groups.results
# Create a new S3 policy with prefix per user
tasks:
- name: Create S3 policy from template
iam_policy:
iam_type: user
iam_name: "{{ item.user }}"
policy_name: "s3_limited_access_{{ item.prefix }}"
state: present
policy_json: " {{ lookup( 'template', 's3_policy.json.j2') }} "
with_items:
- user: s3_user
prefix: s3_user_prefix
'''
import json
import urllib
import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def boto_exception(err):
'''generic error message handler'''
@ -271,6 +289,7 @@ def main():
iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False),
policy_json=dict(type='str', default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False)
))
@ -278,26 +297,35 @@ def main():
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state').lower()
iam_type = module.params.get('iam_type').lower()
state = module.params.get('state')
name = module.params.get('iam_name')
policy_name = module.params.get('policy_name')
skip = module.params.get('skip_duplicates')
if module.params.get('policy_document') != None and module.params.get('policy_json') != None:
module.fail_json(msg='Only one of "policy_document" or "policy_json" may be set')
if module.params.get('policy_document') != None:
with open(module.params.get('policy_document'), 'r') as json_data:
pdoc = json.dumps(json.load(json_data))
json_data.close()
elif module.params.get('policy_json') != None:
try:
pdoc = json.dumps(json.loads(module.params.get('policy_json')))
except Exception as e:
module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json'))
else:
pdoc=None
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))

@ -93,6 +93,45 @@ options:
required: false
default: false
version_added: "1.9"
identifier:
description:
- Weighted and latency-based resource record sets only. An identifier
that differentiates among multiple resource record sets that have the
same combination of DNS name and type.
required: false
default: null
version_added: "2.0"
weight:
description:
- Weighted resource record sets only. Among resource record sets that
have the same combination of DNS name and type, a value that
determines what portion of traffic for the current resource record set
is routed to the associated location.
required: false
default: null
version_added: "2.0"
region:
description:
- Latency-based resource record sets only. Among resource record sets
that have the same combination of DNS name and type, a value that
determines which region this should be associated with for the
latency-based routing
required: false
default: null
version_added: "2.0"
health_check:
description:
- Health check to associate with this record
required: false
default: null
version_added: "2.0"
failover:
description:
- Failover resource record sets only. Whether this is the primary or
secondary resource record set.
required: false
default: null
version_added: "2.0"
author: "Bruce Pennypacker (@bpennypacker)"
extends_documentation_fragment: aws
'''
@ -156,6 +195,18 @@ EXAMPLES = '''
alias=True
alias_hosted_zone_id="{{ elb_zone_id }}"
# Use a routing policy to distribute traffic:
- route53:
command: "create"
zone: "foo.com"
record: "www.foo.com"
type: "CNAME"
value: "host1.foo.com"
ttl: 30
# Routing policy
identifier: "host1@www"
weight: 100
health_check: "d994b780-3150-49fd-9205-356abdd42e75"
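# Failover routing sketch; the PRIMARY/SECONDARY values follow the Route53 API and are an assumption here
- route53:
    command: "create"
    zone: "foo.com"
    record: "failover.foo.com"
    type: "A"
    value: "1.2.3.4"
    ttl: 30
    identifier: "primary"
    failover: "PRIMARY"
    health_check: "d994b780-3150-49fd-9205-356abdd42e75"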
'''
@ -166,11 +217,21 @@ try:
import boto.ec2
from boto import route53
from boto.route53 import Route53Connection
from boto.route53.record import Record, ResourceRecordSets
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def get_zone_by_name(conn, module, zone_name, want_private):
"""Finds a zone by name"""
for zone in conn.get_zones():
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
private_zone = module.boolean(zone.config.get('PrivateZone', False))
if private_zone == want_private and zone.name == zone_name:
return zone
return None
def commit(changes, retry_interval):
"""Commit changes, but retry PriorRequestNotComplete errors."""
@ -200,6 +261,11 @@ def main():
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
identifier = dict(required=False),
weight = dict(required=False, type='int'),
region = dict(required=False),
health_check = dict(required=False),
failover = dict(required=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -217,6 +283,11 @@ def main():
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
retry_interval_in = module.params.get('retry_interval')
private_zone_in = module.params.get('private_zone')
identifier_in = module.params.get('identifier')
weight_in = module.params.get('weight')
region_in = module.params.get('region')
health_check_in = module.params.get('health_check')
failover_in = module.params.get('failover')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
@ -249,32 +320,34 @@ def main():
except boto.exception.BotoServerError, e:
module.fail_json(msg = e.error_message)
# Find the named zone ID
zone = get_zone_by_name(conn, module, zone_in, private_zone_in)

# Verify that the requested zone is already defined in Route53
if zone is None:
errmsg = "Zone %s does not exist in Route53" % zone_in
module.fail_json(msg = errmsg)
record = {}
found_record = False
wanted_rset = Record(name=record_in, type=type_in, ttl=ttl_in,
identifier=identifier_in, weight=weight_in, region=region_in,
health_check=health_check_in, failover=failover_in)
for v in value_list:
if alias_in:
wanted_rset.set_alias(alias_hosted_zone_id_in, v)
else:
wanted_rset.add_value(v)
sets = conn.get_all_rrsets(zone.id, name=record_in, type=type_in, identifier=identifier_in)
for rset in sets:
# Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
@ -282,6 +355,11 @@ def main():
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
record['identifier'] = rset.identifier
record['weight'] = rset.weight
record['region'] = rset.region
record['failover'] = rset.failover
record['health_check'] = rset.health_check
if rset.alias_dns_name:
record['alias'] = True
record['value'] = rset.alias_dns_name
@ -291,35 +369,32 @@ def main():
record['alias'] = False
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if command_in == 'create' and rset.to_xml() == wanted_rset.to_xml():
module.exit_json(changed=False)
break
if command_in == 'get':
if type_in == 'NS':
ns = record['values']
else:
# Retrieve name servers associated to the zone.
ns = conn.get_zone(zone_in).get_nameservers()
module.exit_json(changed=False, set=record, nameservers=ns)
if command_in == 'delete' and not found_record:
module.exit_json(changed=False)
changes = ResourceRecordSets(conn, zone.id)
if command_in == 'create' or command_in == 'delete':
if command_in == 'create' and found_record:
if not module.params['overwrite']:
module.fail_json(msg = "Record already exists with different value. Set 'overwrite' to replace it")
command = 'UPSERT'
else:
command = command_in.upper()
changes.add_change_record(command, wanted_rset)

try:
result = commit(changes, retry_interval_in)

@ -56,6 +56,18 @@ options:
required: false
default: 600
aliases: []
marker:
description:
- Specifies the key to start with when using list mode. Object keys are returned in alphabetical order, starting with key after the marker in order.
required: false
default: null
version_added: "2.0"
max_keys:
description:
- Max number of results to return in list mode, set this if you want to retrieve fewer than the default 1000 keys.
required: false
default: 1000
version_added: "2.0"
metadata:
description:
- Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
@ -64,7 +76,7 @@ options:
version_added: "1.6"
mode:
description:
- Switches the module behaviour between put (upload), get (download), geturl (return download url, Ansible 1.3+), getstr (download object as string, 1.3+), list (list keys, 2.0+), create (bucket), delete (bucket), and delobj (delete object).
required: true
default: null
aliases: []
@ -73,6 +85,12 @@ options:
- Keyname of the object inside the bucket. Can be used to create "virtual directories", see examples.
required: false
default: null
prefix:
description:
- Limits the response to keys that begin with the specified prefix for list mode
required: false
default: null
version_added: "2.0"
version:
description:
- Version ID of the object inside the bucket. Can be used to get a specific version of a file if versioning is enabled in the target bucket.
@ -129,6 +147,12 @@ EXAMPLES = '''
# PUT/upload with metadata
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# List keys simple
- s3: bucket=mybucket mode=list
# List keys all options
- s3: bucket=mybucket mode=list prefix=/my/desired/ marker=/my/desired/0023.txt max_keys=472
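# List keys, then fetch each one; a sketch relying on the s3_keys return value of list mode
- s3: bucket=mybucket mode=list prefix=/backups/
  register: bucket_contents
- s3: bucket=mybucket mode=get object={{ item }} dest=/tmp/{{ item | basename }}
  with_items: bucket_contents.s3_keys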
# Create an empty bucket
- s3: bucket=mybucket mode=create
@ -152,6 +176,7 @@ from ssl import SSLError
try:
import boto
import boto.ec2
from boto.s3.connection import Location
from boto.s3.connection import OrdinaryCallingFormat
from boto.s3.connection import S3Connection
@ -204,6 +229,19 @@ def create_bucket(module, s3, bucket, location=None):
if bucket:
return True
def get_bucket(module, s3, bucket):
try:
return s3.lookup(bucket)
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
def list_keys(module, bucket_object, prefix, marker, max_keys):
all_keys = bucket_object.get_all_keys(prefix=prefix, marker=marker, max_keys=max_keys)
keys = [x.key for x in all_keys]
module.exit_json(msg="LIST operation complete", s3_keys=keys)
def delete_bucket(module, s3, bucket):
try:
bucket = s3.lookup(bucket)
@ -329,11 +367,14 @@ def main():
dest = dict(default=None),
encrypt = dict(default=True, type='bool'),
expiry = dict(default=600, aliases=['expiration']),
marker = dict(default=None),
max_keys = dict(default=1000),
metadata = dict(type='dict'),
mode = dict(choices=['get', 'put', 'delete', 'create', 'geturl', 'getstr', 'delobj', 'list'], required=True),
object = dict(),
version = dict(default=None),
overwrite = dict(aliases=['force'], default='always'),
prefix = dict(default=None),
retries = dict(aliases=['retry'], type='int', default=0),
s3_url = dict(aliases=['S3_URL']),
src = dict(),
@ -349,11 +390,14 @@ def main():
expiry = int(module.params['expiry'])
if module.params.get('dest'):
dest = os.path.expanduser(module.params.get('dest'))
marker = module.params.get('marker')
max_keys = module.params.get('max_keys')
metadata = module.params.get('metadata')
mode = module.params.get('mode')
obj = module.params.get('object')
version = module.params.get('version')
overwrite = module.params.get('overwrite')
prefix = module.params.get('prefix')
retries = module.params.get('retries')
s3_url = module.params.get('s3_url')
src = module.params.get('src')
@ -537,6 +581,16 @@ def main():
else:
module.fail_json(msg="Bucket parameter is required.", failed=True)
# Support for listing a set of keys
if mode == 'list':
bucket_object = get_bucket(module, s3, bucket)
# If the bucket does not exist then bail out
if bucket_object is None:
module.fail_json(msg="Target bucket (%s) cannot be found" % bucket, failed=True)
list_keys(module, bucket_object, prefix, marker, max_keys)
# Need to research how to create directories without "populating" a key, so this should just do bucket creation for now.
# WE SHOULD ENABLE SOME WAY OF CREATING AN EMPTY KEY TO CREATE "DIRECTORY" STRUCTURE, AWS CONSOLE DOES THIS.
if mode == 'create':

@ -53,7 +53,7 @@ options:
default: null
role_size:
description:
- azure role size for the new virtual machine (e.g., Small, ExtraLarge, A6). Note that instances of type G and DS are not available in all regions (locations); make sure the size and type you select are available in your chosen location.
required: false
default: Small
endpoints:
@ -235,6 +235,14 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_DS1',
'Standard_DS2',
'Standard_DS3',
'Standard_DS4',
'Standard_DS11',
'Standard_DS12',
'Standard_DS13',
'Standard_DS14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
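An illustrative fragment using one of the new sizes; every parameter except role_size is a placeholder assumption:

- azure:
    name: my-virtual-machine
    location: "East US 2"
    role_size: Standard_DS3
    image: my-image-name
    storage_account: mystorageaccount
    user: admin
    password: mysecret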

@ -92,6 +92,23 @@ options:
- 'alias. Use docker CLI-style syntax: C(redis:myredis).'
default: null
version_added: "1.5"
log_driver:
description:
- You can specify a different logging driver for the container than for the daemon.
"json-file" Default logging driver for Docker. Writes JSON messages to file.
docker logs command is available only for this logging driver.
"none" disables any logging for the container. docker logs won't be available with this driver.
"syslog" Syslog logging driver for Docker. Writes log messages to syslog.
docker logs command is not available for this logging driver.
If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0.
required: false
default: json-file
choices:
- json-file
- none
- syslog
version_added: "2.0"
memory_limit:
description:
- RAM allocated to the container as a number of bytes or as a human-readable
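A hedged task showing the new log_driver option; the image and name are placeholders:

- docker:
    image: redis
    name: myredis
    log_driver: syslog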
@ -174,7 +191,8 @@ options:
default: null
detach:
description:
- Enable detached mode to leave the container running in background. If
disabled, fail unless the process exits cleanly.
default: true
state:
description:
@ -510,6 +528,7 @@ class DockerManager(object):
'restart_policy': ((0, 5, 0), '1.14'),
'extra_hosts': ((0, 7, 0), '1.3.1'),
'pid': ((1, 0, 0), '1.17'),
'log_driver': ((1, 2, 0), '1.18'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}
@ -521,24 +540,26 @@ class DockerManager(object):
self.volumes = None
if self.module.params.get('volumes'):
self.binds = {}
self.volumes = []
vols = self.module.params.get('volumes')
for vol in vols:
parts = vol.split(":")
# regular volume
if len(parts) == 1:
self.volumes.append(parts[0])
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
elif 2 <= len(parts) <= 3:
# default to read-write
ro = False
# with supplied bind mode
if len(parts) == 3:
if parts[2] not in ['ro', 'rw']:
self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
ro = parts[2] == 'ro'
self.binds[parts[0]] = {'bind': parts[1], 'ro': ro }
else:
self.module.fail_json(msg='volumes support 1 to 3 arguments')

self.lxc_conf = None
if self.module.params.get('lxc_conf'):
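The three accepted volume forms, as a playbook sketch with placeholder paths:

- docker:
    image: myapp:latest
    name: myapp
    volumes:
      - /data
      - /host/logs:/var/log
      - /host/conf:/etc/myapp:ro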
@ -1045,15 +1066,14 @@ class DockerManager(object):
for container_port, config in self.port_bindings.iteritems():
if isinstance(container_port, int):
container_port = "{0}/tcp".format(container_port)
if len(config) == 1:
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for hostip, hostport in config:
expected_bound_ports[container_port].append({ 'HostIp': hostip, 'HostPort': str(hostport)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]

actual_bound_ports = container['HostConfig']['PortBindings'] or {}
@ -1090,8 +1110,8 @@ class DockerManager(object):
# NETWORK MODE # NETWORK MODE
expected_netmode = self.module.params.get('net') or '' expected_netmode = self.module.params.get('net') or 'bridge'
actual_netmode = container['HostConfig']['NetworkMode'] actual_netmode = container['HostConfig']['NetworkMode'] or 'bridge'
if actual_netmode != expected_netmode: if actual_netmode != expected_netmode:
self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode)) self.reload_reasons.append('net ({0} => {1})'.format(actual_netmode, expected_netmode))
differing.append(container) differing.append(container)
@ -1114,6 +1134,16 @@ class DockerManager(object):
self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from)) self.reload_reasons.append('volumes_from ({0} => {1})'.format(actual_volumes_from, expected_volumes_from))
differing.append(container) differing.append(container)
# LOG_DRIVER
if self.ensure_capability('log_driver', False):
expected_log_driver = self.module.params.get('log_driver') or 'json-file'
actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
self.reload_reasons.append('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))
differing.append(container)
continue
return differing return differing
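For reference, the inspect payload the LOG_DRIVER check above reads has roughly the shape below, so drift detection reduces to a string comparison (sample data, abbreviated):

# Sketch: compare a requested log driver against `docker inspect` output.
container = {'HostConfig': {'LogConfig': {'Type': 'syslog', 'Config': {}}}}
expected_log_driver = 'json-file'   # the module default when log_driver is unset

actual_log_driver = container['HostConfig']['LogConfig']['Type']
if actual_log_driver != expected_log_driver:
    print('log_driver ({0} => {1})'.format(actual_log_driver, expected_log_driver))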
def get_deployed_containers(self): def get_deployed_containers(self):
@ -1210,6 +1240,52 @@ class DockerManager(object):
except Exception as e: except Exception as e:
self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e)) self.module.fail_json(msg="Failed to pull the specified image: %s" % resource, error=repr(e))
def create_host_config(self):
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
'port_bindings': self.port_bindings,
'publish_all_ports': self.module.params.get('publish_all_ports'),
'privileged': self.module.params.get('privileged'),
'links': self.links,
'network_mode': self.module.params.get('net'),
}
optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
if optionals['pid'] is not None:
self.ensure_capability('pid')
params['pid_mode'] = optionals['pid']
if optionals['extra_hosts'] is not None:
self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts']
if optionals['log_driver'] is not None:
self.ensure_capability('log_driver')
log_config = docker.utils.LogConfig(type=docker.utils.LogConfig.types.JSON)
log_config.type = optionals['log_driver']
params['log_config'] = log_config
return docker.utils.create_host_config(**params)
def create_containers(self, count=1): def create_containers(self, count=1):
try: try:
mem_limit = _human_to_bytes(self.module.params.get('memory_limit')) mem_limit = _human_to_bytes(self.module.params.get('memory_limit'))
@ -1228,6 +1304,7 @@ class DockerManager(object):
'name': self.module.params.get('name'), 'name': self.module.params.get('name'),
'stdin_open': self.module.params.get('stdin_open'), 'stdin_open': self.module.params.get('stdin_open'),
'tty': self.module.params.get('tty'), 'tty': self.module.params.get('tty'),
'host_config': self.create_host_config(),
} }
def do_create(count, params): def do_create(count, params):
@ -1248,47 +1325,17 @@ class DockerManager(object):
return containers return containers
def start_containers(self, containers): def start_containers(self, containers):
params = {
'lxc_conf': self.lxc_conf,
'binds': self.binds,
'port_bindings': self.port_bindings,
'publish_all_ports': self.module.params.get('publish_all_ports'),
'privileged': self.module.params.get('privileged'),
'links': self.links,
'network_mode': self.module.params.get('net'),
}
optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid', 'extra_hosts'):
optionals[optional_param] = self.module.params.get(optional_param)
if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']
if optionals['volumes_from'] is not None:
self.ensure_capability('volumes_from')
params['volumes_from'] = optionals['volumes_from']
if optionals['restart_policy'] is not None:
self.ensure_capability('restart_policy')
params['restart_policy'] = { 'Name': optionals['restart_policy'] }
if params['restart_policy']['Name'] == 'on-failure':
params['restart_policy']['MaximumRetryCount'] = optionals['restart_policy_retry']
if optionals['pid'] is not None:
self.ensure_capability('pid')
params['pid_mode'] = optionals['pid']
if optionals['extra_hosts'] is not None:
self.ensure_capability('extra_hosts')
params['extra_hosts'] = optionals['extra_hosts']
for i in containers: for i in containers:
self.client.start(i['Id'], **params) self.client.start(i)
self.increment_counter('started') self.increment_counter('started')
if not self.module.params.get('detach'):
status = self.client.wait(i['Id'])
if status != 0:
output = self.client.logs(i['Id'], stdout=True, stderr=True,
stream=False, timestamps=False)
self.module.fail_json(status=status, msg=output)
def stop_containers(self, containers): def stop_containers(self, containers):
for i in containers: for i in containers:
self.client.stop(i['Id']) self.client.stop(i['Id'])
@ -1479,6 +1526,7 @@ def main():
net = dict(default=None), net = dict(default=None),
pid = dict(default=None), pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'), insecure_registry = dict(default=False, type='bool'),
log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
), ),
required_together = ( required_together = (
['tls_client_cert', 'tls_client_key'], ['tls_client_cert', 'tls_client_key'],
@ -1536,7 +1584,7 @@ def main():
summary=manager.counters, summary=manager.counters,
containers=containers.changed, containers=containers.changed,
reload_reasons=manager.get_reload_reason_message(), reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(containers.changed)) ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))
except DockerAPIError as e: except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation) module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)

@ -58,6 +58,14 @@ options:
required: false required: false
default: null default: null
aliases: [] aliases: []
service_account_permissions:
version_added: 2.0
description:
- service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information)
required: false
default: null
aliases: []
choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"]
pem_file: pem_file:
version_added: 1.5.1 version_added: 1.5.1
description: description:
@ -287,6 +295,8 @@ def create_instances(module, gce, instance_names):
ip_forward = module.params.get('ip_forward') ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip') external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete') disk_auto_delete = module.params.get('disk_auto_delete')
service_account_permissions = module.params.get('service_account_permissions')
service_account_email = module.params.get('service_account_email')
if external_ip == "none": if external_ip == "none":
external_ip = None external_ip = None
@ -330,6 +340,20 @@ def create_instances(module, gce, instance_names):
items.append({"key": k,"value": v}) items.append({"key": k,"value": v})
metadata = {'items': items} metadata = {'items': items}
ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if not perm in gce.SA_SCOPES_MAP.keys():
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))
if service_account_email:
ex_sa_perms.append({'email': service_account_email})
else:
ex_sa_perms.append({'email': "default"})
ex_sa_perms[0]['scopes'] = service_account_permissions
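The structure handed to libcloud's ex_service_accounts argument is a single-element list pairing an email with the requested scopes. A hedged sketch of that shape (the email value here is made up):

# Sketch: the ex_service_accounts structure built above.
service_account_email = 'deployer@my-project.iam.gserviceaccount.com'  # hypothetical
service_account_permissions = ['storage-ro', 'logging-write']

ex_sa_perms = [{'email': service_account_email or 'default',
                'scopes': service_account_permissions}]
print(ex_sa_perms)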
# These variables all have default values but check just in case # These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone: if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable', module.fail_json(msg='Missing required create instance variable',
@ -349,7 +373,7 @@ def create_instances(module, gce, instance_names):
inst = gce.create_node(name, lc_machine_type, lc_image, inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags, location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward, ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete) external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms)
changed = True changed = True
except ResourceExistsError: except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone) inst = gce.ex_get_node(name, lc_zone)
@ -437,6 +461,7 @@ def main():
tags = dict(type='list'), tags = dict(type='list'),
zone = dict(default='us-central1-a'), zone = dict(default='us-central1-a'),
service_account_email = dict(), service_account_email = dict(),
service_account_permissions = dict(type='list'),
pem_file = dict(), pem_file = dict(),
project_id = dict(), project_id = dict(),
ip_forward = dict(type='bool', default=False), ip_forward = dict(type='bool', default=False),

@ -32,6 +32,7 @@ version_added: "1.2"
author: author:
- "Benno Joy (@bennojoy)" - "Benno Joy (@bennojoy)"
- "Michael DeHaan" - "Michael DeHaan"
deprecated: Deprecated in 2.0. Use os_keypair instead
short_description: Add/Delete key pair from nova short_description: Add/Delete key pair from nova
description: description:
- Add or Remove key pair from nova . - Add or Remove key pair from nova .

@ -25,6 +25,15 @@ short_description: Get OpenStack Client config
description: description:
- Get I(openstack) client config data from clouds.yaml or environment - Get I(openstack) client config data from clouds.yaml or environment
version_added: "2.0" version_added: "2.0"
notes:
- Facts are placed in the C(openstack.clouds) variable.
options:
clouds:
description:
- List of clouds to limit the return list to. No value means return
information on all configured clouds.
required: false
default: []
requirements: [ os-client-config ] requirements: [ os-client-config ]
author: "Monty Taylor (@emonty)" author: "Monty Taylor (@emonty)"
''' '''
@ -34,17 +43,25 @@ EXAMPLES = '''
- os-client-config: - os-client-config:
- debug: var={{ item }} - debug: var={{ item }}
with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}" with_items: "{{ openstack.clouds|rejectattr('secgroup_source', 'none')|list() }}"
# Get the information back just about the mordred cloud
- os-client-config:
clouds:
- mordred
''' '''
def main(): def main():
module = AnsibleModule({}) module = AnsibleModule(argument_spec=dict(
clouds=dict(required=False, default=[]),
))
p = module.params p = module.params
try: try:
config = os_client_config.OpenStackConfig() config = os_client_config.OpenStackConfig()
clouds = [] clouds = []
for cloud in config.get_all_clouds(): for cloud in config.get_all_clouds():
if not module.params['clouds'] or cloud.name in module.params['clouds']:
cloud.config['name'] = cloud.name cloud.config['name'] = cloud.name
clouds.append(cloud.config) clouds.append(cloud.config)
module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds))) module.exit_json(ansible_facts=dict(openstack=dict(clouds=clouds)))

@ -0,0 +1,167 @@
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2013, Benno Joy <benno@ansible.com>
# Copyright (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_keypair
short_description: Add/Delete a keypair from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
description:
- Add or Remove key pair from OpenStack
options:
name:
description:
- Name that has to be given to the key pair
required: true
default: None
public_key:
description:
- The public key that would be uploaded to nova and injected into VMs
upon creation.
required: false
default: None
public_key_file:
description:
- Path to local file containing ssh public key. Mutually exclusive
with public_key.
required: false
default: None
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements: []
'''
EXAMPLES = '''
# Creates a key pair with the running user's public key
- os_keypair:
cloud: mordred
state: present
name: ansible_key
public_key_file: /home/me/.ssh/id_rsa.pub
# Creates a new key pair; the private key is returned after the run.
- os_keypair:
cloud: rax-dfw
state: present
name: ansible_key
'''
RETURN = '''
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the keypair.
returned: success
type: string
public_key:
description: The public key value for the keypair.
returned: success
type: string
private_key:
description: The private key value for the keypair.
returned: Only when a keypair is generated for the user (e.g., when creating one
and a public key is not specified).
type: string
'''
def _system_state_change(module, keypair):
state = module.params['state']
if state == 'present' and not keypair:
return True
if state == 'absent' and keypair:
return True
return False
def main():
argument_spec = openstack_full_argument_spec(
name = dict(required=True),
public_key = dict(default=None),
public_key_file = dict(default=None),
state = dict(default='present',
choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[['public_key', 'public_key_file']])
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
state = module.params['state']
name = module.params['name']
public_key = module.params['public_key']
if module.params['public_key_file']:
public_key = open(module.params['public_key_file']).read()
public_key = public_key.rstrip()
try:
cloud = shade.openstack_cloud(**module.params)
keypair = cloud.get_keypair(name)
if module.check_mode:
module.exit_json(changed=_system_state_change(module, keypair))
if state == 'present':
if keypair and keypair['name'] == name:
if public_key and (public_key != keypair['public_key']):
module.fail_json(
msg="Key name %s present but key hash not the same"
" as offered. Delete key first." % name
)
else:
module.exit_json(changed=False, key=keypair)
new_key = cloud.create_keypair(name, public_key)
module.exit_json(changed=True, key=new_key)
elif state == 'absent':
if keypair:
cloud.delete_keypair(name)
module.exit_json(changed=True)
module.exit_json(changed=False)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

@ -90,6 +90,11 @@ options:
- Ensure instance has public ip however the cloud wants to do that - Ensure instance has public ip however the cloud wants to do that
required: false required: false
default: 'yes' default: 'yes'
auto_floating_ip:
description:
- If the module should automatically assign a floating IP
required: false
default: 'yes'
floating_ips: floating_ips:
description: description:
- list of valid floating IPs that pre-exist to assign to this node - list of valid floating IPs that pre-exist to assign to this node

@ -21,6 +21,7 @@
import copy import copy
import sys import sys
import datetime import datetime
import glob
import traceback import traceback
import re import re
import shlex import shlex
@ -47,12 +48,12 @@ options:
aliases: [] aliases: []
creates: creates:
description: description:
- a filename, when it already exists, this step will B(not) be run. - a filename or glob pattern, when it already exists, this step will B(not) be run.
required: no required: no
default: null default: null
removes: removes:
description: description:
- a filename, when it does not exist, this step will B(not) be run. - a filename or glob pattern, when it does not exist, this step will B(not) be run.
version_added: "0.8" version_added: "0.8"
required: no required: no
default: null default: null
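Because both checks now go through glob.glob(), any shell-style wildcard pattern works. A minimal demonstration of the skip logic (the pattern below is made up):

import glob
import os

# Sketch: the step is skipped when the creates pattern matches anything.
creates = os.path.expanduser('~/app/*.lock')   # hypothetical pattern
if glob.glob(creates):
    print('skipped, since %s exists' % creates)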
@ -188,7 +189,7 @@ def main():
# and the filename already exists. This allows idempotence # and the filename already exists. This allows idempotence
# of command executions. # of command executions.
v = os.path.expanduser(creates) v = os.path.expanduser(creates)
if os.path.exists(v): if glob.glob(v):
module.exit_json( module.exit_json(
cmd=args, cmd=args,
stdout="skipped, since %s exists" % v, stdout="skipped, since %s exists" % v,
@ -202,7 +203,7 @@ def main():
# and the filename does not exist. This allows idempotence # and the filename does not exist. This allows idempotence
# of command executions. # of command executions.
v = os.path.expanduser(removes) v = os.path.expanduser(removes)
if not os.path.exists(v): if not glob.glob(v):
module.exit_json( module.exit_json(
cmd=args, cmd=args,
stdout="skipped, since %s does not exist" % v, stdout="skipped, since %s does not exist" % v,

@ -157,6 +157,7 @@ password=n<_665{vS43y
import getpass import getpass
import tempfile import tempfile
import re
try: try:
import MySQLdb import MySQLdb
except ImportError: except ImportError:
@ -316,13 +317,19 @@ def privileges_unpack(priv):
not specified in the string, as MySQL will always provide this by default. not specified in the string, as MySQL will always provide this by default.
""" """
output = {} output = {}
privs = []
for item in priv.strip().split('/'): for item in priv.strip().split('/'):
pieces = item.strip().split(':') pieces = item.strip().split(':')
dbpriv = pieces[0].rsplit(".", 1) dbpriv = pieces[0].rsplit(".", 1)
pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1]) pieces[0] = "`%s`.%s" % (dbpriv[0].strip('`'), dbpriv[1])
if '(' in pieces[1]:
output[pieces[0]] = [s.strip() for s in pieces[1].upper().split(',')] output[pieces[0]] = re.split(r',\s*(?=[^)]*(?:\(|$))', pieces[1].upper())
new_privs = frozenset(output[pieces[0]]) for i in output[pieces[0]]:
privs.append(re.sub(r'\(.*\)','',i))
else:
output[pieces[0]] = pieces[1].upper().split(',')
privs = output[pieces[0]]
new_privs = frozenset(privs)
if not new_privs.issubset(VALID_PRIVS): if not new_privs.issubset(VALID_PRIVS):
raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS)) raise InvalidPrivsError('Invalid privileges specified: %s' % new_privs.difference(VALID_PRIVS))
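The new regex splits on commas only when they sit outside a parenthesised column list, and the column lists are then stripped before validation. A standalone demonstration of both steps:

import re

# Sketch: keep column-level grants intact while splitting, then strip
# the "(col, ...)" part before checking against VALID_PRIVS.
grant = 'SELECT (id, name), INSERT, UPDATE (name)'
parts = re.split(r',\s*(?=[^)]*(?:\(|$))', grant.upper())
privs = [re.sub(r'\(.*\)', '', p).strip() for p in parts]
print(parts)  # ['SELECT (ID, NAME)', 'INSERT', 'UPDATE (NAME)']
print(privs)  # ['SELECT', 'INSERT', 'UPDATE']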

@ -315,7 +315,7 @@ class Connection(object):
query = """SELECT relname query = """SELECT relname
FROM pg_catalog.pg_class c FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE nspname = %s AND relkind = 'r'""" WHERE nspname = %s AND relkind in ('r', 'v')"""
self.cursor.execute(query, (schema,)) self.cursor.execute(query, (schema,))
return [t[0] for t in self.cursor.fetchall()] return [t[0] for t in self.cursor.fetchall()]

@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import errno
import shutil import shutil
import stat import stat
import grp import grp
@ -280,7 +281,13 @@ def main():
if not os.path.isabs(path): if not os.path.isabs(path):
curpath = curpath.lstrip('/') curpath = curpath.lstrip('/')
if not os.path.exists(curpath): if not os.path.exists(curpath):
try:
os.mkdir(curpath) os.mkdir(curpath)
except OSError, ex:
# Possibly something else created the dir since the os.path.exists
# check above. As long as it's a dir, we don't need to error out.
if not (ex.errno == errno.EEXIST and os.path.isdir(curpath)):
raise
tmp_file_args = file_args.copy() tmp_file_args = file_args.copy()
tmp_file_args['path']=curpath tmp_file_args['path']=curpath
changed = module.set_fs_attributes_if_different(tmp_file_args, changed) changed = module.set_fs_attributes_if_different(tmp_file_args, changed)

@ -245,8 +245,11 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
# Don't do backref expansion if not asked. # Don't do backref expansion if not asked.
new_line = line new_line = line
if lines[index[0]] != new_line + os.linesep: if not new_line.endswith(os.linesep):
lines[index[0]] = new_line + os.linesep new_line += os.linesep
if lines[index[0]] != new_line:
lines[index[0]] = new_line
msg = 'line replaced' msg = 'line replaced'
changed = True changed = True
elif backrefs: elif backrefs:

@ -152,6 +152,12 @@ options:
default: default:
required: false required: false
version_added: "1.6" version_added: "1.6"
partial:
description:
- Tells rsync to keep the partial file, which should make a subsequent transfer of the rest of the file much faster.
default: no
required: false
version_added: "2.0"
notes: notes:
- rsync must be installed on both the local and remote machine. - rsync must be installed on both the local and remote machine.
- Inspect the verbose output to validate the destination user/host/path - Inspect the verbose output to validate the destination user/host/path
@ -237,6 +243,7 @@ def main():
rsync_timeout = dict(type='int', default=0), rsync_timeout = dict(type='int', default=0),
rsync_opts = dict(type='list'), rsync_opts = dict(type='list'),
ssh_args = dict(type='str'), ssh_args = dict(type='str'),
partial = dict(default='no', type='bool'),
), ),
supports_check_mode = True supports_check_mode = True
) )
@ -254,6 +261,7 @@ def main():
compress = module.params['compress'] compress = module.params['compress']
existing_only = module.params['existing_only'] existing_only = module.params['existing_only']
dirs = module.params['dirs'] dirs = module.params['dirs']
partial = module.params['partial']
# the default of these params depends on the value of archive # the default of these params depends on the value of archive
recursive = module.params['recursive'] recursive = module.params['recursive']
links = module.params['links'] links = module.params['links']
@ -332,6 +340,9 @@ def main():
if rsync_opts: if rsync_opts:
cmd = cmd + " " + " ".join(rsync_opts) cmd = cmd + " " + " ".join(rsync_opts)
if partial:
cmd = cmd + " --partial"
changed_marker = '<<CHANGED>>' changed_marker = '<<CHANGED>>'
cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'" cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'"

@ -47,6 +47,14 @@ options:
required: false required: false
default: "" default: ""
version_added: "1.2" version_added: "1.2"
force:
description:
- the default is C(yes), which will replace the remote file when contents
are different than the source. If C(no), the file will only be transferred
if the destination does not exist.
required: false
choices: [ "yes", "no" ]
default: "yes"
notes: notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
requirements: [] requirements: []

@ -32,6 +32,7 @@ options:
src: src:
description: description:
- If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack. - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.
- If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)
required: true required: true
default: null default: null
dest: dest:
@ -81,6 +82,9 @@ EXAMPLES = '''
# Unarchive a file that is already on the remote machine # Unarchive a file that is already on the remote machine
- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no - unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no
# Unarchive a file that needs to be downloaded
- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no
''' '''
import re import re
@ -90,6 +94,9 @@ from zipfile import ZipFile
# String from tar that shows the tar contents are different from the # String from tar that shows the tar contents are different from the
# filesystem # filesystem
DIFFERENCE_RE = re.compile(r': (.*) differs$') DIFFERENCE_RE = re.compile(r': (.*) differs$')
# When downloading an archive, how many bytes to read at a time before
# writing out to the tempfile (64k)
BUFSIZE = 65536
class UnarchiveError(Exception): class UnarchiveError(Exception):
pass pass
@ -269,11 +276,37 @@ def main():
if not os.path.exists(src): if not os.path.exists(src):
if copy: if copy:
module.fail_json(msg="Source '%s' failed to transfer" % src) module.fail_json(msg="Source '%s' failed to transfer" % src)
# If copy=false, and src= contains ://, try to download the file to a temp directory.
elif '://' in src:
tempdir = os.path.dirname(__file__)
package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
try:
rsp, info = fetch_url(module, src)
f = open(package, 'w')
# Read 64k at a time to save on ram
while True:
data = rsp.read(BUFSIZE)
if data == "":
break # End of file, break while loop
f.write(data)
f.close()
src = package
except Exception, e:
module.fail_json(msg="Failure downloading %s, %s" % (src, e))
else: else:
module.fail_json(msg="Source '%s' does not exist" % src) module.fail_json(msg="Source '%s' does not exist" % src)
if not os.access(src, os.R_OK): if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src) module.fail_json(msg="Source '%s' not readable" % src)
# skip working with 0 size archives
try:
if os.path.getsize(src) == 0:
module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
except Exception, e:
module.fail_json(msg="Source '%s' not readable" % src)
# is dest OK to receive tar file? # is dest OK to receive tar file?
if not os.path.isdir(dest): if not os.path.isdir(dest):
module.fail_json(msg="Destination '%s' is not a directory" % dest) module.fail_json(msg="Destination '%s' is not a directory" % dest)
@ -315,5 +348,6 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__': if __name__ == '__main__':
main() main()

@ -38,6 +38,8 @@ description:
(see `setting the environment (see `setting the environment
<http://docs.ansible.com/playbooks_environment.html>`_), <http://docs.ansible.com/playbooks_environment.html>`_),
or by using the use_proxy option. or by using the use_proxy option.
- HTTP redirects can redirect from HTTP to HTTPS, so make sure that
your proxy environment is configured correctly for both protocols.
version_added: "0.6" version_added: "0.6"
options: options:
url: url:
@ -113,7 +115,7 @@ options:
- all arguments accepted by the M(file) module also work here - all arguments accepted by the M(file) module also work here
required: false required: false
# informational: requirements for nodes # informational: requirements for nodes
requirements: [ urllib2, urlparse ] requirements: [ ]
author: "Jan-Piet Mens (@jpmens)" author: "Jan-Piet Mens (@jpmens)"
''' '''
@ -125,6 +127,8 @@ EXAMPLES='''
get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c get_url: url=http://example.com/path/file.conf dest=/etc/foo.conf sha256sum=b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c
''' '''
import urlparse
try: try:
import hashlib import hashlib
HAS_HASHLIB=True HAS_HASHLIB=True
@ -215,8 +219,29 @@ def main():
dest_is_dir = os.path.isdir(dest) dest_is_dir = os.path.isdir(dest)
last_mod_time = None last_mod_time = None
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
stripped_sha256sum = re.sub(r'\W+', '', sha256sum)
# Fail early if sha256 is not supported
if sha256sum != '' and not HAS_HASHLIB:
module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher")
if not dest_is_dir and os.path.exists(dest): if not dest_is_dir and os.path.exists(dest):
if not force: checksum_mismatch = False
# If the download is not forced and there is a checksum, allow
# checksum match to skip the download.
if not force and sha256sum != '':
destination_checksum = module.sha256(dest)
if stripped_sha256sum.lower() == destination_checksum:
module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)
checksum_mismatch = True
# Not forcing redownload, unless sha256sum has already failed
if not force and not checksum_mismatch:
module.exit_json(msg="file already exists", dest=dest, url=url, changed=False) module.exit_json(msg="file already exists", dest=dest, url=url, changed=False)
# If the file already exists, prepare the last modified time for the # If the file already exists, prepare the last modified time for the
@ -279,14 +304,6 @@ def main():
# Check the digest of the destination file and ensure that it matches the # Check the digest of the destination file and ensure that it matches the
# sha256sum parameter if it is present # sha256sum parameter if it is present
if sha256sum != '': if sha256sum != '':
# Remove any non-alphanumeric characters, including the infamous
# Unicode zero-width space
stripped_sha256sum = re.sub(r'\W+', '', sha256sum)
if not HAS_HASHLIB:
os.remove(dest)
module.fail_json(msg="The sha256sum parameter requires hashlib, which is available in Python 2.5 and higher")
else:
destination_checksum = module.sha256(dest) destination_checksum = module.sha256(dest)
if stripped_sha256sum.lower() != destination_checksum: if stripped_sha256sum.lower() != destination_checksum:
@ -315,4 +332,5 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.urls import * from ansible.module_utils.urls import *
main() if __name__ == '__main__':
main()

@ -269,7 +269,7 @@ def url_filename(url):
def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs): def uri(module, url, dest, user, password, body, body_format, method, headers, redirects, socket_timeout, validate_certs):
# To debug # To debug
#httplib2.debug = 4 #httplib2.debuglevel = 4
# Handle Redirects # Handle Redirects
if redirects == "all" or redirects == "yes": if redirects == "all" or redirects == "yes":

@ -320,7 +320,7 @@ def main():
this_dir = os.path.join(this_dir, chdir) this_dir = os.path.join(this_dir, chdir)
if module.check_mode: if module.check_mode:
if env or extra_args or requirements or state == 'latest' or not name: if extra_args or requirements or state == 'latest' or not name:
module.exit_json(changed=True) module.exit_json(changed=True)
elif name.startswith('svn+') or name.startswith('git+') or \ elif name.startswith('svn+') or name.startswith('git+') or \
name.startswith('hg+') or name.startswith('bzr+'): name.startswith('hg+') or name.startswith('bzr+'):

@ -80,8 +80,8 @@ options:
- 'Note: This does not upgrade a specific package, use state=latest for that.' - 'Note: This does not upgrade a specific package, use state=latest for that.'
version_added: "1.1" version_added: "1.1"
required: false required: false
default: "yes" default: "no"
choices: [ "yes", "safe", "full", "dist"] choices: [ "no", "yes", "safe", "full", "dist"]
dpkg_options: dpkg_options:
description: description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"' - Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
@ -548,7 +548,7 @@ def main():
default_release = dict(default=None, aliases=['default-release']), default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'), install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'), force = dict(default='no', type='bool'),
upgrade = dict(choices=['yes', 'safe', 'full', 'dist']), upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS) dpkg_options = dict(default=DPKG_OPTIONS)
), ),
mutually_exclusive = [['package', 'upgrade', 'deb']], mutually_exclusive = [['package', 'upgrade', 'deb']],
@ -572,6 +572,10 @@ def main():
APT_GET_CMD = module.get_bin_path("apt-get") APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]: if not APTITUDE_CMD and p.get('upgrade', None) in [ 'full', 'safe', 'yes' ]:
module.fail_json(msg="Could not find aptitude. Please ensure it is installed.") module.fail_json(msg="Could not find aptitude. Please ensure it is installed.")

@ -126,6 +126,8 @@ class InvalidSource(Exception):
class SourcesList(object): class SourcesList(object):
def __init__(self): def __init__(self):
self.files = {} # group sources by file self.files = {} # group sources by file
# Repositories that we're adding -- used to implement mode param
self.new_repos = set()
self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist') self.default_file = self._apt_cfg_file('Dir::Etc::sourcelist')
# read sources.list if it exists # read sources.list if it exists
@ -238,10 +240,6 @@ class SourcesList(object):
d, fn = os.path.split(filename) d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d) fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
# allow the user to override the default mode
this_mode = module.params['mode']
module.set_mode_if_different(tmp_path, this_mode, False)
f = os.fdopen(fd, 'w') f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources: for n, valid, enabled, source, comment in sources:
chunks = [] chunks = []
@ -259,6 +257,11 @@ class SourcesList(object):
except IOError, err: except IOError, err:
module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err))) module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, unicode(err)))
module.atomic_move(tmp_path, filename) module.atomic_move(tmp_path, filename)
# allow the user to override the default mode
if filename in self.new_repos:
this_mode = module.params['mode']
module.set_mode_if_different(filename, this_mode, False)
else: else:
del self.files[filename] del self.files[filename]
if os.path.exists(filename): if os.path.exists(filename):
@ -300,6 +303,7 @@ class SourcesList(object):
files = self.files[file] files = self.files[file]
files.append((len(files), True, True, source_new, comment_new)) files.append((len(files), True, True, source_new, comment_new))
self.new_repos.add(file)
def add_source(self, line, comment='', file=None): def add_source(self, line, comment='', file=None):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2] source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
@ -374,6 +378,25 @@ class UbuntuSourcesList(SourcesList):
source = self._parse(line, raise_if_invalid_or_disabled=True)[2] source = self._parse(line, raise_if_invalid_or_disabled=True)[2]
self._remove_valid_source(source) self._remove_valid_source(source)
@property
def repos_urls(self):
_repositories = []
for parsed_repos in self.files.values():
for parsed_repo in parsed_repos:
enabled = parsed_repo[1]
source_line = parsed_repo[3]
if not enabled:
continue
if source_line.startswith('ppa:'):
source, ppa_owner, ppa_name = self._expand_ppa(source_line)
_repositories.append(source)
else:
_repositories.append(source_line)
return _repositories
def get_add_ppa_signing_key_callback(module): def get_add_ppa_signing_key_callback(module):
def _run_command(command): def _run_command(command):
@ -421,8 +444,13 @@ def main():
sources_before = sourceslist.dump() sources_before = sourceslist.dump()
if repo.startswith('ppa:'):
expanded_repo = sourceslist._expand_ppa(repo)[0]
else:
expanded_repo = repo
try: try:
if state == 'present': if state == 'present' and expanded_repo not in sourceslist.repos_urls:
sourceslist.add_source(repo) sourceslist.add_source(repo)
elif state == 'absent': elif state == 'absent':
sourceslist.remove_source(repo) sourceslist.remove_source(repo)

@ -56,6 +56,12 @@ options:
- supply an activation key for use with registration - supply an activation key for use with registration
required: False required: False
default: null default: null
profilename:
description:
- supply a profile name for use with registration
required: False
default: null
version_added: "2.0"
channels: channels:
description: description:
- Optionally specify a list of comma-separated channels to subscribe to upon successful registration. - Optionally specify a list of comma-separated channels to subscribe to upon successful registration.
@ -73,6 +79,9 @@ EXAMPLES = '''
# Register with activationkey (1-222333444) and enable extended update support. # Register with activationkey (1-222333444) and enable extended update support.
- rhn_register: state=present activationkey=1-222333444 enable_eus=true - rhn_register: state=present activationkey=1-222333444 enable_eus=true
# Register with activationkey (1-222333444) and set a profilename which may differ from the hostname.
- rhn_register: state=present activationkey=1-222333444 profilename=host.example.com.custom
# Register as user (joe_user) with password (somepass) against a satellite # Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url). # server specified by (server_url).
- rhn_register: > - rhn_register: >
@ -209,7 +218,7 @@ class Rhn(RegistrationBase):
self.update_plugin_conf('rhnplugin', True) self.update_plugin_conf('rhnplugin', True)
self.update_plugin_conf('subscription-manager', False) self.update_plugin_conf('subscription-manager', False)
def register(self, enable_eus=False, activationkey=None): def register(self, enable_eus=False, activationkey=None, profilename=None):
''' '''
Register system to RHN. If enable_eus=True, extended update Register system to RHN. If enable_eus=True, extended update
support will be requested. support will be requested.
@ -221,7 +230,8 @@ class Rhn(RegistrationBase):
register_cmd += " --use-eus-channel" register_cmd += " --use-eus-channel"
if activationkey is not None: if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey register_cmd += " --activationkey '%s'" % activationkey
# FIXME - support --profilename if profilename is not None:
register_cmd += " --profilename '%s'" % profilename
# FIXME - support --systemorgid # FIXME - support --systemorgid
rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True) rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
@ -285,6 +295,7 @@ def main():
password = dict(default=None, required=False), password = dict(default=None, required=False),
server_url = dict(default=rhn.config.get_option('serverURL'), required=False), server_url = dict(default=rhn.config.get_option('serverURL'), required=False),
activationkey = dict(default=None, required=False), activationkey = dict(default=None, required=False),
profilename = dict(default=None, required=False),
enable_eus = dict(default=False, type='bool'), enable_eus = dict(default=False, type='bool'),
channels = dict(default=[], type='list'), channels = dict(default=[], type='list'),
) )
@ -295,6 +306,7 @@ def main():
rhn.password = module.params['password'] rhn.password = module.params['password']
rhn.configure(module.params['server_url']) rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey'] activationkey = module.params['activationkey']
profilename = module.params['profilename']
channels = module.params['channels'] channels = module.params['channels']
rhn.module = module rhn.module = module

@ -60,9 +60,10 @@ EXAMPLES = '''
# Example action to ensure a key is not present in the db # Example action to ensure a key is not present in the db
- rpm_key: state=absent key=DEADB33F - rpm_key: state=absent key=DEADB33F
''' '''
import re
import syslog import syslog
import os.path import os.path
import re import urllib2
import tempfile import tempfile
def is_pubkey(string): def is_pubkey(string):
@ -203,4 +204,5 @@ def main():
# import module snippets # import module snippets
from ansible.module_utils.basic import * from ansible.module_utils.basic import *
from ansible.module_utils.urls import * from ansible.module_utils.urls import *
main() if __name__ == '__main__':
main()

@ -152,6 +152,9 @@ EXAMPLES = '''
yum: name="@Development tools" state=present yum: name="@Development tools" state=present
''' '''
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536
def_qf = "%{name}-%{version}-%{release}.%{arch}" def_qf = "%{name}-%{version}-%{release}.%{arch}"
def log(msg): def log(msg):
@ -526,9 +529,11 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1])) package = os.path.join(tempdir, str(pkg.rsplit('/', 1)[1]))
try: try:
rsp, info = fetch_url(module, pkg) rsp, info = fetch_url(module, pkg)
data = rsp.read()
f = open(package, 'w') f = open(package, 'w')
data = rsp.read(BUFSIZE)
while data:
f.write(data) f.write(data)
data = rsp.read(BUFSIZE)
f.close() f.close()
pkg = package pkg = package
except Exception, e: except Exception, e:

@ -173,7 +173,8 @@ options:
to be installed. The commit MUST be signed and the public key MUST to be installed. The commit MUST be signed and the public key MUST
be trusted in the GPG trustdb. be trusted in the GPG trustdb.
requirements:
- git (the command line tool)
notes: notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts). - "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt, SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,

@ -34,7 +34,6 @@ options:
- The username on the remote host whose authorized_keys file will be modified - The username on the remote host whose authorized_keys file will be modified
required: true required: true
default: null default: null
aliases: []
key: key:
description: description:
- The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys) - The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys)
@ -72,9 +71,11 @@ options:
version_added: "1.4" version_added: "1.4"
exclusive: exclusive:
description: description:
- Whether to remove all other non-specified keys from the - Whether to remove all other non-specified keys from the authorized_keys file. Multiple keys
authorized_keys file. Multiple keys can be specified in a single can be specified in a single C(key) string value by separating them by newlines.
key= string value by separating them by newlines. - This option is not loop aware, so if you use C(with_), it will be exclusive per iteration
of the loop; if you want multiple keys in the file you need to pass them all to C(key) in a
single batch as mentioned above.
required: false required: false
choices: [ "yes", "no" ] choices: [ "yes", "no" ]
default: "no" default: "no"
@ -168,6 +169,13 @@ def keyfile(module, user, write=False, path=None, manage_dir=True):
:return: full path string to authorized_keys for user :return: full path string to authorized_keys for user
""" """
if module.check_mode:
if path is None:
module.fail_json(msg="You must provide full path to key file in check mode")
else:
keysfile = path
return keysfile
try: try:
user_entry = pwd.getpwnam(user) user_entry = pwd.getpwnam(user)
except KeyError, e: except KeyError, e:

@ -46,7 +46,7 @@ options:
description: description:
- Description of a crontab entry. - Description of a crontab entry.
default: null default: null
required: true required: false
user: user:
description: description:
- The specific user whose crontab should be modified. - The specific user whose crontab should be modified.
@ -398,7 +398,7 @@ def main():
module = AnsibleModule( module = AnsibleModule(
argument_spec = dict( argument_spec = dict(
name=dict(required=True), name=dict(required=False),
user=dict(required=False), user=dict(required=False),
job=dict(required=False), job=dict(required=False),
cron_file=dict(required=False), cron_file=dict(required=False),

@ -104,6 +104,10 @@ def write_fstab(lines, dest):
fs_w.flush() fs_w.flush()
fs_w.close() fs_w.close()
def _escape_fstab(v):
""" escape space (040), ampersand (046) and backslash (134) which are invalid in fstab fields """
return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046')
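With these octal escapes, a mount point containing a space round-trips safely through fstab's whitespace-separated format. A quick demonstration of the helper above:

# Sketch: how _escape_fstab rewrites problem characters.
def _escape_fstab(v):
    return v.replace('\\', '\\134').replace(' ', '\\040').replace('&', '\\046')

print(_escape_fstab('/mnt/my docs'))   # /mnt/my\040docs
print(_escape_fstab('/srv/a&b'))       # /srv/a\046b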
def set_mount(**kwargs): def set_mount(**kwargs):
""" set/change a mount point location in fstab """ """ set/change a mount point location in fstab """
@ -116,11 +120,17 @@ def set_mount(**kwargs):
) )
args.update(kwargs) args.update(kwargs)
# save the mount name before space replacement
origname = args['name']
# replace any space in mount name with '\040' to make it fstab compatible (man fstab)
args['name'] = args['name'].replace(' ', r'\040')
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n' new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
to_write = [] to_write = []
exists = False exists = False
changed = False changed = False
escaped_args = dict([(k, _escape_fstab(v)) for k, v in args.iteritems()])
for line in open(args['fstab'], 'r').readlines(): for line in open(args['fstab'], 'r').readlines():
if not line.strip(): if not line.strip():
to_write.append(line) to_write.append(line)
@ -137,16 +147,16 @@ def set_mount(**kwargs):
ld = {} ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
if ld['name'] != args['name']: if ld['name'] != escaped_args['name']:
to_write.append(line) to_write.append(line)
continue continue
# it exists - now see if what we have is different # it exists - now see if what we have is different
exists = True exists = True
for t in ('src', 'fstype','opts', 'dump', 'passno'): for t in ('src', 'fstype','opts', 'dump', 'passno'):
if ld[t] != args[t]: if ld[t] != escaped_args[t]:
changed = True changed = True
ld[t] = args[t] ld[t] = escaped_args[t]
if changed: if changed:
to_write.append(new_line % ld) to_write.append(new_line % ld)
@ -160,7 +170,8 @@ def set_mount(**kwargs):
if changed: if changed:
write_fstab(to_write, args['fstab']) write_fstab(to_write, args['fstab'])
return (args['name'], changed) # mount function needs origname
return (origname, changed)
def unset_mount(**kwargs): def unset_mount(**kwargs):
@ -175,8 +186,14 @@ def unset_mount(**kwargs):
) )
args.update(kwargs) args.update(kwargs)
# save the mount name before space replacement
origname = args['name']
# replace any space in mount name with '\040' to make it fstab compatible (man fstab)
args['name'] = args['name'].replace(' ', r'\040')
to_write = [] to_write = []
changed = False changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines(): for line in open(args['fstab'], 'r').readlines():
if not line.strip(): if not line.strip():
to_write.append(line) to_write.append(line)
@ -193,7 +210,7 @@ def unset_mount(**kwargs):
ld = {} ld = {}
ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split() ld['src'], ld['name'], ld['fstype'], ld['opts'], ld['dump'], ld['passno'] = line.split()
if ld['name'] != args['name']: if ld['name'] != escaped_name:
to_write.append(line) to_write.append(line)
continue continue
@ -203,7 +220,8 @@ def unset_mount(**kwargs):
if changed: if changed:
write_fstab(to_write, args['fstab']) write_fstab(to_write, args['fstab'])
return (args['name'], changed) # umount needs origname
return (origname, changed)
def mount(module, **kwargs): def mount(module, **kwargs):
@ -262,8 +280,6 @@ def main():
args['passno'] = module.params['passno'] args['passno'] = module.params['passno']
if module.params['opts'] is not None: if module.params['opts'] is not None:
args['opts'] = module.params['opts'] args['opts'] = module.params['opts']
if ' ' in args['opts']:
module.fail_json(msg="unexpected space in 'opts' parameter")
if module.params['dump'] is not None: if module.params['dump'] is not None:
args['dump'] = module.params['dump'] args['dump'] = module.params['dump']
if module.params['fstab'] is not None: if module.params['fstab'] is not None:

@ -885,7 +885,7 @@ class LinuxService(Service):
if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed: if self.svc_cmd and self.svc_cmd.endswith('rc-service') and self.action == 'start' and self.crashed:
self.execute_command("%s zap" % svc_cmd, daemonize=True) self.execute_command("%s zap" % svc_cmd, daemonize=True)
if self.action is not "restart": if self.action != "restart":
if svc_cmd != '': if svc_cmd != '':
# upstart or systemd or OpenRC # upstart or systemd or OpenRC
rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True) rc_state, stdout, stderr = self.execute_command("%s %s %s" % (svc_cmd, self.action, arguments), daemonize=True)
@ -993,11 +993,11 @@ class FreeBsdService(Service):
def service_control(self): def service_control(self):
if self.action is "start": if self.action == "start":
self.action = "onestart" self.action = "onestart"
if self.action is "stop": if self.action == "stop":
self.action = "onestop" self.action = "onestop"
if self.action is "reload": if self.action == "reload":
self.action = "onereload" self.action = "onereload"
return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments)) return self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, self.action, self.arguments))
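The switch from "is" to "==" above matters because "is" compares object identity, and identical string values are only sometimes the same object (an interning accident, not a guarantee). A quick illustration:

# Sketch: why `is` must not be used to compare string values.
action = ''.join(['st', 'art'])   # value "start", but a distinct object
print(action == 'start')          # True: compares values
print(action is 'start')          # usually False: compares identity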
@ -1203,9 +1203,9 @@ class NetBsdService(Service):
self.running = True self.running = True
def service_control(self): def service_control(self):
if self.action is "start": if self.action == "start":
self.action = "onestart" self.action = "onestart"
if self.action is "stop": if self.action == "stop":
self.action = "onestop" self.action = "onestop"
self.svc_cmd = "%s" % self.svc_initscript self.svc_cmd = "%s" % self.svc_initscript

@ -0,0 +1,21 @@
#!/bin/sh
set -x
CHECKOUT_DIR=".ansible-checkout"
MOD_REPO="$1"
# Hidden file to avoid the module_formatter recursing into the checkout
git clone https://github.com/ansible/ansible "$CHECKOUT_DIR"
cd "$CHECKOUT_DIR"
git submodule update --init
rm -rf "lib/ansible/modules/$MOD_REPO"
ln -s "$TRAVIS_BUILD_DIR/" "lib/ansible/modules/$MOD_REPO"
pip install -U Jinja2 PyYAML setuptools six pycrypto sphinx
. ./hacking/env-setup
PAGER=/bin/cat bin/ansible-doc -l
rc=$?
if [ $rc -ne 0 ] ; then
  exit $rc
fi
make -C docsite

@ -35,6 +35,7 @@ options:
choices: ['present', 'absent'] choices: ['present', 'absent']
default: present default: present
requirements: ["a2enmod","a2dismod"]
''' '''
EXAMPLES = ''' EXAMPLES = '''

@ -30,7 +30,8 @@ options:
command: command:
choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ] choices: [ 'cleanup', 'collectstatic', 'flush', 'loaddata', 'migrate', 'runfcgi', 'syncdb', 'test', 'validate', ]
description: description:
- The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate. Other commands can be entered, but will fail if they're unknown to Django. - The name of the Django management command to run. Built in commands are cleanup, collectstatic, flush, loaddata, migrate, runfcgi, syncdb, test, and validate.
- Other commands can be entered, but will fail if they're unknown to Django. Other commands that may prompt for user input should be run with the I(--noinput) flag.
required: true required: true
app_path: app_path:
description: description:
@ -102,7 +103,7 @@ EXAMPLES = """
# Load the initial_data fixture into the application # Load the initial_data fixture into the application
- django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }} - django_manage: command=loaddata app_path={{ django_dir }} fixtures={{ initial_data }}
#Run syncdb on the application # Run syncdb on the application
- django_manage: > - django_manage: >
command=syncdb command=syncdb
app_path={{ django_dir }} app_path={{ django_dir }}
@ -110,8 +111,11 @@ EXAMPLES = """
pythonpath={{ settings_dir }} pythonpath={{ settings_dir }}
virtualenv={{ virtualenv_dir }} virtualenv={{ virtualenv_dir }}
#Run the SmokeTest test case from the main app. Useful for testing deploys. # Run the SmokeTest test case from the main app. Useful for testing deploys.
- django_manage: command=test app_path=django_dir apps=main.SmokeTest - django_manage: command=test app_path={{ django_dir }} apps=main.SmokeTest
# Create an initial superuser.
- django_manage: command="createsuperuser --noinput --username=admin --email=admin@example.com" app_path={{ django_dir }}
""" """
@ -218,7 +222,7 @@ def main():
) )
command = module.params['command'] command = module.params['command']
app_path = module.params['app_path'] app_path = os.path.expanduser(module.params['app_path'])
virtualenv = module.params['virtualenv'] virtualenv = module.params['virtualenv']
for param in specific_params: for param in specific_params:
@ -234,7 +238,7 @@ def main():
_ensure_virtualenv(module) _ensure_virtualenv(module)
cmd = "python manage.py %s" % (command, ) cmd = "./manage.py %s" % (command, )
if command in noinput_commands: if command in noinput_commands:
cmd = '%s --noinput' % cmd cmd = '%s --noinput' % cmd

@ -46,7 +46,10 @@ options:
choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"] choices: ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
default: "apr_md5_crypt" default: "apr_md5_crypt"
description: description:
- Encryption scheme to be used. - Encryption scheme to be used. As well as the four choices listed
here, you can also use any other hash supported by passlib, such as
md5_crypt and sha256_crypt, which are Linux passwd hashes. If you
do so, the password file will not be compatible with Apache or Nginx.
state: state:
required: false required: false
choices: [ present, absent ] choices: [ present, absent ]
@ -74,20 +77,25 @@ EXAMPLES = """
- htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640 - htpasswd: path=/etc/nginx/passwdfile name=janedoe password=9s36?;fyNp owner=root group=www-data mode=0640
# Remove a user from a password file # Remove a user from a password file
- htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent - htpasswd: path=/etc/apache2/passwdfile name=foobar state=absent
# Add a user to a password file suitable for use by libpam-pwdfile
- htpasswd: path=/etc/mail/passwords name=alex password=oedu2eGh crypt_scheme=md5_crypt
""" """
import os import os
import tempfile
from distutils.version import StrictVersion from distutils.version import StrictVersion
try: try:
from passlib.apache import HtpasswdFile from passlib.apache import HtpasswdFile, htpasswd_context
from passlib.context import CryptContext
import passlib import passlib
except ImportError: except ImportError:
passlib_installed = False passlib_installed = False
else: else:
passlib_installed = True passlib_installed = True
apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]
def create_missing_directories(dest): def create_missing_directories(dest):
destpath = os.path.dirname(dest) destpath = os.path.dirname(dest)
@ -99,6 +107,10 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
""" Ensures user is present """ Ensures user is present
Returns (msg, changed) """ Returns (msg, changed) """
if crypt_scheme in apache_hashes:
context = htpasswd_context
else:
context = CryptContext(schemes = [ crypt_scheme ] + apache_hashes)
if not os.path.exists(dest): if not os.path.exists(dest):
if not create: if not create:
raise ValueError('Destination %s does not exist' % dest) raise ValueError('Destination %s does not exist' % dest)
@ -106,9 +118,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
return ("Create %s" % dest, True) return ("Create %s" % dest, True)
create_missing_directories(dest) create_missing_directories(dest)
if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme) ht = HtpasswdFile(dest, new=True, default_scheme=crypt_scheme, context=context)
else: else:
ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme) ht = HtpasswdFile(dest, autoload=False, default=crypt_scheme, context=context)
if getattr(ht, 'set_password', None): if getattr(ht, 'set_password', None):
ht.set_password(username, password) ht.set_password(username, password)
else: else:
@ -117,9 +129,9 @@ def present(dest, username, password, crypt_scheme, create, check_mode):
return ("Created %s and added %s" % (dest, username), True) return ("Created %s and added %s" % (dest, username), True)
else: else:
if StrictVersion(passlib.__version__) >= StrictVersion('1.6'): if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme) ht = HtpasswdFile(dest, new=False, default_scheme=crypt_scheme, context=context)
else: else:
ht = HtpasswdFile(dest, default=crypt_scheme) ht = HtpasswdFile(dest, default=crypt_scheme, context=context)
found = None found = None
if getattr(ht, 'check_password', None): if getattr(ht, 'check_password', None):
@ -198,6 +210,36 @@ def main():
if not passlib_installed: if not passlib_installed:
module.fail_json(msg="This module requires the passlib Python library") module.fail_json(msg="This module requires the passlib Python library")
# Check file for blank lines in effort to avoid "need more than 1 value to unpack" error.
try:
f = open(path, "r")
except IOError:
# No preexisting file to remove blank lines from
f = None
else:
try:
lines = f.readlines()
finally:
f.close()
# If the file gets edited, it returns true, so only edit the file if it has blank lines
strip = False
for line in lines:
if not line.strip():
strip = True
break
if strip:
# If check mode, create a temporary file
if check_mode:
temp = tempfile.NamedTemporaryFile()
path = temp.name
f = open(path, "w")
try:
[ f.write(line) for line in lines if line.strip() ]
finally:
f.close()
try: try:
if state == 'present': if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode) (msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)

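The scheme-selection logic added to present() can be exercised against passlib directly; a minimal sketch (the password is illustrative, and encrypt() is the passlib 1.6-era spelling, renamed hash() in later releases):

    from passlib.apache import htpasswd_context
    from passlib.context import CryptContext

    apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

    def pick_context(crypt_scheme):
        # Apache-native schemes keep passlib's stock htpasswd context; any
        # other scheme (e.g. md5_crypt for libpam-pwdfile) gets a custom
        # context that can still read the Apache hashes already in the file.
        if crypt_scheme in apache_hashes:
            return htpasswd_context
        return CryptContext(schemes=[crypt_scheme] + apache_hashes)

    ctx = pick_context("md5_crypt")
    hashed = ctx.encrypt("oedu2eGh", scheme="md5_crypt")
    print ctx.verify("oedu2eGh", hashed)   # True
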
@@ -64,7 +64,7 @@ options:
       - The desired state of program/group.
     required: true
     default: null
-    choices: [ "present", "started", "stopped", "restarted" ]
+    choices: [ "present", "started", "stopped", "restarted", "absent" ]
   supervisorctl_path:
     description:
       - path to supervisorctl executable
@@ -103,7 +103,7 @@ def main():
         username=dict(required=False),
         password=dict(required=False),
         supervisorctl_path=dict(required=False),
-        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped'])
+        state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'absent'])
     )
     module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
@@ -194,9 +194,27 @@ def main():
     if state == 'restarted':
         rc, out, err = run_supervisorctl('update', check_rc=True)
         processes = get_matched_processes()
+        if not processes:
+            module.fail_json(name=name, msg="ERROR (no such process)")
         take_action_on_processes(processes, lambda s: True, 'restart', 'started')

     processes = get_matched_processes()
+    # state=absent must fall through to its own no-op exit below, so an empty
+    # match is only an error for the other states.
+    if not processes and state != 'absent':
+        module.fail_json(name=name, msg="ERROR (no such process)")

+    if state == 'absent':
+        if len(processes) == 0:
+            module.exit_json(changed=False, name=name, state=state)
+        if module.check_mode:
+            module.exit_json(changed=True)
+        run_supervisorctl('reread', check_rc=True)
+        rc, out, err = run_supervisorctl('remove', name)
+        if '%s: removed process group' % name in out:
+            module.exit_json(changed=True, name=name, state=state)
+        else:
+            module.fail_json(msg=out, name=name, state=state)

     if state == 'present':
         if len(processes) > 0:

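Outside the module, the state=absent flow added above boils down to two supervisorctl calls; a rough sketch using subprocess (the group name is illustrative, and a real caller would respect supervisorctl_path and any configured credentials):

    import subprocess

    def remove_process_group(name):
        # Re-read configuration first, so supervisord sees the group is gone from it.
        subprocess.check_call(['supervisorctl', 'reread'])
        out = subprocess.check_output(['supervisorctl', 'remove', name])
        # Success is reported as '<name>: removed process group' on stdout.
        if '%s: removed process group' % name in out:
            return True
        raise RuntimeError(out)

    remove_process_group('myapp')
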
@@ -18,8 +18,6 @@
 # You should have received a copy of the GNU General Public License
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.

-import os
-import time

 DOCUMENTATION = '''
 ---

@@ -28,7 +28,7 @@ $result = New-Object PSObject -Property @{
 }
 If ($params.name) {
-    $name = $params.name
+    $name = $params.name -split ',' | % { $_.Trim() }
 }
 Else {
     Fail-Json $result "missing required argument: name"

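The PowerShell change above turns a comma-separated name parameter into a trimmed list; the same transformation, sketched in Python purely for illustration:

    def split_names(raw):
        # Mirrors `$params.name -split ',' | % { $_.Trim() }`.
        return [part.strip() for part in raw.split(',')]

    print split_names('Web-Server, NET-Framework-45-Core')
    # ['Web-Server', 'NET-Framework-45-Core']
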
@@ -56,7 +56,7 @@ If ( $state -eq "touch" )
     }
     Else
     {
-        echo $null > $file
+        echo $null > $path
     }
     $result.changed = $TRUE
 }
