merge and resolve conflict in docker.py

reviewable/pr18780/r1
Stefan Junker 10 years ago
commit d8df5da49f

@ -7,7 +7,22 @@ New module submissions for modules that do not yet exist should be submitted to
Take care to submit tickets to the appropriate repo where modules are contained. The docs.ansible.com website indicates this at the bottom of each module documentation page.
Reporting bugs
==============
Take care to submit tickets to the appropriate repo where modules are contained. The repo is mentioned at the bottom of module documentation page at [docs.ansible.com](http://docs.ansible.com/).
Testing modules
===============
Ansible [module development guide](http://docs.ansible.com/developing_modules.html#testing-modules) contains the latest info about that.
License
=======
As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
As with Ansible, modules distributed with Ansible are GPLv3 licensed. User generated modules not part of this project can be of any license.
Installation
============
There should be no need to install this repo separately as it should be included in any Ansible install using the official documented methods.

@ -130,13 +130,6 @@ except ImportError:
sys.exit(1)
class Region:
def __init__(self, region):
'''connects boto to the region specified in the cloudformation template'''
self.name = region
self.endpoint = 'cloudformation.%s.amazonaws.com' % region
def boto_exception(err):
'''generic error message handler'''
if hasattr(err, 'error_message'):
@ -239,11 +232,10 @@ def main():
stack_outputs = {}
try:
cf_region = Region(region)
cfn = boto.cloudformation.connection.CloudFormationConnection(
aws_access_key_id=aws_access_key,
cfn = boto.cloudformation.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key,
region=cf_region,
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))

@ -61,6 +61,13 @@ options:
required: true
default: null
aliases: []
tenancy:
version_added: "1.9"
description:
- An instance with a tenancy of "dedicated" runs on single-tenant hardware and can only be launched into a VPC. Valid values are "default" or "dedicated". Note that to use dedicated tenancy you MUST specify a vpc_subnet_id as well. Dedicated tenancy is not available for EC2 "micro" instances.
required: false
default: default
aliases: []
spot_price:
version_added: "1.5"
description:
@ -299,6 +306,18 @@ EXAMPLES = '''
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
# Dedicated tenancy example
- local_action:
module: ec2
assign_public_ip: yes
group_id: sg-1dc53f72
key_name: mykey
image: ami-6e649707
instance_type: m1.small
tenancy: dedicated
vpc_subnet_id: subnet-29e63245
wait: yes
# Spot instance example
- ec2:
spot_price: 0.24
@ -476,6 +495,7 @@ try:
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
from boto.exception import EC2ResponseError
from boto.vpc import VPCConnection
except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
@ -571,7 +591,10 @@ def get_instance_info(inst):
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
'hypervisor': inst.hypervisor}
'hypervisor': inst.hypervisor,
'tags': inst.tags,
'groups': dict((group.id, group.name) for group in inst.groups),
}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
@ -582,6 +605,11 @@ def get_instance_info(inst):
except AttributeError:
instance_info['ebs_optimized'] = False
try:
instance_info['tenancy'] = getattr(inst, 'placement_tenancy')
except AttributeError:
instance_info['tenancy'] = 'default'
return instance_info
def boto_supports_associate_public_ip_address(ec2):
@ -651,7 +679,7 @@ def boto_supports_param_in_spot_request(ec2, param):
method = getattr(ec2, 'request_spot_instances')
return param in method.func_code.co_varnames
def enforce_count(module, ec2):
def enforce_count(module, ec2, vpc):
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
@ -676,7 +704,7 @@ def enforce_count(module, ec2):
to_create = exact_count - len(instances)
if not checkmode:
(instance_dict_array, changed_instance_ids, changed) \
= create_instances(module, ec2, override_count=to_create)
= create_instances(module, ec2, vpc, override_count=to_create)
for inst in instance_dict_array:
instances.append(inst)
@ -707,7 +735,7 @@ def enforce_count(module, ec2):
return (all_instances, instance_dict_array, changed_instance_ids, changed)
def create_instances(module, ec2, override_count=None):
def create_instances(module, ec2, vpc, override_count=None):
"""
Creates new instances
@ -725,6 +753,7 @@ def create_instances(module, ec2, override_count=None):
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
tenancy = module.params.get('tenancy')
spot_price = module.params.get('spot_price')
image = module.params.get('image')
if override_count:
@ -755,25 +784,29 @@ def create_instances(module, ec2, override_count=None):
module.fail_json(msg = str("Use only one type of parameter (group_name) or (group_id)"))
sys.exit(1)
vpc_id = None
if vpc_subnet_id:
vpc_id = vpc.get_all_subnets(subnet_ids=[vpc_subnet_id])[0].vpc_id
else:
vpc_id = None
try:
# Here we try to lookup the group id from the security group name - if group is set.
if group_name:
grp_details = ec2.get_all_security_groups()
if type(group_name) == list:
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
elif type(group_name) == str:
for grp in grp_details:
if str(group_name) in str(grp):
group_id = [str(grp.id)]
if vpc_id:
grp_details = ec2.get_all_security_groups(filters={'vpc_id': vpc_id})
else:
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, basestring):
group_name = [group_name]
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# Now we try to lookup the group id testing if group exists.
elif group_id:
#wrap the group_id in a list if it's not one already
if type(group_id) == str:
if isinstance(group_id, basestring):
group_id = [group_id]
grp_details = ec2.get_all_security_groups(group_ids=group_id)
grp_item = grp_details[0]
group_name = [grp_item.name]
group_name = [grp_item.name for grp_item in grp_details]
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
@ -808,6 +841,10 @@ def create_instances(module, ec2, override_count=None):
if ebs_optimized:
params['ebs_optimized'] = ebs_optimized
# 'tenancy' always has a default value, but it is not a valid parameter for spot instance request
if not spot_price:
params['tenancy'] = tenancy
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
@ -887,6 +924,18 @@ def create_instances(module, ec2, override_count=None):
continue
else:
module.fail_json(msg = str(e))
# The instances returned through ec2.run_instances above can be in
# terminated state due to idempotency. See commit 7f11c3d for a complete
# explanation.
terminated_instances = [
str(instance.id) for instance in res.instances if instance.state == 'terminated'
]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
else:
if private_ip:
module.fail_json(
@ -924,15 +973,6 @@ def create_instances(module, ec2, override_count=None):
except boto.exception.BotoServerError, e:
module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
# The instances returned through run_instances can be in
# terminated state due to idempotency.
terminated_instances = [ str(instance.id) for instance in res.instances
if instance.state == 'terminated' ]
if terminated_instances:
module.fail_json(msg = "Instances with id(s) %s " % terminated_instances +
"were created previously but have since been terminated - " +
"use a (possibly different) 'instanceid' parameter")
# wait here until the instances are up
num_running = 0
wait_timeout = time.time() + wait_timeout
@ -1150,6 +1190,7 @@ def main():
count_tag = dict(),
volumes = dict(type='list'),
ebs_optimized = dict(type='bool', default=False),
tenancy = dict(default='default'),
)
)
@ -1164,6 +1205,20 @@ def main():
ec2 = ec2_connect(module)
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
if region:
try:
vpc = boto.vpc.connect_to_region(
region,
aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key
)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
tagged_instances = []
state = module.params.get('state')
@ -1188,9 +1243,9 @@ def main():
module.fail_json(msg='image parameter is required for new instance')
if module.params.get('exact_count') is None:
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2)
(instance_dict_array, new_instance_ids, changed) = create_instances(module, ec2, vpc)
else:
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2)
(tagged_instances, instance_dict_array, new_instance_ids, changed) = enforce_count(module, ec2, vpc)
module.exit_json(changed=changed, instance_ids=new_instance_ids, instances=instance_dict_array, tagged_instances=tagged_instances)

@ -242,7 +242,10 @@ def main():
)
module = AnsibleModule(argument_spec=argument_spec)
ec2 = ec2_connect(module)
try:
ec2 = ec2_connect(module)
except Exception, e:
module.fail_json(msg="Error while connecting to aws: %s" % str(e))
if module.params.get('state') == 'absent':
if not module.params.get('image_id'):

@ -57,7 +57,8 @@ options:
required: false
default: us-east-1
choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
"eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2", "us-gov-west-1"]
"eu-central-1", "eu-west-1", "sa-east-1", "us-east-1",
"us-west-1", "us-west-2", "us-gov-west-1"]
virt:
description: virutalization type
required: false
@ -89,11 +90,13 @@ SUPPORTED_DISTROS = ['ubuntu']
AWS_REGIONS = ['ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',
'us-west-1',
'us-west-2']
'us-west-2',
"us-gov-west-1"]
def get_url(module, url):

@ -274,7 +274,7 @@ def create_autoscaling_group(connection, module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)
@ -326,6 +326,8 @@ def create_autoscaling_group(connection, module):
for attr in ASG_ATTRIBUTES:
if module.params.get(attr):
module_attr = module.params.get(attr)
if attr == 'vpc_zone_identifier':
module_attr = ','.join(module_attr)
group_attr = getattr(as_group, attr)
# we do this because AWS and the module may return the same list
# sorted differently

@ -258,7 +258,7 @@ class ElbManager:
try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
elbs = elb.get_all_load_balancers()
@ -278,7 +278,7 @@ class ElbManager:
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]

@ -328,7 +328,9 @@ class ElbManager(object):
'security_group_ids': check_elb.security_groups,
'status': self.status,
'subnets': self.subnets,
'scheme': check_elb.scheme
'scheme': check_elb.scheme,
'hosted_zone_name': check_elb.canonical_hosted_zone_name,
'hosted_zone_id': check_elb.canonical_hosted_zone_name_id
}
if check_elb.health_check:
@ -341,7 +343,7 @@ class ElbManager(object):
}
if check_elb.listeners:
info['listeners'] = [l.get_complex_tuple()
info['listeners'] = [self._api_listener_as_tuple(l)
for l in check_elb.listeners]
elif self.status == 'created':
# When creating a new ELB, listeners don't show in the
@ -375,7 +377,7 @@ class ElbManager(object):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
self.module.fail_json(msg=str(e))
def _delete_elb(self):
@ -431,7 +433,7 @@ class ElbManager(object):
# Since ELB allows only one listener on each incoming port, a
# single match on the incoming port is all we're looking for
if existing_listener[0] == listener['load_balancer_port']:
existing_listener_found = existing_listener.get_complex_tuple()
existing_listener_found = self._api_listener_as_tuple(existing_listener)
break
if existing_listener_found:
@ -451,7 +453,7 @@ class ElbManager(object):
# Check for any extraneous listeners we need to remove, if desired
if self.purge_listeners:
for existing_listener in self.elb.listeners:
existing_listener_tuple = existing_listener.get_complex_tuple()
existing_listener_tuple = self._api_listener_as_tuple(existing_listener)
if existing_listener_tuple in listeners_to_remove:
# Already queued for removal
continue
@ -468,6 +470,13 @@ class ElbManager(object):
if listeners_to_add:
self._create_elb_listeners(listeners_to_add)
def _api_listener_as_tuple(self, listener):
"""Adds ssl_certificate_id to ELB API tuple if present"""
base_tuple = listener.get_complex_tuple()
if listener.ssl_certificate_id and len(base_tuple) < 5:
return base_tuple + (listener.ssl_certificate_id,)
return base_tuple
def _listener_as_tuple(self, listener):
"""Formats listener as a 4- or 5-tuples, in the order specified by the
ELB API"""

@ -63,6 +63,7 @@ class Ec2Metadata(object):
AWS_REGIONS = ('ap-northeast-1',
'ap-southeast-1',
'ap-southeast-2',
'eu-central-1',
'eu-west-1',
'sa-east-1',
'us-east-1',

@ -128,7 +128,7 @@ def make_rule_key(prefix, rule, group_id, cidr_ip):
def addRulesToLookup(rules, prefix, dict):
for rule in rules:
for grant in rule.grants:
dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = rule
dict[make_rule_key(prefix, rule, grant.group_id, grant.cidr_ip)] = (rule, grant)
def get_target_from_rule(module, ec2, rule, name, group, groups, vpc_id):
@ -304,14 +304,13 @@ def main():
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules:
for rule in groupRules.itervalues() :
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
for (rule, grant) in groupRules.itervalues() :
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id]
if not module.check_mode:
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
# Manage egress rules
groupRules = {}
@ -369,20 +368,19 @@ def main():
# Finally, remove anything left in the groupRules -- these will be defunct rules
if purge_rules_egress:
for rule in groupRules.itervalues():
for grant in rule.grants:
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
for (rule, grant) in groupRules.itervalues():
grantGroup = None
if grant.group_id:
grantGroup = groups[grant.group_id].id
if not module.check_mode:
ec2.revoke_security_group_egress(
group_id=group.id,
ip_protocol=rule.ip_protocol,
from_port=rule.from_port,
to_port=rule.to_port,
src_group_id=grantGroup,
cidr_ip=grant.cidr_ip)
changed = True
if group:
module.exit_json(changed=changed, group_id=group.id)

@ -93,7 +93,6 @@ options:
description:
- Used for Auto Scaling groups that launch instances into an Amazon Virtual Private Cloud. Specifies whether to assign a public IP address to each instance launched in a Amazon VPC.
required: false
default: false
aliases: []
version_added: "1.8"
ramdisk_id:
@ -255,7 +254,7 @@ def main():
ebs_optimized=dict(default=False, type='bool'),
associate_public_ip_address=dict(type='bool'),
instance_monitoring=dict(default=False, type='bool'),
assign_public_ip=dict(default=False, type='bool')
assign_public_ip=dict(type='bool')
)
)
@ -265,7 +264,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
state = module.params.get('state')

@ -271,7 +271,7 @@ def main():
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))
if state == 'present':

@ -163,9 +163,7 @@ def main():
try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
if not connection:
module.fail_json(msg="failed to connect to AWS for the given region: %s" % str(region))
except boto.exception.NoAuthHandlerFound, e:
except (boto.exception.NoAuthHandlerFound, StandardError), e:
module.fail_json(msg = str(e))
if state == 'present':

@ -24,9 +24,9 @@ version_added: "1.1"
options:
instance:
description:
- instance ID if you wish to attach the volume.
- instance ID if you wish to attach the volume. Since 1.9 you can set it to None to detach.
required: false
default: null
default: null
aliases: []
name:
description:
@ -55,7 +55,7 @@ options:
required: false
default: standard
aliases: []
version_added: "1.8"
version_added: "1.9"
iops:
description:
- the provisioned IOPs you want to associate with this volume (integer).
@ -152,12 +152,12 @@ EXAMPLES = '''
image: "{{ image }}"
zone: YYYYYY
id: my_instance
wait: yes
wait: yes
count: 1
register: ec2
- ec2_vol:
instance: "{{ item.id }}"
instance: "{{ item.id }}"
name: my_existing_volume_Name_tag
device_name: /dev/xvdf
with_items: ec2.instances
@ -168,16 +168,20 @@ EXAMPLES = '''
id: vol-XXXXXXXX
state: absent
# Detach a volume (since 1.9)
- ec2_vol:
id: vol-XXXXXXXX
instance: None
# List volumes for an instance
- ec2_vol:
instance: i-XXXXXX
state: list
# Create new volume using SSD storage
- local_action:
module: ec2_vol
instance: XXXXXX
volume_size: 50
- ec2_vol:
instance: XXXXXX
volume_size: 50
volume_type: gp2
device_name: /dev/xvdf
'''
@ -261,15 +265,18 @@ def create_volume(module, ec2, zone):
if iops:
volume_type = 'io1'
if instance == 'None' or instance == '':
instance = None
# If no instance supplied, try volume creation based on module parameters.
if name or id:
if not instance:
module.fail_json(msg = "If name or id is specified, instance must also be specified")
if iops or volume_size:
module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
volume = get_volume(module, ec2)
if volume.attachment_state() is not None:
if instance is None:
return volume
adata = volume.attach_data
if adata.instance_id != instance:
module.fail_json(msg = "Volume %s is already attached to another instance: %s"
@ -331,6 +338,13 @@ def attach_volume(module, ec2, volume, instance):
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def detach_volume(module, ec2):
vol = get_volume(module, ec2)
if not vol or vol.attachment_state() is None:
module.exit_json(changed=False)
else:
vol.detach()
module.exit_json(changed=True)
def main():
argument_spec = ec2_argument_spec()
@ -362,6 +376,9 @@ def main():
snapshot = module.params.get('snapshot')
state = module.params.get('state')
if instance == 'None' or instance == '':
instance = None
ec2 = ec2_connect(module)
if state == 'list':
@ -428,6 +445,8 @@ def main():
volume = create_volume(module, ec2, zone)
if instance:
attach_volume(module, ec2, volume, inst)
else:
detach_volume(module, ec2)
module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
# import module snippets

@ -357,7 +357,9 @@ class ElastiCacheManager(object):
'modifying': 'available',
'deleting': 'gone'
}
if self.status == awaited_status:
# No need to wait, we're already done
return
if status_map[self.status] != awaited_status:
msg = "Invalid awaited status. '%s' cannot transition to '%s'"
self.module.fail_json(msg=msg % (self.status, awaited_status))

File diff suppressed because it is too large Load Diff

@ -79,8 +79,8 @@ EXAMPLES = '''
- subnet-aaaaaaaa
- subnet-bbbbbbbb
# Remove a parameter group
- rds_param_group:
# Remove a subnet group
- rds_subnet_group:
state: absent
name: norwegian-blue
'''

@ -54,9 +54,23 @@ options:
default: null
aliases: []
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
alias:
description:
- Indicates if this is an alias record.
required: false
version_added: 1.9
default: False
aliases: []
alias_hosted_zone_id:
description:
- The hosted zone identifier.
required: false
version_added: 1.9
default: null
aliases: []
value:
description:
- The new value when creating a DNS record. Multiple comma-spaced values are allowed. When deleting a record all values for the record must be specified or Route53 will not delete it.
- The new value when creating a DNS record. Multiple comma-spaced values are allowed for non-alias records. When deleting a record all values for the record must be specified or Route53 will not delete it.
required: false
default: null
aliases: []
@ -84,6 +98,12 @@ options:
required: false
default: 500
aliases: []
private_zone:
description:
- If set to true, the private zone matching the requested name within the domain will be used if there are both public and private zones. The default is to use the public zone.
required: false
default: false
version_added: "1.9"
requirements: [ "boto" ]
author: Bruce Pennypacker
'''
@ -113,6 +133,7 @@ EXAMPLES = '''
command: delete
zone: foo.com
record: "{{ rec.set.record }}"
ttl: "{{ rec.set.ttl }}"
type: "{{ rec.set.type }}"
value: "{{ rec.set.value }}"
@ -136,6 +157,16 @@ EXAMPLES = '''
ttl: "7200"
value: '"bar"'
# Add an alias record that points to an Amazon ELB:
- route53:
command=create
zone=foo.com
record=elb.foo.com
type=A
value="{{ elb_dns_name }}"
alias=yes
alias_hosted_zone_id="{{ elb_zone_id }}"
'''
@ -167,25 +198,31 @@ def commit(changes, retry_interval):
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
record = dict(required=True),
ttl = dict(required=False, default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
value = dict(required=False),
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500)
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
record = dict(required=True),
ttl = dict(required=False, default=3600),
type = dict(choices=['A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'], required=True),
alias = dict(required=False, type='bool'),
alias_hosted_zone_id = dict(required=False),
value = dict(required=False),
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
)
)
module = AnsibleModule(argument_spec=argument_spec)
command_in = module.params.get('command')
zone_in = module.params.get('zone')
ttl_in = module.params.get('ttl')
record_in = module.params.get('record')
type_in = module.params.get('type')
value_in = module.params.get('value')
retry_interval_in = module.params.get('retry_interval')
command_in = module.params.get('command')
zone_in = module.params.get('zone').lower()
ttl_in = module.params.get('ttl')
record_in = module.params.get('record').lower()
type_in = module.params.get('type')
value_in = module.params.get('value')
alias_in = module.params.get('alias')
alias_hosted_zone_id_in = module.params.get('alias_hosted_zone_id')
retry_interval_in = module.params.get('retry_interval')
private_zone_in = module.params.get('private_zone')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
@ -206,6 +243,11 @@ def main():
if command_in == 'create' or command_in == 'delete':
if not value_in:
module.fail_json(msg = "parameter 'value' required for create/delete")
elif alias_in:
if len(value_list) != 1:
module.fail_json(msg = "parameter 'value' must contain a single dns name for alias create/delete")
elif not alias_hosted_zone_id_in:
module.fail_json(msg = "parameter 'alias_hosted_zone_id' required for alias create/delete")
# connect to the route53 endpoint
try:
@ -217,8 +259,11 @@ def main():
zones = {}
results = conn.get_all_hosted_zones()
for r53zone in results['ListHostedZonesResponse']['HostedZones']:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# only save this zone id if the private status of the zone matches
# the private_zone_in boolean specified in the params
if module.boolean(r53zone['Config'].get('PrivateZone', False)) == private_zone_in:
zone_id = r53zone['Id'].replace('/hostedzone/', '')
zones[r53zone['Name']] = zone_id
# Verify that the requested zone is already defined in Route53
if not zone_in in zones:
@ -235,7 +280,7 @@ def main():
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
if rset.type == type_in and decoded_name == record_in:
if rset.type == type_in and decoded_name.lower() == record_in.lower():
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
@ -243,6 +288,15 @@ def main():
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if rset.alias_dns_name:
record['alias'] = True
record['value'] = rset.alias_dns_name
record['values'] = [rset.alias_dns_name]
record['alias_hosted_zone_id'] = rset.alias_hosted_zone_id
else:
record['alias'] = False
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
if value_list == sorted(rset.resource_records) and int(record['ttl']) == ttl_in and command_in == 'create':
module.exit_json(changed=False)
@ -260,12 +314,18 @@ def main():
else:
change = changes.add_change("DELETE", record_in, type_in, record['ttl'])
for v in record['values']:
change.add_value(v)
if record['alias']:
change.set_alias(record['alias_hosted_zone_id'], v)
else:
change.add_value(v)
if command_in == 'create' or command_in == 'delete':
change = changes.add_change(command_in.upper(), record_in, type_in, ttl_in)
for v in value_list:
change.add_value(v)
if module.params['alias']:
change.set_alias(alias_hosted_zone_id_in, v)
else:
change.add_value(v)
try:
result = commit(changes, retry_interval_in)

@ -125,6 +125,8 @@ import os
import urlparse
import hashlib
from boto.s3.connection import OrdinaryCallingFormat
try:
import boto
from boto.s3.connection import Location
@ -321,7 +323,6 @@ def main():
if is_fakes3(s3_url):
try:
fakes3 = urlparse.urlparse(s3_url)
from boto.s3.connection import OrdinaryCallingFormat
s3 = boto.connect_s3(
aws_access_key,
aws_secret_key,
@ -339,20 +340,20 @@ def main():
module.fail_json(msg = str(e))
else:
try:
s3 = boto.connect_s3(aws_access_key, aws_secret_key)
s3 = boto.s3.connect_to_region(location, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key, is_secure=True, calling_format=OrdinaryCallingFormat())
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg = str(e))
# If our mode is a GET operation (download), go through the procedure as appropriate ...
if mode == 'get':
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
module.fail_json(msg="Target bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj)
keyrtn = key_check(module, s3, bucket, obj)
if keyrtn is False:
module.fail_json(msg="Target key cannot be found", failed=True)
@ -376,8 +377,8 @@ def main():
if overwrite is True:
download_s3file(module, s3, bucket, obj, dest)
else:
module.fail_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.", failed=True)
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force download.")
# Firstly, if key_matches is TRUE and overwrite is not enabled, we EXIT with a helpful message.
if sum_matches is True and overwrite is False:
module.exit_json(msg="Local and remote object are identical, ignoring. Use overwrite parameter to force.", changed=False)
@ -387,8 +388,8 @@ def main():
download_s3file(module, s3, bucket, obj, dest)
# If sum does not match but the destination exists, we
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
# if our mode is a PUT operation (upload), go through the procedure as appropriate ...
if mode == 'put':
# Use this snippet to debug through conditionals:
@ -399,7 +400,7 @@ def main():
pathrtn = path_check(src)
if pathrtn is False:
module.fail_json(msg="Local object for PUT does not exist", failed=True)
# Lets check to see if bucket exists to get ground truth.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is True:
@ -420,7 +421,7 @@ def main():
if overwrite is True:
upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.")
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:

@ -173,7 +173,20 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
'Basic_A1',
'Basic_A2',
'Basic_A3',
'Basic_A4']
'Basic_A4',
'Standard_D1',
'Standard_D2',
'Standard_D3',
'Standard_D4',
'Standard_D11',
'Standard_D12',
'Standard_D13',
'Standard_D14',
'Standard_G1',
'Standard_G2',
'Standard_G3',
'Standard_G4',
'Standard_G5']
try:
import azure as windows_azure
@ -281,6 +294,7 @@ def create_virtual_machine(module, azure):
network_config = ConfigurationSetInputEndpoints()
network_config.configuration_set_type = 'NetworkConfiguration'
network_config.subnet_names = []
network_config.public_ips = None
for port in endpoints:
network_config.input_endpoints.append(ConfigurationSetInputEndpoint(name='TCP-%s' % port,
protocol='TCP',
@ -442,6 +456,8 @@ def main():
module.fail_json(msg='location parameter is required for new instance')
if not module.params.get('storage_account'):
module.fail_json(msg='storage_account parameter is required for new instance')
if not module.params.get('password'):
module.fail_json(msg='password parameter is required for new instance')
(changed, public_dns_name, deployment) = create_virtual_machine(module, azure)
module.exit_json(changed=changed, public_dns_name=public_dns_name, deployment=json.loads(json.dumps(deployment, default=lambda o: o.__dict__)))

@ -236,7 +236,9 @@ class Droplet(JsonfyMixIn):
@classmethod
def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
private_networking_lower = str(private_networking).lower()
backups_enabled_lower = str(backups_enabled).lower()
json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking_lower, backups_enabled_lower)
droplet = cls(json)
return droplet

File diff suppressed because it is too large Load Diff

@ -115,6 +115,27 @@ options:
required: true
default: "us-central1-a"
aliases: []
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for gateways)
required: false
default: "false"
aliases: []
external_ip:
version_added: "1.9"
description:
- type of external ip, ephemeral by default
required: false
default: "ephemeral"
aliases: []
disk_auto_delete:
version_added: "1.9"
description:
- if set boot disk will be removed after instance destruction
required: false
default: "true"
aliases: []
requirements: [ "libcloud" ]
notes:
@ -223,6 +244,12 @@ def get_instance_info(inst):
key=lambda disk_info: disk_info['index'])]
else:
disk_names = []
if len(inst.public_ips) == 0:
public_ip = None
else:
public_ip = inst.public_ips[0]
return({
'image': not inst.image is None and inst.image.split('/')[-1] or None,
'disks': disk_names,
@ -231,11 +258,11 @@ def get_instance_info(inst):
'name': inst.name,
'network': netname,
'private_ip': inst.private_ips[0],
'public_ip': inst.public_ips[0],
'public_ip': public_ip,
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
})
def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked
@ -259,6 +286,12 @@ def create_instances(module, gce, instance_names):
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
ip_forward = module.params.get('ip_forward')
external_ip = module.params.get('external_ip')
disk_auto_delete = module.params.get('disk_auto_delete')
if external_ip == "none":
external_ip = None
new_instances = []
changed = False
@ -319,7 +352,8 @@ def create_instances(module, gce, instance_names):
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd)
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
@ -409,6 +443,10 @@ def main():
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
ip_forward = dict(type='bool', default=False),
external_ip = dict(choices=['ephemeral', 'none'],
default='ephemeral'),
disk_auto_delete = dict(type='bool', default=True),
)
)
@ -424,6 +462,7 @@ def main():
state = module.params.get('state')
tags = module.params.get('tags')
zone = module.params.get('zone')
ip_forward = module.params.get('ip_forward')
changed = False
inames = []

@ -66,6 +66,13 @@ options:
required: false
default: null
aliases: []
target_tags:
version_added: "1.9"
description:
- the target instance tags for creating a firewall rule
required: false
default: null
aliases: []
state:
description:
- desired state of the persistent disk
@ -156,8 +163,9 @@ def main():
ipv4_range = dict(),
fwname = dict(),
name = dict(),
src_range = dict(),
src_range = dict(type='list'),
src_tags = dict(type='list'),
target_tags = dict(type='list'),
state = dict(default='present'),
service_account_email = dict(),
pem_file = dict(),
@ -173,6 +181,7 @@ def main():
name = module.params.get('name')
src_range = module.params.get('src_range')
src_tags = module.params.get('src_tags')
target_tags = module.params.get('target_tags')
state = module.params.get('state')
changed = False
@ -218,7 +227,7 @@ def main():
try:
gce.ex_create_firewall(fwname, allowed_list, network=name,
source_ranges=src_range, source_tags=src_tags)
source_ranges=src_range, source_tags=src_tags, target_tags=target_tags)
changed = True
except ResourceExistsError:
pass
@ -229,6 +238,7 @@ def main():
json_output['allowed'] = allowed
json_output['src_range'] = src_range
json_output['src_tags'] = src_tags
json_output['target_tags'] = target_tags
if state in ['absent', 'deleted']:
if fwname:

@ -108,6 +108,14 @@ options:
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
@ -144,6 +152,7 @@ def main():
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
@ -161,6 +170,7 @@ def main():
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
@ -174,7 +184,7 @@ def main():
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state }
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
@ -233,7 +243,7 @@ def main():
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot)
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:

@ -20,6 +20,7 @@ DOCUMENTATION = '''
---
module: glance_image
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_image instead
short_description: Add/Delete images from glance
description:
- Add or Remove images from the glance repository.

@ -7,6 +7,7 @@ DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_keystone_user instead
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
- Manage users,tenants, roles from OpenStack.

@ -22,7 +22,7 @@ import os
try:
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1 import floating_ips
from novaclient.v1_1 import floating_ips
from novaclient import exceptions
from novaclient import utils
import time
@ -33,6 +33,7 @@ DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_server instead
short_description: Create/Delete VMs from OpenStack
description:
- Create or Remove virtual machines from Openstack.
@ -168,6 +169,12 @@ options:
required: false
default: None
version_added: "1.6"
scheduler_hints:
description:
- Arbitrary key/value pairs to the scheduler for custom use
required: false
default: None
version_added: "1.9"
requirements: ["novaclient"]
'''
@ -294,15 +301,15 @@ def _add_floating_ip_from_pool(module, nova, server):
# instantiate FloatingIPManager object
floating_ip_obj = floating_ips.FloatingIPManager(nova)
# empty dict and list
usable_floating_ips = {}
# empty dict and list
usable_floating_ips = {}
pools = []
# user specified
pools = module.params['floating_ip_pools']
# get the list of all floating IPs. Mileage may
# vary according to Nova Compute configuration
# get the list of all floating IPs. Mileage may
# vary according to Nova Compute configuration
# per cloud provider
all_floating_ips = floating_ip_obj.list()
@ -324,7 +331,7 @@ def _add_floating_ip_from_pool(module, nova, server):
try:
new_ip = nova.floating_ips.create(pool)
except Exception, e:
module.fail_json(msg = "Unable to create floating ip")
module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
pool_ips.append(new_ip.ip)
# Add to the main list
usable_floating_ips[pool] = pool_ips
@ -356,7 +363,7 @@ def _add_auto_floating_ip(module, nova, server):
try:
new_ip = nova.floating_ips.create()
except Exception as e:
module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
module.fail_json(msg = "Unable to create floating ip: %s" % (e))
try:
server.add_floating_ip(new_ip)
@ -378,9 +385,9 @@ def _add_floating_ip(module, nova, server):
else:
return server
# this may look redundant, but if there is now a
# this may look redundant, but if there is now a
# floating IP, then it needs to be obtained from
# a recent server object if the above code path exec'd
# a recent server object if the above code path exec'd
try:
server = nova.servers.get(server.id)
except Exception, e:
@ -422,7 +429,7 @@ def _create_server(module, nova):
'config_drive': module.params['config_drive'],
}
for optional_param in ('region_name', 'key_name', 'availability_zone'):
for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'):
if module.params[optional_param]:
bootkwargs[optional_param] = module.params[optional_param]
try:
@ -443,7 +450,7 @@ def _create_server(module, nova):
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
# now exit with info
# now exit with info
module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)
if server.status == 'ERROR':
@ -543,6 +550,7 @@ def main():
auto_floating_ip = dict(default=False, type='bool'),
floating_ips = dict(default=None),
floating_ip_pools = dict(default=None),
scheduler_hints = dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
@ -582,4 +590,3 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()

@ -28,6 +28,7 @@ DOCUMENTATION = '''
---
module: nova_keypair
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_keypair instead
short_description: Add/Delete key pair from nova
description:
- Add or Remove key pair from nova .

@ -31,6 +31,7 @@ DOCUMENTATION = '''
---
module: quantum_floating_ip
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_floating_ip instead
short_description: Add/Remove floating IP from an instance
description:
- Add or Remove a floating IP to an instance

@ -31,6 +31,7 @@ DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance

@ -29,6 +29,7 @@ DOCUMENTATION = '''
---
module: quantum_network
version_added: "1.4"
deprecated: Deprecated in 1.9. Use os_network instead
short_description: Creates/Removes networks from OpenStack
description:
- Add or Remove network from OpenStack.

@ -30,6 +30,7 @@ DOCUMENTATION = '''
module: quantum_router
version_added: "1.2"
short_description: Create or Remove router from openstack
deprecated: Deprecated in 1.9. Use os_router instead
description:
- Create or Delete routers from OpenStack
options:

@ -28,6 +28,7 @@ DOCUMENTATION = '''
---
module: quantum_router_gateway
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_router_gateway instead
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic.

@ -27,6 +27,7 @@ except ImportError:
DOCUMENTATION = '''
---
module: quantum_router_interface
deprecated: Deprecated in 1.9. Use os_router_interface instead
version_added: "1.2"
short_description: Attach/Dettach a subnet's interface to a router
description:

@ -29,6 +29,7 @@ DOCUMENTATION = '''
---
module: quantum_subnet
version_added: "1.2"
deprecated: Deprecated in 1.9. Use os_subnet instead
short_description: Add/remove subnet from a network
description:
- Add/remove subnet from a network

@ -35,6 +35,34 @@ options:
- "yes"
- "no"
version_added: 1.5
boot_from_volume:
description:
- Whether or not to boot the instance from a Cloud Block Storage volume.
If C(yes) and I(image) is specified a new volume will be created at
boot time. I(boot_volume_size) is required with I(image) to create a
new volume at boot time.
default: "no"
choices:
- "yes"
- "no"
version_added: 1.9
boot_volume:
description:
- Cloud Block Storage ID or Name to use as the boot volume of the
instance
version_added: 1.9
boot_volume_size:
description:
- Size of the volume to create in Gigabytes. This is only required with
I(image) and I(boot_from_volume).
default: 100
version_added: 1.9
boot_volume_terminate:
description:
- Whether the I(boot_volume) or newly created volume from I(image) will
be terminated when the server is terminated
default: false
version_added: 1.9
config_drive:
description:
- Attach read-only configuration drive to server as label config-2
@ -99,7 +127,9 @@ options:
version_added: 1.4
image:
description:
- image to use for the instance. Can be an C(id), C(human_id) or C(name)
- image to use for the instance. Can be an C(id), C(human_id) or C(name).
With I(boot_from_volume), a Cloud Block Storage volume will be created
with this image
default: null
instance_ids:
description:
@ -210,10 +240,43 @@ except ImportError:
HAS_PYRAX = False
def rax_find_server_image(module, server, image, boot_volume):
if not image and boot_volume:
vol = rax_find_bootable_volume(module, pyrax, server,
exit=False)
if not vol:
return None
volume_image_metadata = vol.volume_image_metadata
vol_image_id = volume_image_metadata.get('image_id')
if vol_image_id:
server_image = rax_find_image(module, pyrax,
vol_image_id, exit=False)
if server_image:
server.image = dict(id=server_image)
# Match image IDs taking care of boot from volume
if image and not server.image:
vol = rax_find_bootable_volume(module, pyrax, server)
volume_image_metadata = vol.volume_image_metadata
vol_image_id = volume_image_metadata.get('image_id')
if not vol_image_id:
return None
server_image = rax_find_image(module, pyrax,
vol_image_id, exit=False)
if image != server_image:
return None
server.image = dict(id=server_image)
elif image and server.image['id'] != image:
return None
return server.image
def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
files={}, wait=True, wait_timeout=300, disk_config=None,
group=None, nics=[], extra_create_args={}, user_data=None,
config_drive=False, existing=[]):
config_drive=False, existing=[], block_device_mapping_v2=[]):
cs = pyrax.cloudservers
changed = False
@ -239,6 +302,7 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
module.fail_json(msg='Failed to load %s' % lpath)
try:
servers = []
bdmv2 = block_device_mapping_v2
for name in names:
servers.append(cs.servers.create(name=name, image=image,
flavor=flavor, meta=meta,
@ -247,9 +311,14 @@ def create(module, names=[], flavor=None, image=None, meta={}, key_name=None,
disk_config=disk_config,
config_drive=config_drive,
userdata=user_data,
block_device_mapping_v2=bdmv2,
**extra_create_args))
except Exception, e:
module.fail_json(msg='%s' % e.message)
if e.message:
msg = str(e.message)
else:
msg = repr(e)
module.fail_json(msg=msg)
else:
changed = True
@ -394,7 +463,9 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
disk_config=None, count=1, group=None, instance_ids=[],
exact_count=False, networks=[], count_offset=0,
auto_increment=False, extra_create_args={}, user_data=None,
config_drive=False):
config_drive=False, boot_from_volume=False,
boot_volume=None, boot_volume_size=None,
boot_volume_terminate=False):
cs = pyrax.cloudservers
cnw = pyrax.cloud_networks
if not cnw:
@ -402,6 +473,26 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present' or (state == 'absent' and instance_ids is None):
if not boot_from_volume and not boot_volume and not image:
module.fail_json(msg='image is required for the "rax" module')
for arg, value in dict(name=name, flavor=flavor).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" module' %
arg)
if boot_from_volume and not image and not boot_volume:
module.fail_json(msg='image or boot_volume are required for the '
'"rax" with boot_from_volume')
if boot_from_volume and image and not boot_volume_size:
module.fail_json(msg='boot_volume_size is required for the "rax" '
'module with boot_from_volume and image')
if boot_from_volume and image and boot_volume:
image = None
servers = []
# Add the group meta key
@ -438,12 +529,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
# act on the state
if state == 'present':
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" module' %
arg)
# Idempotent ensurance of a specific count of servers
if exact_count is not False:
# See if we can find servers that match our options
@ -583,7 +668,6 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
# Perform more simplistic matching
search_opts = {
'name': '^%s$' % name,
'image': image,
'flavor': flavor
}
servers = []
@ -591,6 +675,11 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if not rax_find_server_image(module, server, image,
boot_volume):
continue
# Ignore servers with non matching metadata
if server.metadata != meta:
continue
@ -616,34 +705,57 @@ def cloudservers(module, state=None, name=None, flavor=None, image=None,
# them, we aren't performing auto_increment here
names = [name] * (count - len(servers))
block_device_mapping_v2 = []
if boot_from_volume:
mapping = {
'boot_index': '0',
'delete_on_termination': boot_volume_terminate,
'destination_type': 'volume',
}
if image:
mapping.update({
'uuid': image,
'source_type': 'image',
'volume_size': boot_volume_size,
})
image = None
elif boot_volume:
volume = rax_find_volume(module, pyrax, boot_volume)
mapping.update({
'uuid': pyrax.utils.get_id(volume),
'source_type': 'volume',
})
block_device_mapping_v2.append(mapping)
create(module, names=names, flavor=flavor, image=image,
meta=meta, key_name=key_name, files=files, wait=wait,
wait_timeout=wait_timeout, disk_config=disk_config, group=group,
nics=nics, extra_create_args=extra_create_args,
user_data=user_data, config_drive=config_drive,
existing=servers)
existing=servers,
block_device_mapping_v2=block_device_mapping_v2)
elif state == 'absent':
if instance_ids is None:
# We weren't given an explicit list of server IDs to delete
# Let's match instead
for arg, value in dict(name=name, flavor=flavor,
image=image).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax" '
'module' % arg)
search_opts = {
'name': '^%s$' % name,
'image': image,
'flavor': flavor
}
for server in cs.servers.list(search_opts=search_opts):
# Ignore DELETED servers
if server.status == 'DELETED':
continue
if not rax_find_server_image(module, server, image,
boot_volume):
continue
# Ignore servers with non matching metadata
if meta != server.metadata:
continue
servers.append(server)
# Build a list of server IDs to delete
@ -672,6 +784,10 @@ def main():
argument_spec.update(
dict(
auto_increment=dict(default=True, type='bool'),
boot_from_volume=dict(default=False, type='bool'),
boot_volume=dict(type='str'),
boot_volume_size=dict(type='int', default=100),
boot_volume_terminate=dict(type='bool', default=False),
config_drive=dict(default=False, type='bool'),
count=dict(default=1, type='int'),
count_offset=dict(default=1, type='int'),
@ -712,6 +828,10 @@ def main():
'playbook pertaining to the "rax" module')
auto_increment = module.params.get('auto_increment')
boot_from_volume = module.params.get('boot_from_volume')
boot_volume = module.params.get('boot_volume')
boot_volume_size = module.params.get('boot_volume_size')
boot_volume_terminate = module.params.get('boot_volume_terminate')
config_drive = module.params.get('config_drive')
count = module.params.get('count')
count_offset = module.params.get('count_offset')
@ -757,7 +877,9 @@ def main():
exact_count=exact_count, networks=networks,
count_offset=count_offset, auto_increment=auto_increment,
extra_create_args=extra_create_args, user_data=user_data,
config_drive=config_drive)
config_drive=config_drive, boot_from_volume=boot_from_volume,
boot_volume=boot_volume, boot_volume_size=boot_volume_size,
boot_volume_terminate=boot_volume_terminate)
# import module snippets

@ -28,6 +28,12 @@ options:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
@ -99,6 +105,8 @@ EXAMPLES = '''
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
@ -107,14 +115,8 @@ except ImportError:
def cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout):
for arg in (state, name, size, volume_type):
if not arg:
module.fail_json(msg='%s is required for rax_cbs' % arg)
if size < 100:
module.fail_json(msg='"size" must be greater than or equal to 100')
snapshot_id, volume_type, wait, wait_timeout,
image):
changed = False
volume = None
instance = {}
@ -126,15 +128,26 @@ def cloud_block_storage(module, state, name, description, meta, size,
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if image:
# pyrax<1.9.3 did not have support for specifying an image when
# creating a volume which is required for bootable volumes
if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
module.fail_json(msg='Creating a bootable volume requires '
'pyrax>=1.9.3')
image = rax_find_image(module, pyrax, image)
volume = rax_find_volume(module, pyrax, name)
if state == 'present':
if not volume:
kwargs = dict()
if image:
kwargs['image'] = image
try:
volume = cbs.create(name, size=size, volume_type=volume_type,
description=description,
metadata=meta,
snapshot_id=snapshot_id)
snapshot_id=snapshot_id, **kwargs)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
@ -145,10 +158,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
attempts=attempts)
volume.get()
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
instance = rax_to_dict(volume)
result = dict(changed=changed, volume=instance)
@ -164,6 +174,7 @@ def cloud_block_storage(module, state, name, description, meta, size,
elif state == 'absent':
if volume:
instance = rax_to_dict(volume)
try:
volume.delete()
changed = True
@ -177,7 +188,8 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
description=dict(),
description=dict(type='str'),
image=dict(type='str'),
meta=dict(type='dict', default={}),
name=dict(required=True),
size=dict(type='int', default=100),
@ -198,6 +210,7 @@ def main():
module.fail_json(msg='pyrax is required for this module')
description = module.params.get('description')
image = module.params.get('image')
meta = module.params.get('meta')
name = module.params.get('name')
size = module.params.get('size')
@ -210,11 +223,12 @@ def main():
setup_rax_module(module, pyrax)
cloud_block_storage(module, state, name, description, meta, size,
snapshot_id, volume_type, wait, wait_timeout)
snapshot_id, volume_type, wait, wait_timeout,
image)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
# invoke the module
main()

@ -90,11 +90,6 @@ except ImportError:
def cloud_block_storage_attachments(module, state, volume, server, device,
wait, wait_timeout):
for arg in (state, volume, server, device):
if not arg:
module.fail_json(msg='%s is required for rax_cbs_attachments' %
arg)
cbs = pyrax.cloud_blockstorage
cs = pyrax.cloudservers
@ -133,7 +128,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
result = dict(changed=changed)
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id
@ -142,6 +137,9 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
pyrax.utils.wait_until(volume, 'status', 'in-use',
interval=5, attempts=attempts)
volume.get()
result['volume'] = rax_to_dict(volume)
if 'msg' in result:
module.fail_json(**result)
else:
@ -167,12 +165,7 @@ def cloud_block_storage_attachments(module, state, volume, server, device,
elif volume.attachments:
module.fail_json(msg='Volume is attached to another server')
for key, value in vars(volume).iteritems():
if (isinstance(value, NON_CALLABLES) and
not key.startswith('_')):
instance[key] = value
result = dict(changed=changed, volume=instance)
result = dict(changed=changed, volume=rax_to_dict(volume))
if volume.status == 'error':
result['msg'] = '%s failed to build' % volume.id

@ -79,12 +79,6 @@ def find_database(instance, name):
def save_database(module, cdb_id, name, character_set, collate):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_database" '
'module' % arg)
cdb = pyrax.cloud_databases
try:
@ -111,12 +105,6 @@ def save_database(module, cdb_id, name, character_set, collate):
def delete_database(module, cdb_id, name):
for arg, value in dict(cdb_id=cdb_id, name=name).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb_database" '
'module' % arg)
cdb = pyrax.cloud_databases
try:
@ -136,7 +124,8 @@ def delete_database(module, cdb_id, name):
else:
changed = True
module.exit_json(changed=changed, action='delete')
module.exit_json(changed=changed, action='delete',
database=rax_to_dict(database))
def rax_cdb_database(module, state, cdb_id, name, character_set, collate):

@ -140,10 +140,6 @@ except ImportError:
def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
vip_type, timeout, wait, wait_timeout, vip_id):
for arg in (state, name, port, protocol, vip_type):
if not arg:
module.fail_json(msg='%s is required for rax_clb' % arg)
if int(timeout) < 30:
module.fail_json(msg='"timeout" must be greater than or equal to 30')
@ -156,7 +152,14 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
for balancer in clb.list():
balancer_list = clb.list()
while balancer_list:
retrieved = clb.list(marker=balancer_list.pop().id)
balancer_list.extend(retrieved)
if len(retrieved) < 2:
break
for balancer in balancer_list:
if name != balancer.name and name != balancer.id:
continue
@ -257,7 +260,7 @@ def main():
algorithm=dict(choices=CLB_ALGORITHMS,
default='LEAST_CONNECTIONS'),
meta=dict(type='dict', default={}),
name=dict(),
name=dict(required=True),
port=dict(type='int', default=80),
protocol=dict(choices=CLB_PROTOCOLS, default='HTTP'),
state=dict(default='present', choices=['present', 'absent']),

@ -150,21 +150,6 @@ def _get_node(lb, node_id=None, address=None, port=None):
return None
def _is_primary(node):
"""Return True if node is primary and enabled"""
return (node.type.lower() == 'primary' and
node.condition.lower() == 'enabled')
def _get_primary_nodes(lb):
"""Return a list of primary and enabled nodes"""
nodes = []
for node in lb.nodes:
if _is_primary(node):
nodes.append(node)
return nodes
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
@ -230,13 +215,6 @@ def main():
if state == 'absent':
if not node: # Removing a non-existent node
module.exit_json(changed=False, state=state)
# The API detects this as well but currently pyrax does not return a
# meaningful error message
if _is_primary(node) and len(_get_primary_nodes(lb)) == 1:
module.fail_json(
msg='At least one primary node has to be enabled')
try:
lb.delete_node(node)
result = {}
@ -299,5 +277,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
# invoke the module
main()

@ -55,10 +55,6 @@ except ImportError:
def cloud_identity(module, state, identity):
for arg in (state, identity):
if not arg:
module.fail_json(msg='%s is required for rax_identity' % arg)
instance = dict(
authenticated=identity.authenticated,
credentials=identity._creds_file
@ -79,7 +75,7 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
state=dict(default='present', choices=['present', 'absent'])
state=dict(default='present', choices=['present'])
)
)
@ -95,7 +91,7 @@ def main():
setup_rax_module(module, pyrax)
if pyrax.identity is None:
if not pyrax.identity:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
@ -106,5 +102,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
# invoke the module
main()

@ -104,7 +104,7 @@ def rax_keypair(module, name, public_key, state):
keypair = {}
if state == 'present':
if os.path.isfile(public_key):
if public_key and os.path.isfile(public_key):
try:
f = open(public_key)
public_key = f.read()
@ -143,7 +143,7 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
name=dict(),
name=dict(required=True),
public_key=dict(),
state=dict(default='present', choices=['absent', 'present']),
)

@ -65,10 +65,6 @@ except ImportError:
def cloud_network(module, state, label, cidr):
for arg in (state, label, cidr):
if not arg:
module.fail_json(msg='%s is required for cloud_networks' % arg)
changed = False
network = None
networks = []
@ -79,6 +75,9 @@ def cloud_network(module, state, label, cidr):
'incorrectly capitalized region name.')
if state == 'present':
if not cidr:
module.fail_json(msg='missing required arguments: cidr')
try:
network = pyrax.cloud_networks.find_network_by_label(label)
except pyrax.exceptions.NetworkNotFound:
@ -115,7 +114,7 @@ def main():
dict(
state=dict(default='present',
choices=['present', 'absent']),
label=dict(),
label=dict(required=True),
cidr=dict()
)
)

@ -69,11 +69,13 @@ options:
default: present
choices: ['present', 'powered_off', 'absent', 'powered_on', 'restarted', 'reconfigured']
from_template:
version_added: "1.9"
description:
- Specifies if the VM should be deployed from a template (cannot be ran with state)
default: no
choices: ['yes', 'no']
template_src:
version_added: "1.9"
description:
- Name of the source template to deploy from
default: None
@ -684,7 +686,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
hfmor = dcprops.hostFolder._obj
# virtualmachineFolder managed object reference
if vm_extra_config['folder']:
if vm_extra_config.get('folder'):
if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values():
vsphere_client.disconnect()
module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])

@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import copy
import sys
import datetime
import traceback
@ -99,12 +100,22 @@ EXAMPLES = '''
creates: /path/to/database
'''
# Dict of options and their defaults
OPTIONS = {'chdir': None,
'creates': None,
'executable': None,
'NO_LOG': None,
'removes': None,
'warn': True,
}
# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
# ^ look for a space or the beginning of the line
# 2. (creates|removes|chdir|executable|NO_LOG)=
# ^ look for a valid param, followed by an '='
# 2. ({options_list})=
# ^ expanded to (chdir|creates|executable...)=
# look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later
@ -114,8 +125,11 @@ EXAMPLES = '''
# ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of
# the line is reached
PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
OPTIONS_REGEX = '|'.join(OPTIONS.keys())
PARAM_REGEX = re.compile(
r'(^|\s)(' + OPTIONS_REGEX +
r')=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)'
)
def check_command(commandline):
@ -148,7 +162,7 @@ def main():
args = module.params['args']
creates = module.params['creates']
removes = module.params['removes']
warn = module.params.get('warn', True)
warn = module.params['warn']
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
@ -232,13 +246,8 @@ class CommandModule(AnsibleModule):
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS
params = {}
params['chdir'] = None
params['creates'] = None
params['removes'] = None
params['shell'] = False
params['executable'] = None
params['warn'] = True
params = copy.copy(OPTIONS)
params['shell'] = False
if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "")
params['shell'] = True
@ -251,7 +260,7 @@ class CommandModule(AnsibleModule):
# check to see if this is a special parameter for the command
k, v = x.split('=', 1)
v = unquote(v.strip())
if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'):
if k in OPTIONS.keys():
if k == "chdir":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v) and os.path.isdir(v)):

@ -102,6 +102,7 @@ EXAMPLES = '''
import ConfigParser
import os
import pipes
import stat
try:
import MySQLdb
except ImportError:
@ -128,7 +129,7 @@ def db_dump(module, host, user, password, db_name, target, port, socket=None):
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
cmd = cmd + ' | gzip > ' + pipes.quote(target)
@ -148,42 +149,42 @@ def db_import(module, host, user, password, db_name, target, port, socket=None):
if socket is not None:
cmd += " --socket=%s" % pipes.quote(socket)
else:
cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
cmd += " --host=%s --port=%i" % (pipes.quote(host), port)
cmd += " -D %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
gunzip_path = module.get_bin_path('gunzip')
if gunzip_path:
rc, stdout, stderr = module.run_command('%s %s' % (gunzip_path, target))
if rc != 0:
return rc, stdout, stderr
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
gzip_path = module.get_bin_path('gzip')
if not gzip_path:
module.fail_json(msg="gzip command not found")
#gzip -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (gzip_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
gzip_path = module.get_bin_path('gzip')
if gzip_path:
rc, stdout, stderr = module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
else:
module.fail_json(msg="gzip command not found")
else:
module.fail_json(msg="gunzip command not found")
finally:
#gzip file back up
module.run_command('%s %s' % (gzip_path, os.path.splitext(target)[0]))
elif os.path.splitext(target)[-1] == '.bz2':
bunzip2_path = module.get_bin_path('bunzip2')
if bunzip2_path:
rc, stdout, stderr = module.run_command('%s %s' % (bunzip2_path, target))
if rc != 0:
return rc, stdout, stderr
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
bzip2_path = module.get_bin_path('bzip2')
if not bzip2_path:
module.fail_json(msg="bzip2 command not found")
#bzip2 -d file (uncompress)
rc, stdout, stderr = module.run_command('%s -d %s' % (bzip2_path, target))
if rc != 0:
return rc, stdout, stderr
#Import sql
cmd += " < %s" % pipes.quote(os.path.splitext(target)[0])
try:
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
return rc, stdout, stderr
bzip2_path = module.get_bin_path('bzip2')
if bzip2_path:
rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
else:
module.fail_json(msg="bzip2 command not found")
else:
module.fail_json(msg="bunzip2 command not found")
finally:
#bzip2 file back up
rc, stdout, stderr = module.run_command('%s %s' % (bzip2_path, os.path.splitext(target)[0]))
else:
cmd += " < %s" % pipes.quote(target)
rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
@ -265,7 +266,7 @@ def main():
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="localhost"),
login_port=dict(default="3306"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
name=dict(required=True, aliases=['db']),
encoding=dict(default=""),
@ -283,6 +284,10 @@ def main():
collation = module.params["collation"]
state = module.params["state"]
target = module.params["target"]
socket = module.params["login_unix_socket"]
login_port = module.params["login_port"]
if login_port < 0 or login_port > 65535:
module.fail_json(msg="login_port must be a valid unix port number (0-65535)")
# make sure the target path is expanded for ~ and $HOME
if target is not None:
@ -310,21 +315,27 @@ def main():
module.fail_json(msg="with state=%s target is required" % (state))
connect_to_db = db
else:
connect_to_db = 'mysql'
connect_to_db = ''
try:
if module.params["login_unix_socket"]:
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db=connect_to_db)
elif module.params["login_port"] != "3306" and module.params["login_host"] == "localhost":
if socket:
try:
socketmode = os.stat(socket).st_mode
if not stat.S_ISSOCK(socketmode):
module.fail_json(msg="%s, is not a socket, unable to connect" % socket)
except OSError:
module.fail_json(msg="%s, does not exist, unable to connect" % socket)
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db)
elif login_port != 3306 and module.params["login_host"] == "localhost":
module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db=connect_to_db)
db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
cursor = db_connection.cursor()
except Exception, e:
if "Unknown database" in str(e):
errno, errstr = e.args
module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
else:
module.fail_json(msg="unable to connect, check login_user and login_password are correct, or alternatively check ~/.my.cnf contains credentials")
module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running")
changed = False
if db_exists(cursor, db):
@ -336,7 +347,7 @@ def main():
elif state == "dump":
rc, stdout, stderr = db_dump(module, login_host, login_user,
login_password, db, target,
port=module.params['login_port'],
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:
module.fail_json(msg="%s" % stderr)
@ -345,7 +356,7 @@ def main():
elif state == "import":
rc, stdout, stderr = db_import(module, login_host, login_user,
login_password, db, target,
port=module.params['login_port'],
port=login_port,
socket=module.params['login_unix_socket'])
if rc != 0:
module.fail_json(msg="%s" % stderr)

@ -117,6 +117,9 @@ EXAMPLES = """
# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present
# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present
# Ensure no user named 'sally' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent
@ -159,7 +162,7 @@ VALID_PRIVS = frozenset(('CREATE', 'DROP', 'GRANT', 'GRANT OPTION',
'EXECUTE', 'FILE', 'CREATE TABLESPACE', 'CREATE USER',
'PROCESS', 'PROXY', 'RELOAD', 'REPLICATION CLIENT',
'REPLICATION SLAVE', 'SHOW DATABASES', 'SHUTDOWN',
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE',))
'SUPER', 'ALL', 'ALL PRIVILEGES', 'USAGE', 'REQUIRESSL'))
class InvalidPrivsError(Exception):
pass
@ -261,6 +264,8 @@ def privileges_get(cursor, user,host):
privileges = [ pick(x) for x in privileges]
if "WITH GRANT OPTION" in res.group(4):
privileges.append('GRANT')
if "REQUIRE SSL" in res.group(4):
privileges.append('REQUIRESSL')
db = res.group(2)
output[db] = privileges
return output
@ -294,6 +299,11 @@ def privileges_unpack(priv):
if '*.*' not in output:
output['*.*'] = ['USAGE']
# if we are only specifying something like REQUIRESSL in *.* we still need
# to add USAGE as a privilege to avoid syntax errors
if priv.find('REQUIRESSL') != -1 and 'USAGE' not in output['*.*']:
output['*.*'].append('USAGE')
return output
def privileges_revoke(cursor, user,host,db_table,grant_option):
@ -313,15 +323,16 @@ def privileges_grant(cursor, user,host,db_table,priv):
# Escape '%' since mysql db.execute uses a format string and the
# specification of db and table often use a % (SQL wildcard)
db_table = db_table.replace('%', '%%')
priv_string = ",".join(filter(lambda x: x != 'GRANT', priv))
priv_string = ",".join(filter(lambda x: x not in [ 'GRANT', 'REQUIRESSL' ], priv))
query = ["GRANT %s ON %s" % (priv_string, mysql_quote_identifier(db_table, 'table'))]
query.append("TO %s@%s")
if 'GRANT' in priv:
query.append("WITH GRANT OPTION")
if 'REQUIRESSL' in priv:
query.append("REQUIRE SSL")
query = ' '.join(query)
cursor.execute(query, (user, host))
def strip_quotes(s):
""" Remove surrounding single or double quotes
@ -413,7 +424,7 @@ def connect(module, login_user, login_password):
if module.params["login_unix_socket"]:
db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
else:
db_connection = MySQLdb.connect(host=module.params["login_host"], port=int(module.params["login_port"]), user=login_user, passwd=login_password, db="mysql")
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
return db_connection.cursor()
# ===========================================
@ -426,7 +437,7 @@ def main():
login_user=dict(default=None),
login_password=dict(default=None),
login_host=dict(default="localhost"),
login_port=dict(default="3306"),
login_port=dict(default=3306, type='int'),
login_unix_socket=dict(default=None),
user=dict(required=True, aliases=['name']),
password=dict(default=None),
@ -487,16 +498,14 @@ def main():
if user_exists(cursor, user, host):
try:
changed = user_mod(cursor, user, host, password, priv, append_privs)
except SQLParseError, e:
module.fail_json(msg=str(e))
except InvalidPrivsError, e:
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
else:
if password is None:
module.fail_json(msg="password parameter required when adding a user")
try:
changed = user_add(cursor, user, host, password, priv)
except SQLParseError, e:
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
module.fail_json(msg=str(e))
elif state == "absent":
if user_exists(cursor, user, host):

@ -275,7 +275,7 @@ def main():
kw["host"] = module.params["login_unix_socket"]
try:
db_connection = psycopg2.connect(database="template1", **kw)
db_connection = psycopg2.connect(database="postgres", **kw)
# Enable autocommit so we can create databases
if psycopg2.__version__ >= '2.4.2':
db_connection.autocommit = True

@ -474,10 +474,13 @@ class Connection(object):
if obj_type == 'group':
set_what = ','.join(pg_quote_identifier(i, 'role') for i in obj_ids)
else:
# function types are already quoted above
if obj_type != 'function':
obj_ids = [pg_quote_identifier(i, 'table') for i in obj_ids]
# Note: obj_type has been checked against a set of string literals
# and privs was escaped when it was parsed
set_what = '%s ON %s %s' % (','.join(privs), obj_type,
','.join(pg_quote_identifier(i, 'table') for i in obj_ids))
','.join(obj_ids))
# for_whom: SQL-fragment specifying for whom to set the above
if roles == 'PUBLIC':

@ -102,6 +102,14 @@ EXAMPLES = '''
register: acl_info
'''
RETURN = '''
acl:
description: Current acl on provided path (after changes, if any)
returned: success
type: list
sample: [ "user::rwx", "group::rwx", "other::rwx" ]
'''
def normalize_permissions(p):
perms = ['-','-','-']
for char in p:
@ -111,6 +119,9 @@ def normalize_permissions(p):
perms[1] = 'w'
if char == 'x':
perms[2] = 'x'
if char == 'X':
if perms[2] != 'x': # 'x' is more permissive
perms[2] = 'X'
return ''.join(perms)
def split_entry(entry):

@ -108,6 +108,68 @@ EXAMPLES = '''
- copy: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/path/to/file.txt"
src:
description: source file used for the copy on the target machine
returned: changed
type: string
sample: "/home/httpd/.ansible/tmp/ansible-tmp-1423796390.97-147729857856000/source"
md5sum:
description: md5 checksum of the file after running copy
returned: when supported
type: string
sample: "2a5aeecc61dc98c4d780b14b330e3282"
checksum:
description: checksum of the file after running copy
returned: success
type: string
sample: "6e642bb8dd5c2e027bf21dd923337cbb4214f827"
backup_file:
description: name of backup file created
returned: changed and if backup=yes
type: string
sample: "/path/to/file.txt.2015-02-12@22:09~"
gid:
description: group id of the file, after execution
returned: success
type: int
sample: 100
group:
description: group of the file, after execution
returned: success
type: string
sample: "httpd"
owner:
description: owner of the file, after execution
returned: success
type: string
sample: "httpd"
uid:
description: owner id of the file, after execution
returned: success
type: int
sample: 100
mode:
description: permissions of the target, after execution
returned: success
type: string
sample: "0644"
size:
description: size of the target, after execution
returned: success
type: int
sample: 1220
state:
description: permissions of the target, after execution
returned: success
type: string
sample: "file"
'''
def split_pre_existing_dir(dirname):
'''
@ -181,7 +243,7 @@ def main():
if original_basename and dest.endswith("/"):
dest = os.path.join(dest, original_basename)
dirname = os.path.dirname(dest)
if not os.path.exists(dirname) and '/' in dirname:
if not os.path.exists(dirname) and os.path.isabs(dirname):
(pre_existing_dir, new_directory_list) = split_pre_existing_dir(dirname)
os.makedirs(dirname)
directory_args = module.load_file_common_arguments(module.params)

@ -45,10 +45,10 @@ options:
flat:
version_added: "1.2"
description:
Allows you to override the default behavior of prepending hostname/path/to/file to
the destination. If dest ends with '/', it will use the basename of the source
file, similar to the copy module. Obviously this is only handy if the filenames
are unique.
- Allows you to override the default behavior of prepending
hostname/path/to/file to the destination. If dest ends with '/', it
will use the basename of the source file, similar to the copy module.
Obviously this is only handy if the filenames are unique.
requirements: []
author: Michael DeHaan
'''

@ -57,7 +57,7 @@ options:
or M(template) module if you want that behavior. If C(link), the symbolic
link will be created or changed. Use C(hard) for hardlinks. If C(absent),
directories will be recursively deleted, and files or symlinks will be unlinked.
If C(touch) (new in 1.4), an empty file will be created if the c(path) does not
If C(touch) (new in 1.4), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way `touch` works from the command line).
required: false
@ -88,6 +88,7 @@ options:
'''
EXAMPLES = '''
# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
- file: path=/etc/foo.conf owner=foo group=foo mode=0644
- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link
@ -103,6 +104,45 @@ EXAMPLES = '''
'''
def get_state(path):
    '''Classify *path* as one of: link, directory, hard, file, absent.

    Symlinks are detected first (lexists/islink do not follow the link),
    then directories, then files whose inode has more than one hard link.
    Anything else that exists is reported as a plain 'file'.
    '''
    # lexists (unlike exists) is True for a dangling symlink as well.
    if not os.path.lexists(path):
        return 'absent'
    if os.path.islink(path):
        return 'link'
    if os.path.isdir(path):
        return 'directory'
    if os.stat(path).st_nlink > 1:
        return 'hard'
    # could be many other things, but defaulting to file
    return 'file'
def recursive_set_attributes(module, path, follow, file_args):
    '''Walk *path* and apply the ownership/mode attributes in *file_args*
    to every directory and file found, returning True if anything changed.

    For symlinks, the attributes are set on the link itself; when *follow*
    is true the link target is also processed (recursing into it when the
    target is a directory).
    '''
    # NOTE(review): structure reconstructed from a diff dump — confirm the
    # follow-branch nesting against the upstream file module before relying
    # on this exact layout.
    changed = False
    for root, dirs, files in os.walk(path):
        for fsobj in dirs + files:
            fsname = os.path.join(root, fsobj)
            if not os.path.islink(fsname):
                tmp_file_args = file_args.copy()
                tmp_file_args['path']=fsname
                # set_fs_attributes_if_different is an AnsibleModule helper;
                # presumably it returns the new changed flag — TODO confirm.
                changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
            else:
                # Apply attributes to the symlink entry itself first.
                tmp_file_args = file_args.copy()
                tmp_file_args['path']=fsname
                changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
                if follow:
                    # Resolve the link target relative to the walk root.
                    fsname = os.path.join(root, os.readlink(fsname))
                    if os.path.isdir(fsname):
                        # Recurse into the directory the link points at.
                        changed |= recursive_set_attributes(module, fsname, follow, file_args)
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path']=fsname
                    changed |= module.set_fs_attributes_if_different(tmp_file_args, changed)
    return changed
def main():
module = AnsibleModule(
@ -143,18 +183,7 @@ def main():
pass
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
# Find out current state
prev_state = 'absent'
if os.path.lexists(path):
if os.path.islink(path):
prev_state = 'link'
elif os.path.isdir(path):
prev_state = 'directory'
elif os.stat(path).st_nlink > 1:
prev_state = 'hard'
else:
# could be many other things, but defaulting to file
prev_state = 'file'
prev_state = get_state(path)
# state should default to file, but since that creates many conflicts,
# default to 'current' when it exists.
@ -172,7 +201,7 @@ def main():
if state in ['link','hard']:
if follow and state == 'link':
# use the current target of the link as the source
src = os.readlink(path)
src = os.path.realpath(path)
else:
module.fail_json(msg='src and dest are required for creating links')
@ -212,7 +241,15 @@ def main():
module.exit_json(path=path, changed=False)
elif state == 'file':
if state != prev_state:
if follow and prev_state == 'link':
# follow symlink and operate on original
path = os.path.realpath(path)
prev_state = get_state(path)
file_args['path'] = path
if prev_state not in ['file','hard']:
# file is not absent and any other state is a conflict
module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
@ -220,6 +257,10 @@ def main():
module.exit_json(path=path, changed=changed)
elif state == 'directory':
if follow and prev_state == 'link':
path = os.path.realpath(path)
prev_state = get_state(path)
if prev_state == 'absent':
if module.check_mode:
module.exit_json(changed=True)
@ -247,12 +288,7 @@ def main():
changed = module.set_fs_attributes_if_different(file_args, changed)
if recurse:
for root,dirs,files in os.walk( file_args['path'] ):
for fsobj in dirs + files:
fsname=os.path.join(root, fsobj)
tmp_file_args = file_args.copy()
tmp_file_args['path']=fsname
changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
changed |= recursive_set_attributes(module, file_args['path'], follow, file_args)
module.exit_json(path=path, changed=changed)
@ -336,13 +372,13 @@ def main():
open(path, 'w').close()
except OSError, e:
module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
elif prev_state in ['file', 'directory']:
elif prev_state in ['file', 'directory', 'hard']:
try:
os.utime(path, None)
except OSError, e:
module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
else:
module.fail_json(msg='Cannot touch other than files and directories')
module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
try:
module.set_fs_attributes_if_different(file_args, True)
except SystemExit, e:
@ -360,5 +396,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -97,9 +97,9 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
changed = False
if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3:
cp = ConfigParser.ConfigParser(allow_no_value=True)
cp = ConfigParser.ConfigParser(allow_no_value=True)
else:
cp = ConfigParser.ConfigParser()
cp = ConfigParser.ConfigParser()
cp.optionxform = identity
try:
@ -126,7 +126,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
if state == 'present':
# DEFAULT section is always there by DEFAULT, so never try to add it.
if cp.has_section(section) == False and section.upper() != 'DEFAULT':
if not cp.has_section(section) and section.upper() != 'DEFAULT':
cp.add_section(section)
changed = True
@ -144,7 +144,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
cp.set(section, option, value)
changed = True
if changed:
if changed and not module.check_mode:
if backup:
module.backup_local(filename)
@ -152,7 +152,7 @@ def do_ini(module, filename, section=None, option=None, value=None, state='prese
f = open(filename, 'w')
cp.write(f)
except:
module.fail_json(msg="Can't creat %s" % filename)
module.fail_json(msg="Can't create %s" % filename)
return changed
@ -183,7 +183,8 @@ def main():
backup = dict(default='no', type='bool'),
state = dict(default='present', choices=['present', 'absent'])
),
add_file_common_args = True
add_file_common_args = True,
supports_check_mode = True
)
info = dict()

@ -85,8 +85,9 @@ options:
default: EOF
description:
- Used with C(state=present). If specified, the line will be inserted
after the specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
after the last match of specified regular expression. A special value is
available; C(EOF) for inserting the line at the end of the file.
If specified regular expression has no matches, EOF will be used instead.
May not be used with C(backrefs).
choices: [ 'EOF', '*regex*' ]
insertbefore:
@ -94,9 +95,10 @@ options:
version_added: "1.1"
description:
- Used with C(state=present). If specified, the line will be inserted
before the specified regular expression. A value is available;
C(BOF) for inserting the line at the beginning of the file.
May not be used with C(backrefs).
before the last match of specified regular expression. A value is
available; C(BOF) for inserting the line at the beginning of the file.
If specified regular expression has no matches, the line will be
inserted at the end of the file. May not be used with C(backrefs).
choices: [ 'BOF', '*regex*' ]
create:
required: false
@ -256,9 +258,9 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
msg = 'line added'
changed = True
# Add it to the end of the file if requested or
# if insertafter=/insertbefore didn't match anything
# if insertafter/insertbefore didn't match anything
# (so default behaviour is to add at the end)
elif insertafter == 'EOF':
elif insertafter == 'EOF' or index[1] == -1:
# If the file is not empty then ensure there's a newline before the added line
if len(lines)>0 and not (lines[-1].endswith('\n') or lines[-1].endswith('\r')):
@ -267,9 +269,6 @@ def present(module, dest, regexp, line, insertafter, insertbefore, create,
lines.append(line + os.linesep)
msg = 'line added'
changed = True
# Do nothing if insert* didn't match
elif index[1] == -1:
pass
# insert* matched, but not the regexp
else:
lines.insert(index[1], line + os.linesep)

@ -152,6 +152,8 @@ def main():
if changed and not module.check_mode:
if params['backup'] and os.path.exists(dest):
module.backup_local(dest)
if params['follow'] and os.path.islink(dest):
dest = os.path.realpath(dest)
write_changes(module, result[0], dest)
msg, changed = check_file_attrs(module, changed, msg)

@ -69,6 +69,189 @@ EXAMPLES = '''
- stat: path=/path/to/myhugefile get_md5=no
'''
RETURN = '''
stat:
description: dictionary containing all the stat data
returned: success
type: dictionary
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: boolean
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, amount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Original path
returned: success, path exists and user can read stats and the path is a symbolic link
type: boolean
sample: True
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path supports hashing and md5 is supported
type: boolean
sample: True
checksum:
description: hash of the path
returned: success, path exists and user can read stats and path supports hashing
type: boolean
sample: True
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
'''
import os
import sys
from stat import *

@ -255,7 +255,7 @@ def main():
group = module.params['group']
rsync_opts = module.params['rsync_opts']
cmd = '%s --delay-updates -FF' % rsync
cmd = '%s --delay-updates -F' % rsync
if compress:
cmd = cmd + ' --compress'
if rsync_timeout:

@ -23,14 +23,14 @@ DOCUMENTATION = '''
---
module: unarchive
version_added: 1.4
short_description: Copies an archive to a remote location and unpack it
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: files
description:
- The M(unarchive) module copies an archive file from the local machine to a remote and unpacks it.
- The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target.
options:
src:
description:
- Local path to archive file to copy to the remote server; can be absolute or relative.
- If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.
required: true
default: null
dest:
@ -40,7 +40,7 @@ options:
default: null
copy:
description:
- "if true, the file is copied from the 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
- "If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine."
required: false
choices: [ "yes", "no" ]
default: "yes"
@ -76,18 +76,35 @@ EXAMPLES = '''
'''
import os
from zipfile import ZipFile
class UnarchiveError(Exception):
pass
# class to handle .zip files
class ZipFile(object):
class ZipArchive(object):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('unzip')
self._files_in_archive = []
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
archive = ZipFile(self.src)
try:
self._files_in_archive = archive.namelist()
except:
raise UnarchiveError('Unable to list files in the archive')
return self._files_in_archive
def is_unarchived(self):
def is_unarchived(self, mode, owner, group):
return dict(unarchived=False)
def unarchive(self):
@ -106,19 +123,61 @@ class ZipFile(object):
# class to handle gzipped tar files
class TgzFile(object):
class TgzArchive(object):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
# Prefer gtar (GNU tar) as it supports the compression options -zjJ
self.cmd_path = self.module.get_bin_path('gtar', None)
if not self.cmd_path:
# Fallback to tar
self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'z'
self._files_in_archive = []
@property
def files_in_archive(self, force_refresh=False):
if self._files_in_archive and not force_refresh:
return self._files_in_archive
def is_unarchived(self):
cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc != 0:
raise UnarchiveError('Unable to list files in the archive')
for filename in out.splitlines():
if filename:
self._files_in_archive.append(filename)
return self._files_in_archive
def is_unarchived(self, mode, owner, group):
cmd = '%s -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
unarchived = (rc == 0)
if not unarchived:
# Check whether the differences are in something that we're
# setting anyway
# What will be set
to_be_set = set()
for perm in (('Mode', mode), ('Gid', group), ('Uid', owner)):
if perm[1] is not None:
to_be_set.add(perm[0])
# What is different
changes = set()
difference_re = re.compile(r': (.*) differs$')
for line in out.splitlines():
match = difference_re.search(line)
if not match:
# Unknown tar output. Assume we have changes
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
changes.add(match.groups()[0])
if changes and changes.issubset(to_be_set):
unarchived = True
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
@ -129,47 +188,41 @@ class TgzFile(object):
def can_handle_archive(self):
if not self.cmd_path:
return False
cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc == 0:
if len(out.splitlines(True)) > 0:
try:
if self.files_in_archive:
return True
except UnarchiveError:
pass
# Errors and no files in archive assume that we weren't able to
# properly unarchive it
return False
# class to handle tar files that aren't compressed
class TarFile(TgzFile):
class TarArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarArchive, self).__init__(src, dest, module)
self.zipflag = ''
# class to handle bzip2 compressed tar files
class TarBzip(TgzFile):
class TarBzipArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarBzipArchive, self).__init__(src, dest, module)
self.zipflag = 'j'
# class to handle xz compressed tar files
class TarXz(TgzFile):
class TarXzArchive(TgzArchive):
def __init__(self, src, dest, module):
self.src = src
self.dest = dest
self.module = module
self.cmd_path = self.module.get_bin_path('tar')
super(TarXzArchive, self).__init__(src, dest, module)
self.zipflag = 'J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, module):
handlers = [TgzFile, ZipFile, TarFile, TarBzip, TarXz]
handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]
for handler in handlers:
obj = handler(src, dest, module)
if obj.can_handle_archive():
@ -193,6 +246,7 @@ def main():
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
file_args = module.load_file_common_arguments(module.params)
# did tar file arrive?
if not os.path.exists(src):
@ -214,23 +268,29 @@ def main():
res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
# do we need to do unpack?
res_args['check_results'] = handler.is_unarchived()
res_args['check_results'] = handler.is_unarchived(file_args['mode'],
file_args['owner'], file_args['group'])
if res_args['check_results']['unarchived']:
res_args['changed'] = False
module.exit_json(**res_args)
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest))
else:
# do the unpack
try:
res_args['extract_results'] = handler.unarchive()
if res_args['extract_results']['rc'] != 0:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
except IOError:
module.fail_json(msg="failed to unpack %s to %s" % (src, dest))
else:
res_args['changed'] = True
res_args['changed'] = True
# do we need to change perms?
for filename in handler.files_in_archive:
file_args['path'] = os.path.join(dest, filename)
res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])
module.exit_json(**res_args)
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -5,7 +5,7 @@ DOCUMENTATION = '''
module: add_host
short_description: add a host (and alternatively a group) to the ansible-playbook in-memory inventory
description:
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
- Use variables to create new hosts and groups in inventory for use in later plays of the same playbook.
Takes variables so you can define the new hosts more fully.
version_added: "0.9"
options:
@ -13,12 +13,15 @@ options:
aliases: [ 'hostname', 'host' ]
description:
- The hostname/ip of the host to add to the inventory, can include a colon and a port number.
required: true
required: true
groups:
aliases: [ 'groupname', 'group' ]
description:
- The groups to add the hostname to, comma separated.
required: false
notes:
- This module bypasses the play host loop and only runs once for all the hosts in the play, if you need it
to iterate use a with_ directive.
author: Seth Vidal
'''

@ -381,7 +381,7 @@ def main():
# of uri executions.
creates = os.path.expanduser(creates)
if os.path.exists(creates):
module.exit_json(stdout="skipped, since %s exists" % creates, skipped=True, changed=False, stderr=False, rc=0)
module.exit_json(stdout="skipped, since %s exists" % creates, changed=False, stderr=False, rc=0)
if removes is not None:
# do not run the command if the line contains removes=filename
@ -389,7 +389,7 @@ def main():
# of uri executions.
v = os.path.expanduser(removes)
if not os.path.exists(removes):
module.exit_json(stdout="skipped, since %s does not exist" % removes, skipped=True, changed=False, stderr=False, rc=0)
module.exit_json(stdout="skipped, since %s does not exist" % removes, changed=False, stderr=False, rc=0)
# httplib2 only sends authentication after the server asks for it with a 401.

@ -98,7 +98,7 @@ options:
required: false
default: null
notes:
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified.
- Please note that virtualenv (U(http://www.virtualenv.org/)) must be installed on the remote host if the virtualenv parameter is specified and the virtualenv needs to be initialized.
requirements: [ "virtualenv", "pip" ]
author: Matt Wright
'''
@ -113,6 +113,9 @@ EXAMPLES = '''
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'
# Install (MyApp) from local tarball
- pip: name='file:///path/to/MyApp.tar.gz'
# Install (Bottle) into the specified (virtualenv), inheriting none of the globally installed modules
- pip: name=bottle virtualenv=/my_app/venv
@ -252,12 +255,14 @@ def main():
if env:
env = os.path.expanduser(env)
virtualenv = os.path.expanduser(virtualenv_command)
if os.path.basename(virtualenv) == virtualenv:
virtualenv = module.get_bin_path(virtualenv_command, True)
if not os.path.exists(os.path.join(env, 'bin', 'activate')):
if module.check_mode:
module.exit_json(changed=True)
virtualenv = os.path.expanduser(virtualenv_command)
if os.path.basename(virtualenv) == virtualenv:
virtualenv = module.get_bin_path(virtualenv_command, True)
if module.params['virtualenv_site_packages']:
cmd = '%s --system-site-packages %s' % (virtualenv, env)
else:
@ -278,7 +283,7 @@ def main():
pip = _get_pip(module, env, module.params['executable'])
cmd = '%s %s' % (pip, state_map[state])
# If there's a virtualenv we want things we install to be able to use other
# installations that exist as binaries within this virtualenv. Example: we
# install cython and then gevent -- gevent needs to use the cython binary,
@ -308,7 +313,7 @@ def main():
cmd += ' %s' % _get_full_name(name, version)
elif requirements:
cmd += ' -r %s' % requirements
this_dir = tempfile.gettempdir()
if chdir:
this_dir = os.path.join(this_dir, chdir)
@ -319,7 +324,7 @@ def main():
elif name.startswith('svn+') or name.startswith('git+') or \
name.startswith('hg+') or name.startswith('bzr+'):
module.exit_json(changed=True)
freeze_cmd = '%s freeze' % pip
rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir)

@ -34,10 +34,10 @@ options:
default: null
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed.
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed.
required: false
default: present
choices: [ "latest", "absent", "present" ]
choices: [ "latest", "absent", "present", "build-dep" ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
@ -133,6 +133,9 @@ EXAMPLES = '''
# Install a .deb package
- apt: deb=/tmp/mypackage.deb
# Install the build dependencies for package "foo"
- apt: pkg=foo state=build-dep
'''
@ -144,6 +147,7 @@ warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import os
import datetime
import fnmatch
import itertools
# APT related constants
APT_ENV_VARS = dict(
@ -173,6 +177,24 @@ def package_split(pkgspec):
else:
return parts[0], None
def package_versions(pkgname, pkg, pkg_cache):
try:
versions = set(p.version for p in pkg.versions)
except AttributeError:
# assume older version of python-apt is installed
# apt.package.Package#versions require python-apt >= 0.7.9.
pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
pkg_versions = (p.VersionList for p in pkg_cache_list)
versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
return versions
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
def package_status(m, pkgname, version, cache, state):
try:
# get the package from the cache, as well as the
@ -183,9 +205,14 @@ def package_status(m, pkgname, version, cache, state):
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
if cache.get_providing_packages(pkgname):
try:
if cache.get_providing_packages(pkgname):
return False, True, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as upgradable and let apt-get install deal with it
return False, True, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
else:
return False, False, False
try:
@ -206,7 +233,8 @@ def package_status(m, pkgname, version, cache, state):
package_is_installed = pkg.isInstalled
if version:
avail_upgrades = fnmatch.filter((p.version for p in pkg.versions), version)
versions = package_versions(pkgname, pkg, cache._cache)
avail_upgrades = fnmatch.filter(versions, version)
if package_is_installed:
try:
@ -220,7 +248,7 @@ def package_status(m, pkgname, version, cache, state):
# Only claim the package is upgradable if a candidate matches the version
package_is_upgradable = False
for candidate in avail_upgrades:
if pkg.versions[candidate] > pkg.installed:
if package_version_compare(candidate, installed_version) > 0:
package_is_upgradable = True
break
else:
@ -274,13 +302,18 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=True, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, upgradable, has_files = package_status(m, name, version, cache, state='install')
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
if not installed or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed and upgradable and version:
@ -307,7 +340,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
for (k,v) in APT_ENV_VARS.iteritems():
os.environ[k] = v
cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
if build_dep:
cmd = "%s -y %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
else:
cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
@ -316,7 +352,7 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
rc, out, err = m.run_command(cmd)
if rc:
return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err))
return (False, dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err))
else:
return (True, dict(changed=True, stdout=out, stderr=err))
else:
@ -327,7 +363,10 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options):
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
pkg = apt.debfile.DebPackage(deb_file)
try:
pkg = apt.debfile.DebPackage(deb_file)
except SystemError, e:
m.fail_json(msg="Error: %s\nSystem Error: %s" % (pkg._failure_string,str(e)))
# Check if it's already installed
if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
@ -358,7 +397,7 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options):
if m.check_mode:
options += " --simulate"
if force:
options += " --force-yes"
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
rc, out, err = m.run_command(cmd)
@ -462,7 +501,7 @@ def upgrade(m, mode="yes", force=False, default_release=None,
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present']),
state = dict(default='present', choices=['installed', 'latest', 'removed', 'absent', 'present', 'build-dep']),
update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
cache_valid_time = dict(type='int'),
purge = dict(default=False, type='bool'),
@ -570,20 +609,18 @@ def main():
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if p['state'] == 'latest':
result = install(module, packages, cache, upgrade=True,
if p['state'] in ('latest', 'present', 'build-dep'):
state_upgrade = False
state_builddep = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
result = install(module, packages, cache, upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes, dpkg_options=dpkg_options)
(success, retvals) = result
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'present':
result = install(module, packages, cache, default_release=p['default_release'],
install_recommends=install_recommends,force=force_yes,
dpkg_options=dpkg_options)
force=force_yes, dpkg_options=dpkg_options,
build_dep=state_builddep)
(success, retvals) = result
if success:
module.exit_json(**retvals)
@ -594,6 +631,8 @@ def main():
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
# import module snippets
from ansible.module_utils.basic import *

@ -90,17 +90,17 @@ def get_systemid(client, session, sysname):
# ------------------------------------------------------- #
def subscribe_channels(channels, client, session, sysname, sys_id):
c = base_channels(client, session, sys_id)
c.append(channels)
return client.channel.software.setSystemChannels(session, sys_id, c)
def subscribe_channels(channelname, client, session, sysname, sys_id):
channels = base_channels(client, session, sys_id)
channels.append(channelname)
return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
def unsubscribe_channels(channels, client, session, sysname, sys_id):
c = base_channels(client, session, sys_id)
c.remove(channels)
return client.channel.software.setSystemChannels(session, sys_id, c)
def unsubscribe_channels(channelname, client, session, sysname, sys_id):
channels = base_channels(client, session, sys_id)
channels.remove(channelname)
return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
@ -167,3 +167,4 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()

@ -25,6 +25,7 @@
import traceback
import os
import yum
import rpm
try:
from yum.misc import find_unfinished_transactions, find_ts_remaining
@ -108,7 +109,7 @@ options:
notes: []
# informational: requirements for nodes
requirements: [ yum, rpm ]
requirements: [ yum ]
author: Seth Vidal
'''
@ -122,6 +123,9 @@ EXAMPLES = '''
- name: install the latest version of Apache from the testing repo
yum: name=httpd enablerepo=testing state=present
- name: install one specific version of Apache
yum: name=httpd-2.2.29-1.4.amzn1 state=present
- name: upgrade all packages
yum: name=* state=latest
@ -149,21 +153,13 @@ def log(msg):
syslog.openlog('ansible-yum', 0, syslog.LOG_USER)
syslog.syslog(syslog.LOG_NOTICE, msg)
def yum_base(conf_file=None, cachedir=False):
def yum_base(conf_file=None):
my = yum.YumBase()
my.preconf.debuglevel=0
my.preconf.errorlevel=0
if conf_file and os.path.exists(conf_file):
my.preconf.fn = conf_file
if cachedir or os.geteuid() != 0:
if hasattr(my, 'setCacheDir'):
my.setCacheDir()
else:
cachedir = yum.misc.getCacheDir()
my.repos.setCacheDir(cachedir)
my.conf.cache = 0
return my
def install_yum_utils(module):
@ -247,13 +243,11 @@ def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(en_repos)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
@ -296,13 +290,11 @@ def is_update(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=[], dis_rep
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(en_repos)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--pkgnarrow=updates", "--qf", qf, pkgspec]
rc,out,err = module.run_command(cmd)
@ -341,13 +333,11 @@ def what_provides(module, repoq, req_spec, conf_file, qf=def_qf, en_repos=[], d
else:
myrepoq = list(repoq)
for repoid in dis_repos:
r_cmd = ['--disablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--disablerepo', ','.join(dis_repos)]
myrepoq.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo', repoid]
myrepoq.extend(r_cmd)
r_cmd = ['--enablerepo', ','.join(en_repos)]
myrepoq.extend(r_cmd)
cmd = myrepoq + ["--qf", qf, "--whatprovides", req_spec]
rc,out,err = module.run_command(cmd)
@ -405,14 +395,19 @@ def transaction_exists(pkglist):
def local_nvra(module, path):
"""return nvra of a local rpm passed in"""
cmd = ['/bin/rpm', '-qp' ,'--qf',
'%{name}-%{version}-%{release}.%{arch}\n', path ]
rc, out, err = module.run_command(cmd)
if rc != 0:
return None
nvra = out.split('\n')[0]
return nvra
ts = rpm.TransactionSet()
ts.setVSFlags(rpm._RPMVSF_NOSIGNATURES)
fd = os.open(path, os.O_RDONLY)
try:
header = ts.hdrFromFdno(fd)
finally:
os.close(fd)
return '%s-%s-%s.%s' % (header[rpm.RPMTAG_NAME],
header[rpm.RPMTAG_VERSION],
header[rpm.RPMTAG_RELEASE],
header[rpm.RPMTAG_ARCH])
def pkg_to_dict(pkgstr):
@ -682,7 +677,7 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
nothing_to_do = False
break
if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=dis_repos):
if basecmd == 'update' and is_update(module, repoq, this, conf_file, en_repos=en_repos, dis_repos=en_repos):
nothing_to_do = False
break
@ -744,16 +739,14 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
en_repos = []
if disablerepo:
dis_repos = disablerepo.split(',')
r_cmd = ['--disablerepo=%s' % disablerepo]
yum_basecmd.extend(r_cmd)
if enablerepo:
en_repos = enablerepo.split(',')
for repoid in dis_repos:
r_cmd = ['--disablerepo=%s' % repoid]
r_cmd = ['--enablerepo=%s' % enablerepo]
yum_basecmd.extend(r_cmd)
for repoid in en_repos:
r_cmd = ['--enablerepo=%s' % repoid]
yum_basecmd.extend(r_cmd)
if state in ['installed', 'present', 'latest']:
@ -762,13 +755,12 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
my = yum_base(conf_file)
try:
for r in dis_repos:
my.repos.disableRepo(r)
if disablerepo:
my.repos.disableRepo(disablerepo)
current_repos = my.repos.repos.keys()
for r in en_repos:
if enablerepo:
try:
my.repos.enableRepo(r)
my.repos.enableRepo(enablerepo)
new_repos = my.repos.repos.keys()
for i in new_repos:
if not i in current_repos:
@ -779,7 +771,6 @@ def ensure(module, state, pkgspec, conf_file, enablerepo, disablerepo,
module.fail_json(msg="Error setting/accessing repo %s: %s" % (r, e))
except yum.Errors.YumBaseError, e:
module.fail_json(msg="Error accessing repos: %s" % e)
if state in ['installed', 'present']:
if disable_gpg_check:
yum_basecmd.append('--nogpgcheck')

@ -80,15 +80,27 @@ options:
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "yes"
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled.
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
@ -170,6 +182,9 @@ EXAMPLES = '''
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no
# Example checkout a github repo and use refspec to fetch all pull requests
- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/*
'''
import re
@ -283,7 +298,7 @@ def get_submodule_versions(git_path, module, dest, version='HEAD'):
return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference):
reference, refspec):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
@ -308,6 +323,9 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare,
if remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
@ -455,35 +473,31 @@ def get_head_branch(git_path, module, dest, remote, bare=False):
f.close()
return branch
def fetch(git_path, module, repo, dest, version, remote, bare):
def fetch(git_path, module, repo, dest, version, remote, bare, refspec):
''' updates repo from remote sources '''
out_acc = []
err_acc = []
(rc, out0, err0) = module.run_command([git_path, 'remote', 'set-url', remote, repo], cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to set a new url %s for %s: %s" % (repo, remote, out0 + err0))
if bare:
(rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest)
else:
(rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest)
out_acc.append(out1)
err_acc.append(err1)
if rc != 0:
module.fail_json(msg="Failed to download remote objects and refs: %s %s" %
(''.join(out_acc), ''.join(err_acc)))
commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])]
fetch_str = 'download remote objects and refs'
if bare:
(rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest)
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
else:
(rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest)
out_acc.append(out2)
err_acc.append(err2)
if rc != 0:
module.fail_json(msg="Failed to download remote objects and refs: %s %s" %
(''.join(out_acc), ''.join(err_acc)))
return (rc, ''.join(out_acc), ''.join(err_acc))
# unlike in bare mode, there's no way to combine the
# additional refspec with the default git fetch behavior,
# so use two commands
commands.append((fetch_str, [git_path, 'fetch', remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
for (label,command) in commands:
(rc,out,err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
def submodules_fetch(git_path, module, remote, track_submodules, dest):
changed = False
@ -596,8 +610,9 @@ def main():
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='yes', type='bool'),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
@ -616,6 +631,7 @@ def main():
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
@ -673,7 +689,7 @@ def main():
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
module.exit_json(changed=True, before=before, after=remote_head)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference)
clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec)
repo_updated = True
elif not update:
# Just return having found a repo already in the dest path
@ -707,7 +723,7 @@ def main():
if repo_updated is None:
if module.check_mode:
module.exit_json(changed=True, before=before, after=remote_head)
fetch(git_path, module, repo, dest, version, remote, bare)
fetch(git_path, module, repo, dest, version, remote, bare, refspec)
repo_updated = True
# switch to version specified regardless of whether

@ -2,6 +2,7 @@
#-*- coding: utf-8 -*-
# (c) 2013, Yeukhon Wong <yeukhon@acm.org>
# (c) 2014, Nate Coraor <nate@bx.psu.edu>
#
# This module was originally inspired by Brad Olson's ansible-module-mercurial
# <https://github.com/bradobro/ansible-module-mercurial>. This module tends
@ -49,13 +50,14 @@ options:
- Equivalent C(-r) option in hg command which could be the changeset, revision number,
branch name or even tag.
required: false
default: "default"
default: null
aliases: [ version ]
force:
description:
- Discards uncommitted changes. Runs C(hg update -C).
- Discards uncommitted changes. Runs C(hg update -C). Prior to
1.9, the default was `yes`.
required: false
default: "yes"
default: "no"
choices: [ "yes", "no" ]
purge:
description:
@ -128,7 +130,10 @@ class Hg(object):
if not before:
return False
(rc, out, err) = self._command(['update', '-C', '-R', self.dest])
args = ['update', '-C', '-R', self.dest]
if self.revision is not None:
args = args + ['-r', self.revision]
(rc, out, err) = self._command(args)
if rc != 0:
self.module.fail_json(msg=err)
@ -170,13 +175,30 @@ class Hg(object):
['pull', '-R', self.dest, self.repo])
def update(self):
if self.revision is not None:
return self._command(['update', '-r', self.revision, '-R', self.dest])
return self._command(['update', '-R', self.dest])
def clone(self):
return self._command(['clone', self.repo, self.dest, '-r', self.revision])
if self.revision is not None:
return self._command(['clone', self.repo, self.dest, '-r', self.revision])
return self._command(['clone', self.repo, self.dest])
def switch_version(self):
return self._command(['update', '-r', self.revision, '-R', self.dest])
@property
def at_revision(self):
"""
There is no point in pulling from a potentially down/slow remote site
if the desired changeset is already the current changeset.
"""
if self.revision is None or len(self.revision) < 7:
# Assume it's a rev number, tag, or branch
return False
(rc, out, err) = self._command(['--debug', 'id', '-i', '-R', self.dest])
if rc != 0:
self.module.fail_json(msg=err)
if out.startswith(self.revision):
return True
return False
# ===========================================
@ -185,8 +207,8 @@ def main():
argument_spec = dict(
repo = dict(required=True, aliases=['name']),
dest = dict(required=True),
revision = dict(default="default", aliases=['version']),
force = dict(default='yes', type='bool'),
revision = dict(default=None, aliases=['version']),
force = dict(default='no', type='bool'),
purge = dict(default='no', type='bool'),
executable = dict(default=None),
),
@ -212,6 +234,12 @@ def main():
(rc, out, err) = hg.clone()
if rc != 0:
module.fail_json(msg=err)
elif hg.at_revision:
# no update needed, don't pull
before = hg.get_revision()
# but force and purge if desired
cleaned = hg.cleanup(force, purge)
else:
# get the current state before doing pulling
before = hg.get_revision()
@ -227,7 +255,6 @@ def main():
if rc != 0:
module.fail_json(msg=err)
hg.switch_version()
after = hg.get_revision()
if before != after or cleaned:
changed = True

@ -50,8 +50,9 @@ options:
force:
description:
- If C(yes), modified files will be discarded. If C(no), module will fail if it encounters modified files.
Prior to 1.9 the default was `yes`.
required: false
default: "yes"
default: "no"
choices: [ "yes", "no" ]
username:
description:
@ -123,7 +124,12 @@ class Subversion(object):
def export(self, force=False):
'''Export svn repo to directory'''
self._exec(["export", "-r", self.revision, self.repo, self.dest])
cmd = ["export"]
if force:
cmd.append("--force")
cmd.extend(["-r", self.revision, self.repo, self.dest])
self._exec(cmd)
def switch(self):
'''Change working directory's repo.'''
@ -173,7 +179,7 @@ def main():
dest=dict(required=True),
repo=dict(required=True, aliases=['name', 'repository']),
revision=dict(default='HEAD', aliases=['rev', 'version']),
force=dict(default='yes', type='bool'),
force=dict(default='no', type='bool'),
username=dict(required=False),
password=dict(required=False),
executable=dict(default=None),
@ -194,7 +200,7 @@ def main():
os.environ['LANG'] = 'C'
svn = Subversion(module, dest, repo, revision, username, password, svn_path)
if not os.path.exists(dest):
if export or not os.path.exists(dest):
before = None
local_mods = False
if module.check_mode:
@ -202,7 +208,7 @@ def main():
if not export:
svn.checkout()
else:
svn.export()
svn.export(force=force)
elif os.path.exists("%s/.svn" % (dest, )):
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
@ -222,9 +228,12 @@ def main():
else:
module.fail_json(msg="ERROR: %s folder already exists, but its not a subversion repository." % (dest, ))
after = svn.get_revision()
changed = before != after or local_mods
module.exit_json(changed=changed, before=before, after=after)
if export:
module.exit_json(changed=True)
else:
after = svn.get_revision()
changed = before != after or local_mods
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *

@ -37,7 +37,7 @@ options:
aliases: []
key:
description:
- The SSH public key, as a string
- The SSH public key(s), as a string or (since 1.9) url (https://github.com/username.keys)
required: true
default: null
path:
@ -70,6 +70,15 @@ options:
required: false
default: null
version_added: "1.4"
exclusive:
description:
- Whether to remove all other non-specified keys from the
authorized_keys file. Multiple keys can be specified in a single
key= string value by separating them by newlines.
required: false
choices: [ "yes", "no" ]
default: "no"
version_added: "1.9"
description:
- "Adds or removes authorized keys for particular user accounts"
author: Brad Olson
@ -79,6 +88,9 @@ EXAMPLES = '''
# Example using key data from a local file on the management machine
- authorized_key: user=charlie key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
# Using github url as key source
- authorized_key: user=charlie key=https://github.com/charlie.keys
# Using alternate directory locations:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
@ -97,6 +109,10 @@ EXAMPLES = '''
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options='no-port-forwarding,host="10.0.1.1"'
# Set up authorized_keys exclusively with one key
- authorized_key: user=root key=public_keys/doe-jane state=present
exclusive=yes
'''
# Makes sure the public key line is present or absent in the user's .ssh/authorized_keys.
@ -332,16 +348,32 @@ def enforce_state(module, params):
manage_dir = params.get("manage_dir", True)
state = params.get("state", "present")
key_options = params.get("key_options", None)
exclusive = params.get("exclusive", False)
error_msg = "Error getting key from: %s"
# if the key is a url, request it and use it as key source
if key.startswith("http"):
try:
resp, info = fetch_url(module, key)
if info['status'] != 200:
module.fail_json(msg=error_msg % key)
else:
key = resp.read()
except Exception:
module.fail_json(msg=error_msg % key)
# extract individual keys into an array, skipping blank lines and comments
key = [s for s in key.splitlines() if s and not s.startswith('#')]
# check current state -- just get the filename, don't create file
do_write = False
params["keyfile"] = keyfile(module, user, do_write, path, manage_dir)
existing_keys = readkeys(module, params["keyfile"])
# Add a place holder for keys that should exist in the state=present and
# exclusive=true case
keys_to_exist = []
# Check our new keys, if any of them exist we'll continue.
for new_key in key:
parsed_new_key = parsekey(module, new_key)
@ -371,6 +403,7 @@ def enforce_state(module, params):
# handle idempotent state=present
if state=="present":
keys_to_exist.append(parsed_new_key[0])
if len(non_matching_keys) > 0:
for non_matching_key in non_matching_keys:
if non_matching_key[0] in existing_keys:
@ -387,6 +420,13 @@ def enforce_state(module, params):
del existing_keys[parsed_new_key[0]]
do_write = True
# remove all other keys to honor exclusive
if state == "present" and exclusive:
to_remove = frozenset(existing_keys).difference(keys_to_exist)
for key in to_remove:
del existing_keys[key]
do_write = True
if do_write:
if module.check_mode:
module.exit_json(changed=True)
@ -409,6 +449,7 @@ def main():
state = dict(default='present', choices=['absent','present']),
key_options = dict(required=False, type='str'),
unique = dict(default=False, type='bool'),
exclusive = dict(default=False, type='bool'),
),
supports_check_mode=True
)
@ -418,4 +459,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()

@ -251,6 +251,49 @@ class FreeBsdGroup(Group):
# ===========================================
class DarwinGroup(Group):
"""
This is a Mac OS X Darwin Group manipulation class.
This overrides the following methods from the generic class:-
- group_del()
- group_add()
- group_mod()
group manupulation are done using dseditgroup(1).
"""
platform = 'Darwin'
distribution = None
def group_add(self, **kwargs):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'create' ]
cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_del(self):
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'delete' ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
def group_mod(self):
info = self.group_info()
if self.gid is not None and int(self.gid) != info[2]:
cmd = [self.module.get_bin_path('dseditgroup', True)]
cmd += [ '-o', 'edit' ]
cmd += [ '-i', self.gid ]
cmd += [ '-L', self.name ]
(rc, out, err) = self.execute_command(cmd)
return (rc, out, err)
return (None, '', '')
class OpenBsdGroup(Group):
"""
This is a OpenBSD Group manipulation class.

@ -28,6 +28,7 @@ requirements: [ hostname ]
description:
- Set system's hostname
- Currently implemented on Debian, Ubuntu, Fedora, RedHat, openSUSE, Linaro, ScientificLinux, Arch, CentOS, AMI.
- Any distribution that uses systemd as their init system
options:
name:
required: true
@ -232,9 +233,9 @@ class RedHatStrategy(GenericStrategy):
# ===========================================
class FedoraStrategy(GenericStrategy):
class SystemdStrategy(GenericStrategy):
"""
This is a Fedora family Hostname manipulation strategy class - it uses
This is a Systemd hostname manipulation strategy class - it uses
the hostnamectl command.
"""
@ -323,17 +324,17 @@ class OpenRCStrategy(GenericStrategy):
class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
class OpenSUSEHostname(Hostname):
platform = 'Linux'
distribution = 'Opensuse '
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
class ArchHostname(Hostname):
platform = 'Linux'
distribution = 'Arch'
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
class RedHat5Hostname(Hostname):
platform = 'Linux'
@ -345,7 +346,7 @@ class RedHatServerHostname(Hostname):
distribution = 'Red hat enterprise linux server'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
@ -354,7 +355,7 @@ class RedHatWorkstationHostname(Hostname):
distribution = 'Red hat enterprise linux workstation'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
@ -363,7 +364,7 @@ class CentOSHostname(Hostname):
distribution = 'Centos'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
@ -372,19 +373,27 @@ class CentOSLinuxHostname(Hostname):
distribution = 'Centos linux'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = FedoraStrategy
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class ScientificHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific'
strategy_class = RedHatStrategy
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class ScientificLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Scientific linux'
strategy_class = RedHatStrategy
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
@ -401,6 +410,11 @@ class UbuntuHostname(Hostname):
distribution = 'Ubuntu'
strategy_class = DebianStrategy
class LinuxmintHostname(Hostname):
platform = 'Linux'
distribution = 'Linuxmint'
strategy_class = DebianStrategy
class LinaroHostname(Hostname):
platform = 'Linux'
distribution = 'Linaro'

@ -320,6 +320,17 @@ def main():
if os.path.ismount(name):
if changed:
res,msg = mount(module, **args)
elif 'bind' in args.get('opts', []):
changed = True
cmd = 'mount -l'
rc, out, err = module.run_command(cmd)
allmounts = out.split('\n')
for mounts in allmounts[:-1]:
arguments = mounts.split()
if arguments[0] == args['src'] and arguments[2] == args['name'] and arguments[4] == args['fstype']:
changed = False
if changed:
res,msg = mount(module, **args)
else:
changed = True
res,msg = mount(module, **args)

@ -174,14 +174,19 @@ def main():
if (state != runtime_state):
if module.check_mode:
module.exit_json(changed=True)
if (state == 'disabled'):
msgs.append('state change will take effect next reboot')
else:
if (runtime_enabled):
if (runtime_enabled):
if (state == 'disabled'):
if (runtime_state != 'permissive'):
# Temporarily set state to permissive
set_state('permissive')
msgs.append('runtime state temporarily changed from \'%s\' to \'permissive\', state change will take effect next reboot' % (runtime_state))
else:
msgs.append('state change will take effect next reboot')
else:
set_state(state)
msgs.append('runtime state changed from \'%s\' to \'%s\'' % (runtime_state, state))
else:
msgs.append('state change will take effect next reboot')
else:
msgs.append('state change will take effect next reboot')
changed=True
if (state != config_state):

@ -26,7 +26,7 @@ version_added: "0.1"
short_description: Manage services.
description:
- Controls services on remote hosts. Supported init systems include BSD init,
OpenRC, SysV, systemd, upstart.
OpenRC, SysV, Solaris SMF, systemd, upstart.
options:
name:
required: true
@ -105,8 +105,14 @@ import shlex
import select
import time
import string
import glob
from distutils.version import LooseVersion
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class Service(object):
"""
@ -387,7 +393,7 @@ class LinuxService(Service):
def get_service_tools(self):
paths = [ '/sbin', '/usr/sbin', '/bin', '/usr/bin' ]
binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart' ]
binaries = [ 'service', 'chkconfig', 'update-rc.d', 'rc-service', 'rc-update', 'initctl', 'systemctl', 'start', 'stop', 'restart', 'insserv' ]
initpaths = [ '/etc/init.d' ]
location = dict()
@ -455,6 +461,9 @@ class LinuxService(Service):
if location.get('update-rc.d', False):
# and uses update-rc.d
self.enable_cmd = location['update-rc.d']
elif location.get('insserv', None):
# and uses insserv
self.enable_cmd = location['insserv']
elif location.get('chkconfig', False):
# and uses chkconfig
self.enable_cmd = location['chkconfig']
@ -473,6 +482,12 @@ class LinuxService(Service):
if location.get('initctl', False):
self.svc_initctl = location['initctl']
def get_systemd_service_enabled(self):
(rc, out, err) = self.execute_command("%s is-enabled %s" % (self.enable_cmd, self.__systemd_unit,))
if rc == 0:
return True
return False
def get_systemd_status_dict(self):
(rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,))
if rc != 0:
@ -687,12 +702,11 @@ class LinuxService(Service):
action = 'disable'
# Check if we're already in the correct state
d = self.get_systemd_status_dict()
if "UnitFileState" in d:
if self.enable and d["UnitFileState"] == "enabled":
self.changed = False
elif not self.enable and d["UnitFileState"] == "disabled":
self.changed = False
service_enabled = self.get_systemd_service_enabled()
if self.enable and service_enabled:
self.changed = False
elif not self.enable and not service_enabled:
self.changed = False
elif not self.enable:
self.changed = False
@ -734,45 +748,74 @@ class LinuxService(Service):
# update-rc.d style
#
if self.enable_cmd.endswith("update-rc.d"):
if self.enable:
action = 'enable'
else:
action = 'disable'
if self.enable:
# make sure the init.d symlinks are created
# otherwise enable might not work
(rc, out, err) = self.execute_command("%s %s defaults" \
% (self.enable_cmd, self.name))
enabled = False
slinks = glob.glob('/etc/rc?.d/S??' + self.name)
if slinks:
enabled = True
if self.enable != enabled:
self.changed = True
if self.enable:
action = 'enable'
klinks = glob.glob('/etc/rc?.d/K??' + self.name)
if not klinks:
(rc, out, err) = self.execute_command("%s %s defaults" % (self.enable_cmd, self.name))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
action = 'disable'
(rc, out, err) = self.execute_command("%s %s %s" % (self.enable_cmd, self.name, action))
if rc != 0:
if err:
self.module.fail_json(msg=err)
else:
self.module.fail_json(msg=out)
self.module.fail_json(msg=out) % (self.enable_cmd, self.name, action)
else:
self.changed = False
return
#
# insserv (Debian 7)
#
if self.enable_cmd.endswith("insserv"):
if self.enable:
(rc, out, err) = self.execute_command("%s -n %s" % (self.enable_cmd, self.name))
else:
(rc, out, err) = self.execute_command("%s -nr %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s -n %s %s" \
% (self.enable_cmd, self.name, action))
self.changed = False
for line in out.splitlines():
if line.startswith('rename'):
self.changed = True
break
elif self.enable and 'do not exist' in line:
for line in err.splitlines():
if self.enable and line.find('enable service') != -1:
self.changed = True
break
elif not self.enable and 'already exist' in line:
if not self.enable and line.find('remove service') != -1:
self.changed = True
break
# Debian compatibility
for line in err.splitlines():
if self.enable and 'no runlevel symlinks to modify' in line:
self.changed = True
break
if self.module.check_mode:
self.module.exit_json(changed=self.changed)
if not self.changed:
return
if self.enable:
(rc, out, err) = self.execute_command("%s %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to install service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
else:
(rc, out, err) = self.execute_command("%s -r %s" % (self.enable_cmd, self.name))
if (rc != 0) or (err != ''):
self.module.fail_json(msg=("Failed to remove service. rc: %s, out: %s, err: %s" % (rc, out, err)))
return (rc, out, err)
#
# If we've gotten to the end, the service needs to be updated
#
@ -942,34 +985,151 @@ class FreeBsdService(Service):
class OpenBsdService(Service):
"""
This is the OpenBSD Service manipulation class - it uses /etc/rc.d for
service control. Enabling a service is currently not supported because the
<service>_flags variable is not boolean, you should supply a rc.conf.local
file in some other way.
This is the OpenBSD Service manipulation class - it uses rcctl(8) or
/etc/rc.d scripts for service control. Enabling a service is
only supported if rcctl is present.
"""
platform = 'OpenBSD'
distribution = None
def get_service_tools(self):
rcdir = '/etc/rc.d'
self.enable_cmd = self.module.get_bin_path('rcctl')
if self.enable_cmd:
self.svc_cmd = self.enable_cmd
else:
rcdir = '/etc/rc.d'
rc_script = "%s/%s" % (rcdir, self.name)
if os.path.isfile(rc_script):
self.svc_cmd = rc_script
rc_script = "%s/%s" % (rcdir, self.name)
if os.path.isfile(rc_script):
self.svc_cmd = rc_script
if not self.svc_cmd:
self.module.fail_json(msg='unable to find rc.d script')
self.module.fail_json(msg='unable to find svc_cmd')
def get_service_status(self):
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
if self.enable_cmd:
rc, stdout, stderr = self.execute_command("%s %s %s" % (self.svc_cmd, 'check', self.name))
else:
rc, stdout, stderr = self.execute_command("%s %s" % (self.svc_cmd, 'check'))
if stderr:
self.module.fail_json(msg=stderr)
if rc == 1:
self.running = False
elif rc == 0:
self.running = True
def service_control(self):
return self.execute_command("%s %s" % (self.svc_cmd, self.action))
if self.enable_cmd:
return self.execute_command("%s -f %s %s" % (self.svc_cmd, self.action, self.name))
else:
return self.execute_command("%s -f %s" % (self.svc_cmd, self.action))
def service_enable(self):
if not self.enable_cmd:
return super(OpenBsdService, self).service_enable()
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'getdef', self.name, 'flags'))
if stderr:
self.module.fail_json(msg=stderr)
getdef_string = stdout.rstrip()
# Depending on the service the string returned from 'default' may be
# either a set of flags or the boolean YES/NO
if getdef_string == "YES" or getdef_string == "NO":
default_flags = ''
else:
default_flags = getdef_string
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'flags'))
if stderr:
self.module.fail_json(msg=stderr)
get_string = stdout.rstrip()
# Depending on the service the string returned from 'getdef/get' may be
# either a set of flags or the boolean YES/NO
if get_string == "YES" or get_string == "NO":
current_flags = ''
else:
current_flags = get_string
# If there are arguments from the user we use these as flags unless
# they are already set.
if self.arguments and self.arguments != current_flags:
changed_flags = self.arguments
# If the user has not supplied any arguments and the current flags
# differ from the default we reset them.
elif not self.arguments and current_flags != default_flags:
changed_flags = ' '
# Otherwise there is no need to modify flags.
else:
changed_flags = ''
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.enable_cmd, 'get', self.name, 'status'))
if self.enable:
if rc == 0 and not changed_flags:
return
if rc != 0:
status_action = "set %s status on" % (self.name)
else:
status_action = ''
if changed_flags:
flags_action = "set %s flags %s" % (self.name, changed_flags)
else:
flags_action = ''
else:
if rc == 1:
return
status_action = "set %s status off" % self.name
flags_action = ''
# Verify state assumption
if not status_action and not flags_action:
self.module.fail_json(msg="neither status_action or status_flags is set, this should never happen")
if self.module.check_mode:
self.module.exit_json(changed=True, msg="changing service enablement")
status_modified = 0
if status_action:
rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, status_action))
if rc != 0:
if stderr:
self.module.fail_json(msg=stderr)
else:
self.module.fail_json(msg="rcctl failed to modify service status")
status_modified = 1
if flags_action:
rc, stdout, stderr = self.execute_command("%s %s" % (self.enable_cmd, flags_action))
if rc != 0:
if stderr:
if status_modified:
error_message = "rcctl modified service status but failed to set flags: " + stderr
else:
error_message = stderr
else:
if status_modified:
error_message = "rcctl modified service status but failed to set flags"
else:
error_message = "rcctl failed to modify service flags"
self.module.fail_json(msg=error_message)
self.changed = True
# ===========================================
# Subclass: NetBSD

@ -185,12 +185,20 @@ class SysctlModule(object):
def _parse_value(self, value):
if value is None:
return ''
elif value.lower() in BOOLEANS_TRUE:
return '1'
elif value.lower() in BOOLEANS_FALSE:
return '0'
elif isinstance(value, bool):
if value:
return '1'
else:
return '0'
elif isinstance(value, basestring):
if value.lower() in BOOLEANS_TRUE:
return '1'
elif value.lower() in BOOLEANS_FALSE:
return '0'
else:
return value.strip()
else:
return value.strip()
return value
# ==============================================================
# SYSCTL COMMAND MANAGEMENT

@ -81,13 +81,14 @@ options:
the user example in the github examples directory for what this looks
like in a playbook. The `FAQ <http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module>`_
contains details on various ways to generate these password values.
Note on Darwin system, this value has to be cleartext.
Beware of security issues.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the account should exist. When C(absent), removes
the user account.
- Whether the account should exist or not, taking action if the state is different from what is stated.
createhome:
required: false
default: "yes"
@ -95,7 +96,7 @@ options:
description:
- Unless set to C(no), a home directory will be made for the user
when the account is created or if the home directory does not
exist.
exist.
move_home:
required: false
default: "no"
@ -153,13 +154,14 @@ options:
present on target host.
ssh_key_file:
required: false
default: $HOME/.ssh/id_rsa
default: .ssh/id_rsa
version_added: "0.9"
description:
- Optionally specify the SSH key filename.
- Optionally specify the SSH key filename. If this is a relative
filename then it will be relative to the user's home directory.
ssh_key_comment:
required: false
default: ansible-generated
default: ansible-generated on $HOSTNAME
version_added: "0.9"
description:
- Optionally define the comment for the SSH key.
@ -177,6 +179,13 @@ options:
version_added: "1.3"
description:
- C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users.
expires:
version_added: "1.9"
required: false
default: "None"
description:
- An expiry time for the user in epoch, it will be ignored on platforms that do not support this.
Currently supported on Linux and FreeBSD.
'''
EXAMPLES = '''
@ -189,8 +198,11 @@ EXAMPLES = '''
# Remove the user 'johnd'
- user: name=johnd state=absent remove=yes
# Create a 2048-bit SSH key for user jsmith
- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048
# Create a 2048-bit SSH key for user jsmith in ~jsmith/.ssh/id_rsa
- user: name=jsmith generate_ssh_key=yes ssh_key_bits=2048 ssh_key_file=.ssh/id_rsa
# added a consultant whose account you want to expire
- user: name=james18 shell=/bin/zsh groups=developers expires=1422403387
'''
import os
@ -198,6 +210,8 @@ import pwd
import grp
import syslog
import platform
import socket
import time
try:
import spwd
@ -225,6 +239,7 @@ class User(object):
platform = 'Generic'
distribution = None
SHADOWFILE = '/etc/shadow'
DATE_FORMAT = '%Y-%M-%d'
def __new__(cls, *args, **kwargs):
return load_platform_subclass(User, args, kwargs)
@ -254,6 +269,14 @@ class User(object):
self.ssh_comment = module.params['ssh_key_comment']
self.ssh_passphrase = module.params['ssh_key_passphrase']
self.update_password = module.params['update_password']
self.expires = None
if module.params['expires']:
try:
self.expires = time.gmtime(module.params['expires'])
except Exception,e:
module.fail_json("Invalid expires time %s: %s" %(self.expires, str(e)))
if module.params['ssh_key_file'] is not None:
self.ssh_file = module.params['ssh_key_file']
else:
@ -262,12 +285,13 @@ class User(object):
# select whether we dump additional debug info through syslog
self.syslogging = False
def execute_command(self, cmd, use_unsafe_shell=False):
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
if self.syslogging:
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell)
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
cmd = [self.module.get_bin_path('userdel', True)]
@ -326,6 +350,10 @@ class User(object):
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.password is not None:
cmd.append('-p')
cmd.append(self.password)
@ -432,6 +460,10 @@ class User(object):
cmd.append('-s')
cmd.append(self.shell)
if self.expires:
cmd.append('--expiredate')
cmd.append(time.strftime(self.DATE_FORMAT, self.expires))
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd.append('-p')
cmd.append(self.password)
@ -536,7 +568,7 @@ class User(object):
if not os.path.exists(info[5]):
return (1, '', 'User %s home directory does not exist' % self.name)
ssh_key_file = self.get_ssh_key_path()
ssh_dir = os.path.dirname(ssh_key_file)
ssh_dir = os.path.dirname(ssh_key_file)
if not os.path.exists(ssh_dir):
try:
os.mkdir(ssh_dir, 0700)
@ -625,7 +657,7 @@ class User(object):
os.chown(os.path.join(root, f), uid, gid)
except OSError, e:
self.module.exit_json(failed=True, msg="%s" % e)
# ===========================================
@ -702,6 +734,11 @@ class FreeBsdUser(User):
cmd.append('-L')
cmd.append(self.login_class)
if self.expires:
days =( time.mktime(self.expires) - time.time() ) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# system cannot be handled currently - should we error if its requested?
# create the user
(rc, out, err) = self.execute_command(cmd)
@ -714,7 +751,7 @@ class FreeBsdUser(User):
self.module.get_bin_path('chpass', True),
'-p',
self.password,
self.name
self.name
]
return self.execute_command(cmd)
@ -725,7 +762,7 @@ class FreeBsdUser(User):
self.module.get_bin_path('pw', True),
'usermod',
'-n',
self.name
self.name
]
cmd_len = len(cmd)
info = self.user_info()
@ -760,8 +797,17 @@ class FreeBsdUser(User):
cmd.append(self.shell)
if self.login_class is not None:
cmd.append('-L')
cmd.append(self.login_class)
# find current login class
user_login_class = None
if os.path.exists(self.SHADOWFILE) and os.access(self.SHADOWFILE, os.R_OK):
for line in open(self.SHADOWFILE).readlines():
if line.startswith('%s:' % self.name):
user_login_class = line.split(':')[4]
# act only if login_class change
if self.login_class != user_login_class:
cmd.append('-L')
cmd.append(self.login_class)
if self.groups is not None:
current_groups = self.user_group_membership()
@ -786,6 +832,11 @@ class FreeBsdUser(User):
new_groups = groups | set(current_groups)
cmd.append(','.join(new_groups))
if self.expires:
days = ( time.mktime(self.expires) - time.time() ) / 86400
cmd.append('-e')
cmd.append(str(int(days)))
# modify the user if cmd will do anything
if cmd_len != len(cmd):
(rc, out, err) = self.execute_command(cmd)
@ -1255,7 +1306,7 @@ class SunOS(User):
cmd.append('-G')
new_groups = groups
if self.append:
new_groups.extend(current_groups)
new_groups.update(current_groups)
cmd.append(','.join(new_groups))
if self.comment is not None and info[4] != self.comment:
@ -1304,6 +1355,321 @@ class SunOS(User):
return (rc, out, err)
# ===========================================
class DarwinUser(User):
    """
    This is a Darwin Mac OS X User manipulation class.
    Main differences are that Darwin:
    - Handles accounts in a database managed by dscl(1)
    - Has no useradd/groupadd
    - Does not create home directories
    - User password must be cleartext
    - UID must be given
    - System users must be under 500

    This overrides the following methods from the generic class:
    - user_exists()
    - create_user()
    - remove_user()
    - modify_user()
    """
    platform = 'Darwin'
    distribution = None
    SHADOWFILE = None

    # dscl datasource to operate on; '.' is the default local directory.
    dscl_directory = '.'

    # Mapping of module option name -> dscl user record attribute.
    fields = [
        ('comment', 'RealName'),
        ('home', 'NFSHomeDirectory'),
        ('shell', 'UserShell'),
        ('uid', 'UniqueID'),
        ('group', 'PrimaryGroupID'),
    ]

    def _get_dscl(self):
        '''Return the base dscl command: [dscl binary, datasource].'''
        return [self.module.get_bin_path('dscl', True), self.dscl_directory]

    def _list_user_groups(self):
        '''Return the list of group names SELF.NAME currently belongs to.'''
        cmd = self._get_dscl()
        cmd += ['-search', '/Groups', 'GroupMembership', self.name]
        (rc, out, err) = self.execute_command(cmd)
        groups = []
        for line in out.splitlines():
            # dscl -search output interleaves attribute detail lines
            # (indented) and closing parentheses; only the lines that
            # start with a group record name are wanted.
            if line.startswith(' ') or line.startswith(')'):
                continue
            groups.append(line.split()[0])
        return groups

    def _get_user_property(self, property):
        '''Return user PROPERTY as given by dscl(1) read or None if not found.'''
        cmd = self._get_dscl()
        cmd += ['-read', '/Users/%s' % self.name, property]
        (rc, out, err) = self.execute_command(cmd)
        if rc != 0:
            return None
        # from dscl(1)
        # if property contains embedded spaces, the list will instead be
        # displayed one entry per line, starting on the line after the key.
        lines = out.splitlines()
        if len(lines) == 1:
            return lines[0].split(': ')[1]
        if len(lines) > 2:
            return '\n'.join([lines[1].strip()] + lines[2:])
        if len(lines) == 2:
            return lines[1].strip()
        return None

    def _change_user_password(self):
        '''Change password for SELF.NAME against SELF.PASSWORD.

        Please note that password must be cleartext.  An empty/None
        password disables password login by storing "*".
        '''
        # some documentation on how passwords are stored on OSX:
        # http://blog.lostpassword.com/2012/07/cracking-mac-os-x-lion-accounts-passwords/
        # http://null-byte.wonderhowto.com/how-to/hack-mac-os-x-lion-passwords-0130036/
        # http://pastebin.com/RYqxi7Ca
        # on OSX 10.8+ hash is SALTED-SHA512-PBKDF2
        # https://pythonhosted.org/passlib/lib/passlib.hash.pbkdf2_digest.html
        # https://gist.github.com/nueh/8252572
        cmd = self._get_dscl()
        if self.password:
            cmd += ['-passwd', '/Users/%s' % self.name, self.password]
        else:
            cmd += ['-create', '/Users/%s' % self.name, 'Password', '*']
        (rc, out, err) = self.execute_command(cmd)
        if rc != 0:
            self.module.fail_json(msg='Error when changing password',
                                  err=err, out=out, rc=rc)
        return (rc, out, err)

    def _make_group_numerical(self):
        '''Convert SELF.GROUP to its stringed numerical value suitable for dscl.'''
        if self.group is not None:
            try:
                self.group = grp.getgrnam(self.group).gr_gid
            except KeyError:
                self.module.fail_json(msg='Group "%s" not found. Try to create it first using "group" module.' % self.group)
            # We need to pass a string to dscl
            self.group = str(self.group)

    def __modify_group(self, group, action):
        '''Add or remove SELF.NAME to or from GROUP depending on ACTION.
        ACTION can be 'add' or 'remove', otherwise 'remove' is assumed.
        Returns (rc, out, err) of the dseditgroup call.'''
        if action == 'add':
            option = '-a'
        else:
            option = '-d'
        cmd = ['dseditgroup', '-o', 'edit', option, self.name,
               '-t', 'user', group]
        (rc, out, err) = self.execute_command(cmd)
        if rc != 0:
            self.module.fail_json(msg='Cannot %s user "%s" to group "%s".'
                                      % (action, self.name, group),
                                  err=err, out=out, rc=rc)
        return (rc, out, err)

    def _modify_group(self):
        '''Synchronise SELF.NAME's group memberships with SELF.GROUPS.
        Returns (rc, out, err, changed).'''
        rc = 0
        out = ''
        err = ''
        changed = False
        current = set(self._list_user_groups())
        if self.groups is not None:
            target = set(self.groups.split(','))
        else:
            target = set([])
        for remove in current - target:
            # BUGFIX: the original accumulated 'rc += rc' (doubling rc)
            # and unpacked (rc, out, err) in the wrong order.
            (_rc, _out, _err) = self.__modify_group(remove, 'delete')
            rc += _rc
            out += _out
            err += _err
            changed = True
        for add in target - current:
            (_rc, _out, _err) = self.__modify_group(add, 'add')
            rc += _rc
            out += _out
            err += _err
            changed = True
        return (rc, out, err, changed)

    def _update_system_user(self):
        '''Hide or show user on login window according to SELF.SYSTEM.
        Returns 0 if a change has been made, None otherwise.'''
        plist_file = '/Library/Preferences/com.apple.loginwindow.plist'
        # http://support.apple.com/kb/HT5017?viewlocale=en_US
        uid = int(self.uid)
        cmd = ['defaults', 'read', plist_file, 'HiddenUsersList']
        (rc, out, err) = self.execute_command(cmd)
        # returned value is
        # (
        #   "_userA",
        #   "_UserB",
        #   userc
        # )
        hidden_users = []
        for x in out.splitlines()[1:-1]:
            try:
                x = x.split('"')[1]
            except IndexError:
                x = x.strip()
            hidden_users.append(x)
        if self.system:
            if self.name not in hidden_users:
                cmd = ['defaults', 'write', plist_file,
                       'HiddenUsersList', '-array-add', self.name]
                (rc, out, err) = self.execute_command(cmd)
                if rc != 0:
                    self.module.fail_json(
                        msg='Cannot add user "%s" to hidden user list.'
                            % self.name, err=err, out=out, rc=rc)
                return 0
        else:
            if self.name in hidden_users:
                del(hidden_users[hidden_users.index(self.name)])
                cmd = ['defaults', 'write', plist_file,
                       'HiddenUsersList', '-array'] + hidden_users
                (rc, out, err) = self.execute_command(cmd)
                if rc != 0:
                    self.module.fail_json(
                        msg='Cannot remove user "%s" from hidden user list.'
                            % self.name, err=err, out=out, rc=rc)
                return 0

    def user_exists(self):
        '''Check if SELF.NAME is a known user on the system.'''
        cmd = self._get_dscl()
        cmd += ['-list', '/Users/%s' % self.name]
        (rc, out, err) = self.execute_command(cmd)
        return rc == 0

    def remove_user(self):
        '''Delete SELF.NAME. If SELF.FORCE is true, remove its home directory.'''
        info = self.user_info()
        cmd = self._get_dscl()
        cmd += ['-delete', '/Users/%s' % self.name]
        (rc, out, err) = self.execute_command(cmd)
        if rc != 0:
            self.module.fail_json(
                msg='Cannot delete user "%s".'
                    % self.name, err=err, out=out, rc=rc)
        if self.force:
            # info[5] is the home directory from user_info().
            if os.path.exists(info[5]):
                shutil.rmtree(info[5])
                out += "Removed %s" % info[5]
        return (rc, out, err)

    def create_user(self, command_name='dscl'):
        '''Create the user record and set all requested properties.
        Returns (rc, out, err) aggregated over all dscl calls.'''
        cmd = self._get_dscl()
        cmd += ['-create', '/Users/%s' % self.name]
        # BUGFIX: execute_command returns (rc, out, err); the original
        # unpacked (rc, err, out) here and below, swapping the streams.
        (rc, out, err) = self.execute_command(cmd)
        if rc != 0:
            self.module.fail_json(
                msg='Cannot create user "%s".'
                    % self.name, err=err, out=out, rc=rc)

        self._make_group_numerical()

        # Homedir is not created by default
        if self.createhome:
            if self.home is None:
                self.home = '/Users/%s' % self.name
            if not os.path.exists(self.home):
                os.makedirs(self.home)
            self.chown_homedir(int(self.uid), int(self.group), self.home)

        for field in self.fields:
            if field[0] in self.__dict__ and self.__dict__[field[0]]:
                cmd = self._get_dscl()
                cmd += ['-create', '/Users/%s' % self.name,
                        field[1], self.__dict__[field[0]]]
                (rc, _out, _err) = self.execute_command(cmd)
                if rc != 0:
                    self.module.fail_json(
                        msg='Cannot add property "%s" to user "%s".'
                            % (field[0], self.name), err=err, out=out, rc=rc)
                out += _out
                err += _err

        (rc, _out, _err) = self._change_user_password()
        out += _out
        err += _err

        self._update_system_user()
        # here we don't care about change status since it is a creation,
        # thus changed is always true.
        (rc, _out, _err, changed) = self._modify_group()
        out += _out
        err += _err
        return (rc, out, err)

    def modify_user(self):
        '''Bring an existing user in line with the module options.
        Returns (changed, out, err) where changed is truthy when any
        dscl/dseditgroup call actually ran.'''
        changed = None
        out = ''
        err = ''
        self._make_group_numerical()

        for field in self.fields:
            if field[0] in self.__dict__ and self.__dict__[field[0]]:
                current = self._get_user_property(field[1])
                if current is None or current != self.__dict__[field[0]]:
                    cmd = self._get_dscl()
                    cmd += ['-create', '/Users/%s' % self.name,
                            field[1], self.__dict__[field[0]]]
                    (rc, _out, _err) = self.execute_command(cmd)
                    if rc != 0:
                        self.module.fail_json(
                            msg='Cannot update property "%s" for user "%s".'
                                % (field[0], self.name), err=err, out=out, rc=rc)
                    changed = rc
                    out += _out
                    err += _err

        if self.update_password == 'always':
            (rc, _out, _err) = self._change_user_password()
            out += _out
            err += _err
            changed = rc

        (rc, _out, _err, _changed) = self._modify_group()
        out += _out
        err += _err
        if _changed is True:
            changed = rc

        rc = self._update_system_user()
        if rc == 0:
            changed = rc
        return (changed, out, err)
# ===========================================
class AIX(User):
@ -1367,11 +1733,10 @@ class AIX(User):
# set password with chpasswd
if self.password is not None:
cmd = []
cmd.append('echo \''+self.name+':'+self.password+'\' |')
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
self.execute_command(' '.join(cmd), use_unsafe_shell=True)
self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
return (rc, out, err)
@ -1430,7 +1795,6 @@ class AIX(User):
cmd.append('-s')
cmd.append(self.shell)
# skip if no changes to be made
if len(cmd) == 1:
(rc, out, err) = (None, '', '')
@ -1443,11 +1807,10 @@ class AIX(User):
# set password with chpasswd
if self.update_password == 'always' and self.password is not None and info[1] != self.password:
cmd = []
cmd.append('echo \''+self.name+':'+self.password+'\' |')
cmd.append(self.module.get_bin_path('chpasswd', True))
cmd.append('-e')
cmd.append('-c')
(rc2, out2, err2) = self.execute_command(' '.join(cmd), use_unsafe_shell=True)
(rc2, out2, err2) = self.execute_command(' '.join(cmd), data="%s:%s" % (self.name, self.password))
else:
(rc2, out2, err2) = (None, '', '')
@ -1458,12 +1821,164 @@ class AIX(User):
# ===========================================
class HPUX(User):
    """
    HP-UX user management, implemented on top of the SAM command-line
    wrappers (/usr/sam/lbin/useradd.sam and friends).

    Overrides from the generic class:
    - create_user()
    - remove_user()
    - modify_user()
    """
    platform = 'HP-UX'
    distribution = None
    SHADOWFILE = '/etc/shadow'

    def create_user(self):
        '''Assemble and run a useradd.sam invocation from the module options.'''
        args = ['/usr/sam/lbin/useradd.sam']

        if self.uid is not None:
            args.extend(['-u', self.uid])
            if self.non_unique:
                args.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            args.extend(['-g', self.group])

        if self.groups is not None and len(self.groups):
            args.extend(['-G', ','.join(self.get_groups_set())])

        if self.comment is not None:
            args.extend(['-c', self.comment])

        if self.home is not None:
            args.extend(['-d', self.home])

        if self.shell is not None:
            args.extend(['-s', self.shell])

        if self.password is not None:
            args.extend(['-p', self.password])

        # -m creates the home directory, -M suppresses its creation.
        args.append('-m' if self.createhome else '-M')

        if self.system:
            args.append('-r')

        args.append(self.name)
        return self.execute_command(args)

    def remove_user(self):
        '''Run userdel.sam, honouring the force and remove options.'''
        args = ['/usr/sam/lbin/userdel.sam']
        if self.force:
            args.append('-F')
        if self.remove:
            args.append('-r')
        args.append(self.name)
        return self.execute_command(args)

    def modify_user(self):
        '''Build and run a usermod.sam command containing only the
        options that differ from the account's current state.'''
        args = ['/usr/sam/lbin/usermod.sam']
        current = self.user_info()
        has_append = self._check_usermod_append()

        if self.uid is not None and current[2] != int(self.uid):
            args.extend(['-u', self.uid])
            if self.non_unique:
                args.append('-o')

        if self.group is not None:
            if not self.group_exists(self.group):
                self.module.fail_json(msg="Group %s does not exist" % self.group)
            ginfo = self.group_info(self.group)
            if current[3] != ginfo[2]:
                args.extend(['-g', self.group])

        if self.groups is not None:
            member_of = self.user_group_membership()
            groups_need_mod = False
            groups = []
            if self.groups == '':
                if member_of and not self.append:
                    groups_need_mod = True
            else:
                groups = self.get_groups_set(remove_existing=False)
                group_diff = set(member_of).symmetric_difference(groups)
                if group_diff:
                    if self.append:
                        for g in groups:
                            if g in group_diff:
                                if has_append:
                                    args.append('-a')
                                groups_need_mod = True
                                break
                    else:
                        groups_need_mod = True

            if groups_need_mod:
                if self.append and not has_append:
                    args.extend(['-A', ','.join(group_diff)])
                else:
                    args.extend(['-G', ','.join(groups)])

        if self.comment is not None and current[4] != self.comment:
            args.extend(['-c', self.comment])

        if self.home is not None and current[5] != self.home:
            args.extend(['-d', self.home])
            if self.move_home:
                args.append('-m')

        if self.shell is not None and current[6] != self.shell:
            args.extend(['-s', self.shell])

        if self.update_password == 'always' and self.password is not None and current[1] != self.password:
            args.extend(['-p', self.password])

        # Nothing to change: report "no action taken" without running anything.
        if len(args) == 1:
            return (None, '', '')
        if self.module.check_mode:
            return (0, '', '')

        args.append(self.name)
        return self.execute_command(args)
# ===========================================
def main():
ssh_defaults = {
'bits': '2048',
'type': 'rsa',
'passphrase': None,
'comment': 'ansible-generated'
'comment': 'ansible-generated on %s' % socket.gethostname()
}
module = AnsibleModule(
argument_spec = dict(
@ -1494,7 +2009,8 @@ def main():
ssh_key_file=dict(default=None, type='str'),
ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
ssh_key_passphrase=dict(default=None, type='str'),
update_password=dict(default='always',choices=['always','on_create'],type='str')
update_password=dict(default='always',choices=['always','on_create'],type='str'),
expires=dict(default=None, type='float'),
),
supports_check_mode=True
)
@ -1562,6 +2078,16 @@ def main():
if user.groups is not None:
result['groups'] = user.groups
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.createhome:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
# deal with ssh key
if user.sshkeygen:
(rc, out, err) = user.ssh_key_gen()
@ -1577,16 +2103,6 @@ def main():
result['ssh_key_file'] = user.get_ssh_key_path()
result['ssh_public_key'] = user.get_ssh_public_key()
# handle missing homedirs
info = user.user_info()
if user.home is None:
user.home = info[5]
if not os.path.exists(user.home) and user.createhome:
if not module.check_mode:
user.create_homedir(user.home)
user.chown_homedir(info[2], info[3], user.home)
result['changed'] = True
module.exit_json(**result)
# import module snippets

@ -54,7 +54,7 @@ version_added: "0.7"
options:
host:
description:
- hostname or IP address to wait for
- A resolvable hostname or IP address to wait for
required: false
default: "127.0.0.1"
aliases: []
@ -63,6 +63,11 @@ options:
- maximum number of seconds to wait for
required: false
default: 300
connect_timeout:
description:
- maximum number of seconds to wait for a connection to happen before closing and retrying
required: false
default: 5
delay:
description:
- number of seconds to wait before starting to poll
@ -123,8 +128,9 @@ EXAMPLES = '''
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10
# wait 300 seconds for port 22 to become open and contain "OpenSSH", don't assume the inventory_hostname is resolvable
# and don't start checking for 10 seconds
- local_action: wait_for port=22 host="{{ ansible_ssh_host | default(inventory_hostname) }}" search_regex=OpenSSH delay=10
'''

@ -49,9 +49,12 @@ import re
def _disable_module(module):
name = module.params['name']
a2dismod_binary = module.get_bin_path("a2dismod")
if a2dismod_binary is None:
module.fail_json(msg="a2dismod not found. Perhaps this system does not use a2dismod to manage apache")
result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))
if re.match(r'.*' + name + r' already disabled.*', stdout, re.S):
if re.match(r'.*\b' + name + r' already disabled', stdout, re.S):
module.exit_json(changed = False, result = "Success")
elif result != 0:
module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))
@ -61,9 +64,12 @@ def _disable_module(module):
def _enable_module(module):
name = module.params['name']
a2enmod_binary = module.get_bin_path("a2enmod")
if a2enmod_binary is None:
module.fail_json(msg="a2enmod not found. Perhaps this system does not use a2enmod to manage apache")
result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))
if re.match(r'.*' + name + r' already enabled.*', stdout, re.S):
if re.match(r'.*\b' + name + r' already enabled', stdout, re.S):
module.exit_json(changed = False, result = "Success")
elif result != 0:
module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
@ -86,4 +92,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

@ -1,84 +0,0 @@
#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WANT_JSON
# POWERSHELL_COMMON
$params = Parse-Args $args;

$src = Get-Attr $params "src" $FALSE;
If ($src -eq $FALSE)
{
    Fail-Json (New-Object psobject) "missing required argument: src";
}

$dest = Get-Attr $params "dest" $FALSE;
If ($dest -eq $FALSE)
{
    Fail-Json (New-Object psobject) "missing required argument: dest";
}

# seems to be supplied by the calling environment, but
# probably shouldn't be a test for it existing in the params.
# TODO investigate.
$original_basename = Get-Attr $params "original_basename" $FALSE;
If ($original_basename -eq $FALSE)
{
    Fail-Json (New-Object psobject) "missing required argument: original_basename ";
}

$result = New-Object psobject @{
    changed = $FALSE
};

# if $dest is a dir, append $original_basename so the file gets copied with its intended name.
if (Test-Path $dest -PathType Container)
{
    $dest = Join-Path $dest $original_basename;
}

If (Test-Path $dest)
{
    $dest_checksum = Get-FileChecksum ($dest);
    $src_checksum = Get-FileChecksum ($src);

    # String.CompareTo returns 0 when both checksum strings are equal.
    # BUGFIX: the original tested '! $src_checksum.CompareTo(...)', which
    # copied only when the files were ALREADY identical, reported
    # 'changed' when the checksums still differed after the copy, and
    # failed when they matched. Copy only when the checksums differ.
    If ($src_checksum.CompareTo($dest_checksum) -ne 0)
    {
        # New-Item -Force creates subdirs for recursive copies
        New-Item -Force $dest -Type file;
        Copy-Item -Path $src -Destination $dest -Force;

        # Re-read the destination to verify the copy actually took effect.
        $dest_checksum = Get-FileChecksum ($dest);
        If ($src_checksum.CompareTo($dest_checksum) -eq 0)
        {
            $result.changed = $TRUE;
        }
        Else
        {
            Fail-Json (New-Object psobject) "Failed to place file";
        }
    }
}
Else
{
    New-Item -Force $dest -Type file;
    Copy-Item -Path $src -Destination $dest;
    $result.changed = $TRUE;
}

$dest_checksum = Get-FileChecksum($dest);
$result.checksum = $dest_checksum;

Exit-Json $result;

@ -1,60 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
DOCUMENTATION = '''
---
module: win_copy
version_added: "1.8"
short_description: Copies files to remote locations on windows hosts.
description:
- The M(win_copy) module copies a file on the local box to remote windows locations.
options:
src:
description:
- Local path to a file to copy to the remote server; can be absolute or relative.
If path is a directory, it is copied recursively. In this case, if path ends
with "/", only inside contents of that directory are copied to destination.
Otherwise, if it does not end with "/", the directory itself with all contents
is copied. This behavior is similar to Rsync.
required: false
default: null
aliases: []
dest:
description:
- Remote absolute path where the file should be copied to. If src is a directory,
this must be a directory too. Use \\ for path separators.
required: true
default: null
author: Michael DeHaan
notes:
- The "win_copy" module's recursive copy facility does not scale to lots (>hundreds) of files.
Instead, you may find it better to create files locally, perhaps using win_template, and
then use win_get_url to put them in the correct location.
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- win_copy: src=/srv/myfiles/foo.conf dest=c:\\TEMP\\foo.conf
'''

@ -1,105 +0,0 @@
#!powershell
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# WANT_JSON
# POWERSHELL_COMMON
$params = Parse-Args $args;

# path (aliases: dest, name)
$path = Get-Attr $params "path" $FALSE;
If ($path -eq $FALSE)
{
    $path = Get-Attr $params "dest" $FALSE;
    If ($path -eq $FALSE)
    {
        $path = Get-Attr $params "name" $FALSE;
        If ($path -eq $FALSE)
        {
            Fail-Json (New-Object psobject) "missing required argument: path";
        }
    }
}

# JH Following advice from Chris Church, only allow the following states
# in the windows version for now:
# state - file, directory, touch, absent
# (originally was: state - file, link, directory, hard, touch, absent)
$state = Get-Attr $params "state" "file";

#$recurse = Get-Attr $params "recurse" "no";

# force - yes, no
# $force = Get-Attr $params "force" "no";

# result
$result = New-Object psobject @{
    changed = $FALSE
};

If ( $state -eq "touch" )
{
    If(Test-Path $path)
    {
        (Get-ChildItem $path).LastWriteTime = Get-Date
    }
    Else
    {
        # BUGFIX: the original wrote to the undefined variable $file,
        # so touching a non-existent path never created it.
        echo $null > $path
    }
    $result.changed = $TRUE;
}

If (Test-Path $path)
{
    $fileinfo = Get-Item $path;
    If ( $state -eq "absent" )
    {
        Remove-Item -Recurse -Force $fileinfo;
        $result.changed = $TRUE;
    }
    Else
    {
        # Only files have the .Directory attribute.
        If ( $state -eq "directory" -and $fileinfo.Directory )
        {
            Fail-Json (New-Object psobject) "path is not a directory";
        }

        # Only files have the .Directory attribute.
        If ( $state -eq "file" -and -not $fileinfo.Directory )
        {
            Fail-Json (New-Object psobject) "path is not a file";
        }
    }
}
Else
{
    If ( $state -eq "directory" )
    {
        New-Item -ItemType directory -Path $path
        $result.changed = $TRUE;
    }

    If ( $state -eq "file" )
    {
        Fail-Json (New-Object psobject) "path will not be created";
    }
}

Exit-Json $result;

@ -1,73 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: win_file
version_added: "1.8"
short_description: Creates, touches or removes files or directories.
extends_documentation_fragment: files
description:
- Creates (empty) files, updates file modification stamps of existing files,
and can create or remove directories.
Unlike M(file), does not modify ownership, permissions or manipulate links.
notes:
- See also M(win_copy), M(win_template), M(copy), M(template), M(assemble)
requirements: [ ]
author: Michael DeHaan
options:
path:
description:
- 'path to the file being managed. Aliases: I(dest), I(name)'
required: true
default: []
aliases: ['dest', 'name']
state:
description:
- If C(directory), all immediate subdirectories will be created if they
do not exist.
If C(file), the file will NOT be created if it does not exist, see the M(copy)
or M(template) module if you want that behavior. If C(absent),
directories will be recursively deleted, and files will be removed.
If C(touch), an empty file will be created if the C(path) does not
exist, while an existing file or directory will receive updated file access and
modification times (similar to the way `touch` works from the command line).
required: false
default: file
choices: [ file, directory, touch, absent ]
'''
EXAMPLES = '''
# create a file
- win_file: path=C:\\temp\\foo.conf
# touch a file (creates if not present, updates modification time if present)
- win_file: path=C:\\temp\\foo.conf state=touch
# remove a file, if present
- win_file: path=C:\\temp\\foo.conf state=absent
# create directory structure
- win_file: path=C:\\temp\\folder\\subfolder state=directory
# remove directory structure
- win_file: path=C:\\temp state=absent
'''

@ -53,9 +53,11 @@ Else
If ($get_md5 -and $result.stat.exists -and -not $result.stat.isdir)
{
$hash = Get-FileChecksum($path);
$sp = new-object -TypeName System.Security.Cryptography.MD5CryptoServiceProvider;
$fp = [System.IO.File]::Open($path, [System.IO.Filemode]::Open, [System.IO.FileAccess]::Read);
$hash = [System.BitConverter]::ToString($sp.ComputeHash($fp)).Replace("-", "").ToLower();
$fp.Dispose();
Set-Attr $result.stat "md5" $hash;
Set-Attr $result.stat "checksum" $hash;
}
Exit-Json $result;

@ -1,52 +0,0 @@
# this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: win_template
version_added: 1.8
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: true
default: null
aliases: []
dest:
description:
- Location to render the template to on the remote machine.
required: true
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
choices: [ "yes", "no" ]
default: "no"
notes:
- "templates are loaded with C(trim_blocks=True)."
requirements: []
author: Michael DeHaan
'''
EXAMPLES = '''
# Example
- win_template: src=/mytemplates/foo.j2 dest=C:\\temp\\file.conf
'''
Loading…
Cancel
Save