From 8aff264216b28c5b49e39d016f9b577238f63dbd Mon Sep 17 00:00:00 2001 From: Kamil Kisiel Date: Mon, 2 Dec 2013 23:50:33 -0800 Subject: [PATCH 01/59] Don't modify hgrc when running purge --- source_control/hg | 48 ++++++----------------------------------------- 1 file changed, 6 insertions(+), 42 deletions(-) diff --git a/source_control/hg b/source_control/hg index e6730ad6f59..fcaa73457ad 100644 --- a/source_control/hg +++ b/source_control/hg @@ -59,9 +59,7 @@ options: choices: [ "yes", "no" ] purge: description: - - Deletes untracked files. Runs C(hg purge). Note this requires C(purge) extension to - be enabled if C(purge=yes). This module will modify hgrc file on behalf of the user - and undo the changes before exiting the task. + - Deletes untracked files. Runs C(hg purge). required: false default: "no" choices: [ "yes", "no" ] @@ -85,36 +83,6 @@ EXAMPLES = ''' - hg: repo=https://bitbucket.org/user/repo1 dest=/home/user/repo1 revision=stable purge=yes ''' -def _set_hgrc(hgrc, vals): - parser = ConfigParser.SafeConfigParser() - parser.read(hgrc) - - # val is a list of triple-tuple of the form [(section, option, value),...] - for each in vals: - (section, option, value) = each - if not parser.has_section(section): - parser.add_section(section) - parser.set(section, option, value) - - f = open(hgrc, 'w') - parser.write(f) - f.close() - - -def _undo_hgrc(hgrc, vals): - parser = ConfigParser.SafeConfigParser() - parser.read(hgrc) - - for each in vals: - (section, option, value) = each - if parser.has_section(section): - parser.remove_option(section, option) - - f = open(hgrc, 'w') - parser.write(f) - f.close() - - class Hg(object): def __init__(self, module, dest, repo, revision, hg_path): @@ -129,7 +97,8 @@ class Hg(object): return (rc, out, err) def _list_untracked(self): - return self._command(['purge', '-R', self.dest, '--print']) + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest, '--print'] + return self._command(args) def get_revision(self): """ @@ -168,10 +137,6 @@ class Hg(object): return True def purge(self): - hgrc = os.path.join(self.dest, '.hg/hgrc') - purge_option = [('extensions', 'purge', '')] - _set_hgrc(hgrc, purge_option) # enable purge extension - # before purge, find out if there are any untracked files (rc1, out1, err1) = self._list_untracked() if rc1 != 0: @@ -179,10 +144,9 @@ class Hg(object): # there are some untrackd files if out1 != '': - (rc2, out2, err2) = self._command(['purge', '-R', self.dest]) - if rc2 == 0: - _undo_hgrc(hgrc, purge_option) - else: + args = ['purge', '--config', 'extensions.purge=', '-R', self.dest] + (rc2, out2, err2) = self._command(args) + if rc2 != 0: self.module.fail_json(msg=err2) return True else: From 677f95294e309a474255702bb612326f4ee566db Mon Sep 17 00:00:00 2001 From: "Jasper N. 
Brouwer" Date: Wed, 4 Dec 2013 21:49:00 +0100 Subject: [PATCH 02/59] Fixed false positive on initctl as enable_cmd Also on update-rc.d In service module --- system/service | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/system/service b/system/service index aac7319d753..2490c6a30fd 100644 --- a/system/service +++ b/system/service @@ -431,10 +431,10 @@ class LinuxService(Service): if check_systemd(self.name): # service is managed by systemd self.enable_cmd = location['systemctl'] - elif os.path.exists("/etc/init/%s.conf" % self.name): + elif os.path.exists("/etc/init/%s.conf" % self.name) and location['initctl']: # service is managed by upstart self.enable_cmd = location['initctl'] - elif os.path.exists("/etc/init.d/%s" % self.name): + elif os.path.exists("/etc/init.d/%s" % self.name) and location['update-rc.d']: # service is managed by with SysV init scripts, but with update-rc.d self.enable_cmd = location['update-rc.d'] else: @@ -649,7 +649,7 @@ class LinuxService(Service): return if self.enable: - # make sure the init.d symlinks are created + # make sure the init.d symlinks are created # otherwise enable might not work (rc, out, err) = self.execute_command("%s %s defaults" \ % (self.enable_cmd, self.name)) From 8fe3b0526298c26af0f954e542cc9508b87cb5e9 Mon Sep 17 00:00:00 2001 From: drewlll2ll Date: Thu, 12 Dec 2013 09:44:29 -0500 Subject: [PATCH 03/59] Added prev_state for directory --- files/file | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/files/file b/files/file index 134b5284efd..8e2af964b9d 100644 --- a/files/file +++ b/files/file @@ -307,6 +307,10 @@ def main(): if not force: module.fail_json(dest=path, src=src, msg='Cannot link, file exists at destination') changed = True + elif prev_state == 'directory': + if not force: + module.fail_json(dest=path, src=src, msg='Cannot link, directory exists at destination') + changed = True else: module.fail_json(dest=path, src=src, msg='unexpected position reached') From 5a66a95bf3185dffa09e889d78bf7502564793ef Mon Sep 17 00:00:00 2001 From: Mike Grozak Date: Fri, 13 Dec 2013 17:10:50 +0100 Subject: [PATCH 04/59] Added to the file module the functionality to force conversion hard link and symlink when the force attribute is set to 'yes' --- files/file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file b/files/file index 134b5284efd..5e308e11263 100644 --- a/files/file +++ b/files/file @@ -245,7 +245,7 @@ def main(): module.exit_json(path=path, changed=True) if prev_state != 'absent' and prev_state != state: - if not (force and (prev_state == 'file' or prev_state == 'directory') and state == 'link') and state != 'touch': + if not (force and (prev_state == 'file' or prev_state == 'hard' or prev_state == 'directory') and state == 'link') and state != 'touch': module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src)) if prev_state == 'absent' and state == 'absent': From 74f9f91abf7d214e9d3baf252da62e932d6beeb1 Mon Sep 17 00:00:00 2001 From: willthames Date: Tue, 17 Dec 2013 12:04:12 +1000 Subject: [PATCH 05/59] Move more responsibility to common EC2 module Moved `AWS_REGIONS` into `ec2` module Created `ec2_connect` method in `ec2` module Updated modules able to use `ec2_connect` and `AWS_REGIONS` --- cloud/ec2 | 28 +--------------------------- cloud/ec2_ami | 28 +--------------------------- cloud/ec2_eip | 44 +++----------------------------------------- cloud/ec2_elb | 9 --------- cloud/ec2_group | 22 ++-------------------- cloud/ec2_tag | 
28 +--------------------------- cloud/ec2_vol | 28 +--------------------------- cloud/ec2_vpc | 9 --------- cloud/elasticache | 9 --------- cloud/rds | 30 ++---------------------------- 10 files changed, 11 insertions(+), 224 deletions(-) diff --git a/cloud/ec2 b/cloud/ec2 index 0e0b8aaf0fd..a565b0359a6 100644 --- a/cloud/ec2 +++ b/cloud/ec2 @@ -294,15 +294,6 @@ local_action: import sys import time -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - try: import boto.ec2 from boto.exception import EC2ResponseError @@ -653,24 +644,7 @@ def main(): ) ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - # If we have a region specified, connect to its endpoint. - if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - # If we specified an ec2_url then try connecting to it - elif ec2_url: - try: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="Either region or ec2_url must be specified") + ec2 = ec2_connect(module) if module.params.get('state') == 'absent': instance_ids = module.params.get('instance_ids') diff --git a/cloud/ec2_ami b/cloud/ec2_ami index ea7e0ad86dc..f90f23db999 100644 --- a/cloud/ec2_ami +++ b/cloud/ec2_ami @@ -156,15 +156,6 @@ EXAMPLES = ''' import sys import time -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - try: import boto import boto.ec2 @@ -279,24 +270,7 @@ def main(): ) ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - - # If we have a region specified, connect to its endpoint. - if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - # If we specified an ec2_url then try connecting to it - elif ec2_url: - try: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="Either region or ec2_url must be specified") + ec2 = ec2_connect(module) if module.params.get('state') == 'absent': if not module.params.get('image_id'): diff --git a/cloud/ec2_eip b/cloud/ec2_eip index 1c5db8cf4c1..4399c6bdf6a 100644 --- a/cloud/ec2_eip +++ b/cloud/ec2_eip @@ -102,38 +102,6 @@ else: boto_found = True -def connect(ec2_url, ec2_access_key, ec2_secret_key, region, module): - - """ Return an ec2 connection""" - # allow environment variables to be used if ansible vars aren't set - if not ec2_url and 'EC2_URL' in os.environ: - ec2_url = os.environ['EC2_URL'] - if not ec2_secret_key and 'EC2_SECRET_KEY' in os.environ: - ec2_secret_key = os.environ['EC2_SECRET_KEY'] - if not ec2_access_key and 'EC2_ACCESS_KEY' in os.environ: - ec2_access_key = os.environ['EC2_ACCESS_KEY'] - - # If we have a region specified, connect to its endpoint. 
- if region: - try: - ec2 = boto.ec2.connect_to_region(region, - aws_access_key_id=ec2_access_key, - aws_secret_access_key=ec2_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(" %s %s %s " % (region, ec2_access_key, - ec2_secret_key))) - # Otherwise, no region so we fallback to the old connection method - else: - try: - if ec2_url: # if we have an URL set, connect to the specified endpoint - ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key) - else: # otherwise it's Amazon. - ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - return ec2 - - def associate_ip_and_instance(ec2, address, instance_id, module): if ip_is_associated_with_instance(ec2, address.public_ip, instance_id, module): module.exit_json(changed=False, public_ip=address.public_ip) @@ -248,8 +216,8 @@ def main(): state = dict(required=False, default='present', choices=['present', 'absent']), ec2_url = dict(required=False, aliases=['EC2_URL']), - ec2_secret_key = dict(required=False, aliases=['EC2_SECRET_KEY'], no_log=True), - ec2_access_key = dict(required=False, aliases=['EC2_ACCESS_KEY']), + ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True), + ec2_access_key = dict(aliases=['aws_access_key', 'access_key']), region = dict(required=False, aliases=['ec2_region']), in_vpc = dict(required=False, choices=BOOLEANS, default=False), ), @@ -259,13 +227,7 @@ def main(): if not boto_found: module.fail_json(msg="boto is required") - ec2_url, ec2_access_key, ec2_secret_key, region = get_ec2_creds(module) - - ec2 = connect(ec2_url, - ec2_access_key, - ec2_secret_key, - region, - module) + ec2 = ec2_connect(module) instance_id = module.params.get('instance_id') public_ip = module.params.get('public_ip') diff --git a/cloud/ec2_elb b/cloud/ec2_elb index 3132d9e9517..4488cf84608 100644 --- a/cloud/ec2_elb +++ b/cloud/ec2_elb @@ -102,15 +102,6 @@ import time import sys import os -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - try: import boto import boto.ec2 diff --git a/cloud/ec2_group b/cloud/ec2_group index c325c1ce301..bdba3f5b050 100644 --- a/cloud/ec2_group +++ b/cloud/ec2_group @@ -113,16 +113,12 @@ def main(): ec2_url=dict(aliases=['EC2_URL']), ec2_secret_key=dict(aliases=['EC2_SECRET_KEY', 'aws_secret_key'], no_log=True), ec2_access_key=dict(aliases=['EC2_ACCESS_KEY', 'aws_access_key']), - region=dict(choices=['eu-west-1', 'sa-east-1', 'us-east-1', 'ap-northeast-1', 'us-west-2', 'us-west-1', 'ap-southeast-1', 'ap-southeast-2']), + region=dict(choices=AWS_REGIONS), state = dict(default='present', choices=['present', 'absent']), ), supports_check_mode=True, ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, ec2_access_key, ec2_secret_key, region = get_ec2_creds(module) - name = module.params['name'] description = module.params['description'] vpc_id = module.params['vpc_id'] @@ -131,21 +127,7 @@ def main(): changed = False - # If we have a region specified, connect to its endpoint. 
- if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=ec2_access_key, aws_secret_access_key=ec2_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) - # Otherwise, no region so we fallback to the old connection method - else: - try: - if ec2_url: # if we have an URL set, connect to the specified endpoint - ec2 = boto.connect_ec2_endpoint(ec2_url, ec2_access_key, ec2_secret_key) - else: # otherwise it's Amazon. - ec2 = boto.connect_ec2(ec2_access_key, ec2_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg=str(e)) + ec2 = ec2_connect(module) # find the group if present group = None diff --git a/cloud/ec2_tag b/cloud/ec2_tag index 71e6792698d..9bcf8a5dabb 100644 --- a/cloud/ec2_tag +++ b/cloud/ec2_tag @@ -101,15 +101,6 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - def main(): module = AnsibleModule( argument_spec = dict( @@ -123,28 +114,11 @@ def main(): ) ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - resource = module.params.get('resource') tags = module.params['tags'] state = module.params.get('state') - # If we have a region specified, connect to its endpoint. - if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - # Otherwise, no region so we fallback to the old connection method - elif ec2_url: - try: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="Either region or ec2_url must be specified") + ec2 = ec2_connect(module) # We need a comparison here so that we can accurately report back changed status. # Need to expand the gettags return format and compare with "tags" and then tag or detag as appropriate. diff --git a/cloud/ec2_vol b/cloud/ec2_vol index e5c7d1eab19..a60e0b71f81 100644 --- a/cloud/ec2_vol +++ b/cloud/ec2_vol @@ -127,15 +127,6 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - def main(): module = AnsibleModule( argument_spec = dict( @@ -151,30 +142,13 @@ def main(): ) ) - # def get_ec2_creds(module): - # return ec2_url, ec2_access_key, ec2_secret_key, region - ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) - instance = module.params.get('instance') volume_size = module.params.get('volume_size') iops = module.params.get('iops') device_name = module.params.get('device_name') zone = module.params.get('zone') - # If we have a region specified, connect to its endpoint. 
- if region: - try: - ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=aws_access_key, aws_secret_access_key=aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - # Otherwise, no region so we fallback to the old connection method - elif ec2_url: - try: - ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key, aws_secret_key) - except boto.exception.NoAuthHandlerFound, e: - module.fail_json(msg = str(e)) - else: - module.fail_json(msg="Either region or ec2_url must be specified") + ec2 = ec2_connect(module) # Here we need to get the zone info for the instance. This covers situation where # instance is specified but zone isn't. diff --git a/cloud/ec2_vpc b/cloud/ec2_vpc index 663a574f956..53b60c9dfcd 100644 --- a/cloud/ec2_vpc +++ b/cloud/ec2_vpc @@ -164,15 +164,6 @@ except ImportError: print "failed=True msg='boto required for this module'" sys.exit(1) -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - def get_vpc_info(vpc): """ Retrieves vpc information from an instance diff --git a/cloud/elasticache b/cloud/elasticache index 9b40107d981..de2c8636564 100644 --- a/cloud/elasticache +++ b/cloud/elasticache @@ -137,15 +137,6 @@ import sys import os import time -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - try: import boto from boto.elasticache.layer1 import ElastiCacheConnection diff --git a/cloud/rds b/cloud/rds index 56e32ad2087..4b3af90e4e4 100644 --- a/cloud/rds +++ b/cloud/rds @@ -262,15 +262,6 @@ EXAMPLES = ''' import sys import time -AWS_REGIONS = ['ap-northeast-1', - 'ap-southeast-1', - 'ap-southeast-2', - 'eu-west-1', - 'sa-east-1', - 'us-east-1', - 'us-west-1', - 'us-west-2'] - try: import boto.rds except ImportError: @@ -346,25 +337,7 @@ def main(): apply_immediately = module.params.get('apply_immediately') new_instance_name = module.params.get('new_instance_name') - # allow environment variables to be used if ansible vars aren't set - if not region: - if 'AWS_REGION' in os.environ: - region = os.environ['AWS_REGION'] - elif 'EC2_REGION' in os.environ: - region = os.environ['EC2_REGION'] - - if not aws_secret_key: - if 'AWS_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['AWS_SECRET_KEY'] - elif 'EC2_SECRET_KEY' in os.environ: - aws_secret_key = os.environ['EC2_SECRET_KEY'] - - if not aws_access_key: - if 'AWS_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['AWS_ACCESS_KEY'] - elif 'EC2_ACCESS_KEY' in os.environ: - aws_access_key = os.environ['EC2_ACCESS_KEY'] - + ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module) if not region: module.fail_json(msg = str("region not specified and unable to determine region from EC2_REGION.")) @@ -577,5 +550,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * main() From ded9d626e0aea7fe054358cdcc72579a2ad4eb30 Mon Sep 17 00:00:00 2001 From: "Brad P. Crochet" Date: Tue, 17 Dec 2013 13:24:20 -0500 Subject: [PATCH 06/59] Add support for neutronclient The quantum_* modules will now try neutronclient first, and fall back to quantumclient. If that fails, error out. The code now references neutron instead of quantum in all internal cases. 
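In sketch form, the import fallback this series applies to each quantum_* module looks like the following (Python; the exact failure message and extra imports differ per module, as the diffs below show):

    try:
        try:
            from neutronclient.neutron import client
        except ImportError:
            from quantumclient.quantum import client
        from keystoneclient.v2_0 import client as ksclient
    except ImportError:
        # neither neutronclient nor quantumclient (or keystoneclient) is available
        print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'")
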
--- cloud/quantum_floating_ip | 69 +++++++++++++------------- cloud/quantum_floating_ip_associate | 56 +++++++++++---------- cloud/quantum_network | 73 ++++++++++++++------------- cloud/quantum_router | 55 +++++++++++---------- cloud/quantum_router_gateway | 73 ++++++++++++++------------- cloud/quantum_router_interface | 73 ++++++++++++++------------- cloud/quantum_subnet | 77 +++++++++++++++-------------- 7 files changed, 249 insertions(+), 227 deletions(-) diff --git a/cloud/quantum_floating_ip b/cloud/quantum_floating_ip index 1d755e67c27..54e1c68d79d 100644 --- a/cloud/quantum_floating_ip +++ b/cloud/quantum_floating_ip @@ -18,11 +18,14 @@ try: from novaclient.v1_1 import client as nova_client - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient import time except ImportError: - print("failed=True msg='glanceclient,keystoneclient and quantumclient client are required'") + print("failed=True msg='novaclient,keystoneclient and quantumclient (or neutronclient) are required'") DOCUMENTATION = ''' --- @@ -72,7 +75,7 @@ options: - The name of the instance to which the IP address should be assigned required: true default: None -requirements: ["novaclient", "quantumclient", "keystoneclient"] +requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"] ''' EXAMPLES = ''' @@ -99,10 +102,10 @@ def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -111,10 +114,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = "Error in connecting to quantum: %s " % e.message) - return quantum + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron def _get_server_state(module, nova): server_info = None @@ -130,68 +133,68 @@ def _get_server_state(module, nova): break except Exception as e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_info(quantum, module, instance_id): + return server_info, server + +def _get_port_info(neutron, module, instance_id): kwargs = { 'device_id': instance_id, } try: - ports = quantum.list_ports(**kwargs) + ports = neutron.list_ports(**kwargs) except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None, None return ports['ports'][0]['fixed_ips'][0]['ip_address'], ports['ports'][0]['id'] - -def _get_floating_ip(module, quantum, fixed_ip_address): + +def _get_floating_ip(module, neutron, fixed_ip_address): kwargs = { 'fixed_ip_address': fixed_ip_address } try: - ips = quantum.list_floatingips(**kwargs) + ips = neutron.list_floatingips(**kwargs) except Exception as e: module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) if not ips['floatingips']: return None, None return 
ips['floatingips'][0]['id'], ips['floatingips'][0]['floating_ip_address'] -def _create_floating_ip(quantum, module, port_id, net_id): +def _create_floating_ip(neutron, module, port_id, net_id): kwargs = { 'port_id': port_id, 'floating_network_id': net_id } try: - result = quantum.create_floatingip({'floatingip': kwargs}) + result = neutron.create_floatingip({'floatingip': kwargs}) except Exception as e: module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed=True, result=result, public_ip=result['floatingip']['floating_ip_address']) -def _get_net_id(quantum, module): +def _get_net_id(neutron, module): kwargs = { 'name': module.params['network_name'], } try: - networks = quantum.list_networks(**kwargs) + networks = neutron.list_networks(**kwargs) except Exception as e: - module.fail_json("Error in listing quantum networks: %s" % e.message) + module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] -def _update_floating_ip(quantum, module, port_id, floating_ip_id): +def _update_floating_ip(neutron, module, port_id, floating_ip_id): kwargs = { 'port_id': port_id } try: - result = quantum.update_floatingip(floating_ip_id, {'floatingip': kwargs}) + result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) except Exception as e: module.fail_json(msg="There was an error in updating the floating ip address: %s" % e.message) module.exit_json(changed=True, result=result) def main(): - + module = AnsibleModule( argument_spec = dict( login_username = dict(default='admin'), @@ -200,39 +203,39 @@ def main(): auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), region_name = dict(default=None), network_name = dict(required=True), - instance_name = dict(required=True), + instance_name = dict(required=True), state = dict(default='present', choices=['absent', 'present']) ), ) - + try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], + nova = nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') - quantum = _get_quantum_client(module, module.params) + neutron = _get_neutron_client(module, module.params) except Exception as e: module.fail_json(msg="Error in authenticating to nova: %s" % e.message) - + server_info, server_obj = _get_server_state(module, nova) if not server_info: module.fail_json(msg="The instance name provided cannot be found") - fixed_ip, port_id = _get_port_info(quantum, module, server_info['id']) + fixed_ip, port_id = _get_port_info(neutron, module, server_info['id']) if not port_id: module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned") - floating_id, floating_ip = _get_floating_ip(module, quantum, fixed_ip) + floating_id, floating_ip = _get_floating_ip(module, neutron, fixed_ip) if module.params['state'] == 'present': if floating_ip: module.exit_json(changed = False, public_ip=floating_ip) - net_id = _get_net_id(quantum, module) + net_id = _get_net_id(neutron, module) if not net_id: module.fail_json(msg = "cannot find the network specified, please check") - _create_floating_ip(quantum, module, port_id, net_id) + _create_floating_ip(neutron, module, port_id, net_id) if module.params['state'] == 'absent': if floating_ip: - _update_floating_ip(quantum, module, None, floating_id) + _update_floating_ip(neutron, module, 
None, floating_id) module.exit_json(changed=False) # this is magic, see lib/ansible/module.params['common.py diff --git a/cloud/quantum_floating_ip_associate b/cloud/quantum_floating_ip_associate index 9b720ea232c..e878fe5086a 100644 --- a/cloud/quantum_floating_ip_associate +++ b/cloud/quantum_floating_ip_associate @@ -18,11 +18,14 @@ try: from novaclient.v1_1 import client as nova_client - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient import time except ImportError: - print "failed=True msg='glanceclient,novaclient and keystone client are required'" + print "failed=True msg='novaclient, keystone, and quantumclient (or neutronclient) client are required'" DOCUMENTATION = ''' --- @@ -72,7 +75,7 @@ options: - floating ip that should be assigned to the instance required: true default: None -requirements: ["quantumclient", "keystoneclient"] +requirements: ["quantumclient", "neutronclient", "keystoneclient"] ''' EXAMPLES = ''' @@ -103,10 +106,10 @@ def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -115,10 +118,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = "Error in connecting to quantum: %s " % e.message) - return quantum + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron def _get_server_state(module, nova): server_info = None @@ -134,24 +137,24 @@ def _get_server_state(module, nova): break except Exception as e: module.fail_json(msg = "Error in getting the server list: %s" % e.message) - return server_info, server - -def _get_port_id(quantum, module, instance_id): + return server_info, server + +def _get_port_id(neutron, module, instance_id): kwargs = dict(device_id = instance_id) try: - ports = quantum.list_ports(**kwargs) + ports = neutron.list_ports(**kwargs) except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None return ports['ports'][0]['id'] - -def _get_floating_ip_id(module, quantum): + +def _get_floating_ip_id(module, neutron): kwargs = { 'floating_ip_address': module.params['ip_address'] } try: - ips = quantum.list_floatingips(**kwargs) + ips = neutron.list_floatingips(**kwargs) except Exception as e: module.fail_json(msg = "error in fetching the floatingips's %s" % e.message) if not ips['floatingips']: @@ -163,18 +166,18 @@ def _get_floating_ip_id(module, quantum): state = "attached" return state, ip -def _update_floating_ip(quantum, module, port_id, floating_ip_id): +def _update_floating_ip(neutron, module, port_id, floating_ip_id): kwargs = { 'port_id': port_id } try: - result = quantum.update_floatingip(floating_ip_id, {'floatingip': kwargs}) + result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs}) except Exception as e: module.fail_json(msg = "There was an 
error in updating the floating ip address: %s" % e.message) module.exit_json(changed = True, result = result, public_ip=module.params['ip_address']) def main(): - + module = AnsibleModule( argument_spec = dict( login_username = dict(default='admin'), @@ -183,33 +186,34 @@ def main(): auth_url = dict(default='http://127.0.0.1:35357/v2.0/'), region_name = dict(default=None), ip_address = dict(required=True), - instance_name = dict(required=True), + instance_name = dict(required=True), state = dict(default='present', choices=['absent', 'present']) ), ) - + try: - nova = nova_client.Client(module.params['login_username'], module.params['login_password'], module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') + nova = nova_client.Client(module.params['login_username'], module.params['login_password'], + module.params['login_tenant_name'], module.params['auth_url'], service_type='compute') except Exception as e: module.fail_json( msg = " Error in authenticating to nova: %s" % e.message) - quantum = _get_quantum_client(module, module.params) - state, floating_ip_id = _get_floating_ip_id(module, quantum) + neutron = _get_neutron_client(module, module.params) + state, floating_ip_id = _get_floating_ip_id(module, neutron) if module.params['state'] == 'present': if state == 'attached': module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address']) server_info, server_obj = _get_server_state(module, nova) if not server_info: module.fail_json(msg = " The instance name provided cannot be found") - port_id = _get_port_id(quantum, module, server_info['id']) + port_id = _get_port_id(neutron, module, server_info['id']) if not port_id: module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned") - _update_floating_ip(quantum, module, port_id, floating_ip_id) + _update_floating_ip(neutron, module, port_id, floating_ip_id) if module.params['state'] == 'absent': if state == 'detached': module.exit_json(changed = False, result = 'detached') if state == 'attached': - _update_floating_ip(quantum, module, None, floating_ip_id) + _update_floating_ip(neutron, module, None, floating_ip_id) module.exit_json(changed = True, result = "detached") # this is magic, see lib/ansible/module.params['common.py diff --git a/cloud/quantum_network b/cloud/quantum_network index 4eb416f7520..be6951c0b72 100644 --- a/cloud/quantum_network +++ b/cloud/quantum_network @@ -17,10 +17,13 @@ # along with this software. If not, see . 
try: - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: - print("failed=True msg='quantumclient and keystone client are required'") + print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") DOCUMENTATION = ''' --- @@ -67,7 +70,7 @@ options: default: present name: description: - - Name to be assigned to the nework + - Name to be assigned to the nework required: true default: None provider_network_type: @@ -100,7 +103,7 @@ options: - Whether the state should be marked as up or down required: false default: true -requirements: ["quantumclient", "keystoneclient"] +requirements: ["quantumclient", "neutronclient", "keystoneclient"] ''' @@ -125,21 +128,21 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception as e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone + global _os_keystone _os_keystone = kclient - return kclient - + return kclient + def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for Quantum: %s " %e.message) + module.fail_json(msg = "Error getting network endpoint: %s " %e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -148,10 +151,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = " Error in connecting to quantum: %s " %e.message) - return quantum + module.fail_json(msg = " Error in connecting to neutron: %s " %e.message) + return neutron def _set_tenant_id(module): global _os_tenant_id @@ -159,7 +162,7 @@ def _set_tenant_id(module): tenant_name = module.params['login_tenant_name'] else: tenant_name = module.params['tenant_name'] - + for tenant in _os_keystone.tenants.list(): if tenant.name == tenant_name: _os_tenant_id = tenant.id @@ -168,22 +171,22 @@ def _set_tenant_id(module): module.fail_json(msg = "The tenant id cannot be found, please check the paramters") -def _get_net_id(quantum, module): +def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['name'], } try: - networks = quantum.list_networks(**kwargs) + networks = neutron.list_networks(**kwargs) except Exception as e: - module.fail_json(msg = "Error in listing quantum networks: %s" % e.message) - if not networks['networks']: + module.fail_json(msg = "Error in listing neutron networks: %s" % e.message) + if not networks['networks']: return None return networks['networks'][0]['id'] -def _create_network(module, quantum): +def _create_network(module, neutron): - quantum.format = 'json' + neutron.format = 'json' network = { 'name': module.params.get('name'), @@ -212,21 +215,21 @@ def _create_network(module, quantum): network.pop('provider:segmentation_id', None) try: - net = quantum.create_network({'network':network}) + net = neutron.create_network({'network':network}) except Exception as e: 
module.fail_json(msg = "Error in creating network: %s" % e.message) return net['network']['id'] - -def _delete_network(module, net_id, quantum): + +def _delete_network(module, net_id, neutron): try: - id = quantum.delete_network(net_id) - except Exception as e: + id = neutron.delete_network(net_id) + except Exception as e: module.fail_json(msg = "Error in deleting the network: %s" % e.message) return True def main(): - + module = AnsibleModule( argument_spec = dict( login_username = dict(default='admin'), @@ -237,8 +240,8 @@ def main(): name = dict(required=True), tenant_name = dict(default=None), provider_network_type = dict(default=None, choices=['local', 'vlan', 'flat', 'gre']), - provider_physical_network = dict(default=None), - provider_segmentation_id = dict(default=None), + provider_physical_network = dict(default=None), + provider_segmentation_id = dict(default=None), router_external = dict(default=False, type='bool'), shared = dict(default=False, type='bool'), admin_state_up = dict(default=True, type='bool'), @@ -254,24 +257,24 @@ def main(): if not module.params['provider_segmentation_id']: module.fail_json(msg = " for vlan & gre networks, variable provider_segmentation_id should be set.") - quantum = _get_quantum_client(module, module.params) + neutron = _get_neutron_client(module, module.params) - _set_tenant_id(module) + _set_tenant_id(module) - if module.params['state'] == 'present': - network_id = _get_net_id(quantum, module) + if module.params['state'] == 'present': + network_id = _get_net_id(neutron, module) if not network_id: - network_id = _create_network(module, quantum) + network_id = _create_network(module, neutron) module.exit_json(changed = True, result = "Created", id = network_id) else: module.exit_json(changed = False, result = "Success", id = network_id) if module.params['state'] == 'absent': - network_id = _get_net_id(quantum, module) + network_id = _get_net_id(neutron, module) if not network_id: module.exit_json(changed = False, result = "Success") else: - _delete_network(module, network_id, quantum) + _delete_network(module, network_id, neutron) module.exit_json(changed = True, result = "Deleted") # this is magic, see lib/ansible/module.params['common.py diff --git a/cloud/quantum_router b/cloud/quantum_router index 26387de205f..9d31da2f19a 100644 --- a/cloud/quantum_router +++ b/cloud/quantum_router @@ -17,10 +17,13 @@ # along with this software. If not, see . try: - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: - print("failed=True msg='quantumclient and keystone client are required'") + print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") DOCUMENTATION = ''' --- @@ -75,7 +78,7 @@ options: - desired admin state of the created router . 
required: false default: true -requirements: ["quantumclient", "keystoneclient"] +requirements: ["quantumclient", "neutronclient", "keystoneclient"] ''' EXAMPLES = ''' @@ -96,21 +99,21 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception as e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone + global _os_keystone _os_keystone = kclient - return kclient - + return kclient + def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -119,10 +122,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = "Error in connecting to quantum: %s " % e.message) - return quantum + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron def _set_tenant_id(module): global _os_tenant_id @@ -139,38 +142,38 @@ def _set_tenant_id(module): module.fail_json(msg = "The tenant id cannot be found, please check the paramters") -def _get_router_id(module, quantum): +def _get_router_id(module, neutron): kwargs = { 'name': module.params['name'], 'tenant_id': _os_tenant_id, } try: - routers = quantum.list_routers(**kwargs) + routers = neutron.list_routers(**kwargs) except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None return routers['routers'][0]['id'] -def _create_router(module, quantum): +def _create_router(module, neutron): router = { 'name': module.params['name'], 'tenant_id': _os_tenant_id, 'admin_state_up': module.params['admin_state_up'], } try: - new_router = quantum.create_router(dict(router=router)) + new_router = neutron.create_router(dict(router=router)) except Exception as e: module.fail_json( msg = "Error in creating router: %s" % e.message) return new_router['router']['id'] -def _delete_router(module, quantum, router_id): +def _delete_router(module, neutron, router_id): try: - quantum.delete_router(router_id) + neutron.delete_router(router_id) except: module.fail_json("Error in deleting the router") return True - + def main(): module = AnsibleModule( argument_spec = dict( @@ -185,26 +188,26 @@ def main(): admin_state_up = dict(type='bool', default=True), ), ) - - quantum = _get_quantum_client(module, module.params) + + neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) if module.params['state'] == 'present': - router_id = _get_router_id(module, quantum) + router_id = _get_router_id(module, neutron) if not router_id: - router_id = _create_router(module, quantum) + router_id = _create_router(module, neutron) module.exit_json(changed=True, result="Created", id=router_id) else: module.exit_json(changed=False, result="success" , id=router_id) else: - router_id = _get_router_id(module, quantum) + router_id = _get_router_id(module, neutron) if not router_id: 
module.exit_json(changed=False, result="success") else: - _delete_router(module, quantum, router_id) + _delete_router(module, neutron, router_id) module.exit_json(changed=True, result="deleted") - + # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * main() diff --git a/cloud/quantum_router_gateway b/cloud/quantum_router_gateway index 60d500e6f61..68372e785d3 100644 --- a/cloud/quantum_router_gateway +++ b/cloud/quantum_router_gateway @@ -17,10 +17,13 @@ # along with this software. If not, see . try: - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: - print("failed=True msg='quantumclient and keystone client are required'") + print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") DOCUMENTATION = ''' --- module: quantum_router_gateway @@ -69,7 +72,7 @@ options: - Name of the external network which should be attached to the router. required: true default: None -requirements: ["quantumclient", "keystoneclient"] +requirements: ["quantumclient", "neutronclient", "keystoneclient"] ''' EXAMPLES = ''' @@ -86,21 +89,21 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception as e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone + global _os_keystone _os_keystone = kclient - return kclient - + return kclient + def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -109,68 +112,68 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = "Error in connecting to quantum: %s " % e.message) - return quantum + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron -def _get_router_id(module, quantum): +def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], } try: - routers = quantum.list_routers(**kwargs) + routers = neutron.list_routers(**kwargs) except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: return None return routers['routers'][0]['id'] -def _get_net_id(quantum, module): +def _get_net_id(neutron, module): kwargs = { 'name': module.params['network_name'], 'router:external': True } try: - networks = quantum.list_networks(**kwargs) + networks = neutron.list_networks(**kwargs) except Exception as e: - module.fail_json("Error in listing quantum networks: %s" % e.message) + module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] -def _get_port_id(quantum, module, router_id, network_id): +def 
_get_port_id(neutron, module, router_id, network_id): kwargs = { 'device_id': router_id, 'network_id': network_id, } try: - ports = quantum.list_ports(**kwargs) + ports = neutron.list_ports(**kwargs) except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: return None return ports['ports'][0]['id'] -def _add_gateway_router(quantum, module, router_id, network_id): +def _add_gateway_router(neutron, module, router_id, network_id): kwargs = { 'network_id': network_id } try: - quantum.add_gateway_router(router_id, kwargs) + neutron.add_gateway_router(router_id, kwargs) except Exception as e: module.fail_json(msg = "Error in adding gateway to router: %s" % e.message) return True - -def _remove_gateway_router(quantum, module, router_id): + +def _remove_gateway_router(neutron, module, router_id): try: - quantum.remove_gateway_router(router_id) + neutron.remove_gateway_router(router_id) except Exception as e: module.fail_json(msg = "Error in removing gateway to router: %s" % e.message) return True - + def main(): - + module = AnsibleModule( argument_spec = dict( login_username = dict(default='admin'), @@ -183,29 +186,29 @@ def main(): state = dict(default='present', choices=['absent', 'present']), ), ) - - quantum = _get_quantum_client(module, module.params) - router_id = _get_router_id(module, quantum) + + neutron = _get_neutron_client(module, module.params) + router_id = _get_router_id(module, neutron) if not router_id: module.fail_json(msg="failed to get the router id, please check the router name") - network_id = _get_net_id(quantum, module) + network_id = _get_net_id(neutron, module) if not network_id: module.fail_json(msg="failed to get the network id, please check the network name and make sure it is external") - + if module.params['state'] == 'present': - port_id = _get_port_id(quantum, module, router_id, network_id) + port_id = _get_port_id(neutron, module, router_id, network_id) if not port_id: - _add_gateway_router(quantum, module, router_id, network_id) + _add_gateway_router(neutron, module, router_id, network_id) module.exit_json(changed=True, result="created") module.exit_json(changed=False, result="success") if module.params['state'] == 'absent': - port_id = _get_port_id(quantum, module, router_id, network_id) + port_id = _get_port_id(neutron, module, router_id, network_id) if not port_id: module.exit_json(changed=False, result="Success") - _remove_gateway_router(quantum, module, router_id) + _remove_gateway_router(neutron, module, router_id) module.exit_json(changed=True, result="Deleted") # this is magic, see lib/ansible/module.params['common.py diff --git a/cloud/quantum_router_interface b/cloud/quantum_router_interface index f34aecacf58..05f1f303a8f 100644 --- a/cloud/quantum_router_interface +++ b/cloud/quantum_router_interface @@ -17,10 +17,13 @@ # along with this software. If not, see . 
try: - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: - print("failed=True msg='quantumclient and keystone client are required'") + print("failed=True msg='quantumclient (or neutronclient) and keystone client are required'") DOCUMENTATION = ''' --- module: quantum_router_interface @@ -81,7 +84,7 @@ EXAMPLES = ''' # Attach tenant1's subnet to the external router - quantum_router_interface: state=present login_username=admin login_password=admin - login_tenant_name=admin + login_tenant_name=admin tenant_name=tenant1 router_name=external_route subnet_name=t1subnet @@ -97,21 +100,21 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception as e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message) - global _os_keystone + global _os_keystone _os_keystone = kclient - return kclient - + return kclient + def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -120,10 +123,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = "Error in connecting to quantum: %s " % e.message) - return quantum + module.fail_json(msg = "Error in connecting to neutron: %s " % e.message) + return neutron def _set_tenant_id(module): global _os_tenant_id @@ -140,12 +143,12 @@ def _set_tenant_id(module): module.fail_json(msg = "The tenant id cannot be found, please check the paramters") -def _get_router_id(module, quantum): +def _get_router_id(module, neutron): kwargs = { 'name': module.params['router_name'], } try: - routers = quantum.list_routers(**kwargs) + routers = neutron.list_routers(**kwargs) except Exception as e: module.fail_json(msg = "Error in getting the router list: %s " % e.message) if not routers['routers']: @@ -153,27 +156,27 @@ def _get_router_id(module, quantum): return routers['routers'][0]['id'] -def _get_subnet_id(module, quantum): +def _get_subnet_id(module, neutron): subnet_id = None kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['subnet_name'], } try: - subnets = quantum.list_subnets(**kwargs) + subnets = neutron.list_subnets(**kwargs) except Exception as e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return subnets['subnets'][0]['id'] - -def _get_port_id(quantum, module, router_id, subnet_id): + +def _get_port_id(neutron, module, router_id, subnet_id): kwargs = { 'tenant_id': _os_tenant_id, 'device_id': router_id, } try: - ports = quantum.list_ports(**kwargs) + ports = neutron.list_ports(**kwargs) except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) if not ports['ports']: @@ -184,26 +187,26 @@ def 
_get_port_id(quantum, module, router_id, subnet_id): return port['id'] return None -def _add_interface_router(quantum, module, router_id, subnet_id): +def _add_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: - quantum.add_interface_router(router_id, kwargs) + neutron.add_interface_router(router_id, kwargs) except Exception as e: module.fail_json(msg = "Error in adding interface to router: %s" % e.message) return True - -def _remove_interface_router(quantum, module, router_id, subnet_id): + +def _remove_interface_router(neutron, module, router_id, subnet_id): kwargs = { 'subnet_id': subnet_id } try: - quantum.remove_interface_router(router_id, kwargs) + neutron.remove_interface_router(router_id, kwargs) except Exception as e: module.fail_json(msg="Error in removing interface from router: %s" % e.message) return True - + def main(): module = AnsibleModule( argument_spec = dict( @@ -218,32 +221,32 @@ def main(): state = dict(default='present', choices=['absent', 'present']), ), ) - - quantum = _get_quantum_client(module, module.params) + + neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) - router_id = _get_router_id(module, quantum) + router_id = _get_router_id(module, neutron) if not router_id: module.fail_json(msg="failed to get the router id, please check the router name") - subnet_id = _get_subnet_id(module, quantum) + subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.fail_json(msg="failed to get the subnet id, please check the subnet name") - + if module.params['state'] == 'present': - port_id = _get_port_id(quantum, module, router_id, subnet_id) + port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: - _add_interface_router(quantum, module, router_id, subnet_id) + _add_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="created", id=port_id) module.exit_json(changed=False, result="success", id=port_id) if module.params['state'] == 'absent': - port_id = _get_port_id(quantum, module, router_id, subnet_id) + port_id = _get_port_id(neutron, module, router_id, subnet_id) if not port_id: module.exit_json(changed = False, result = "Success") - _remove_interface_router(quantum, module, router_id, subnet_id) + _remove_interface_router(neutron, module, router_id, subnet_id) module.exit_json(changed=True, result="Deleted") - + # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * main() diff --git a/cloud/quantum_subnet b/cloud/quantum_subnet index 372d346f717..9d40131061a 100644 --- a/cloud/quantum_subnet +++ b/cloud/quantum_subnet @@ -17,10 +17,13 @@ # along with this software. If not, see . 
try: - from quantumclient.quantum import client + try: + from neutronclient.neutron import client + except ImportError: + from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient except ImportError: - print("failed=True msg='quantum and keystone client are required'") + print("failed=True msg='quantumclient (or neutronclient) and keystoneclient are required'") DOCUMENTATION = ''' --- @@ -77,7 +80,7 @@ options: default: None ip_version: description: - - The IP version of the subnet 4 or 6 + - The IP version of the subnet 4 or 6 required: false default: 4 enable_dhcp: @@ -105,7 +108,7 @@ options: - From the subnet pool the last IP that should be assigned to the virtual machines required: false default: None -requirements: ["quantum", "keystoneclient"] +requirements: ["quantumclient", "neutronclient", "keystoneclient"] ''' EXAMPLES = ''' @@ -125,21 +128,21 @@ def _get_ksclient(module, kwargs): password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) - except Exception as e: + except Exception as e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) - global _os_keystone + global _os_keystone _os_keystone = kclient - return kclient - + return kclient + def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception as e: - module.fail_json(msg = "Error getting endpoint for glance: %s" % e.message) + module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint -def _get_quantum_client(module, kwargs): +def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) @@ -148,10 +151,10 @@ def _get_quantum_client(module, kwargs): 'endpoint_url': endpoint } try: - quantum = client.Client('2.0', **kwargs) + neutron = client.Client('2.0', **kwargs) except Exception as e: - module.fail_json(msg = " Error in connecting to quantum: %s" % e.message) - return quantum + module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) + return neutron def _set_tenant_id(module): global _os_tenant_id @@ -167,24 +170,24 @@ def _set_tenant_id(module): if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the paramters") -def _get_net_id(quantum, module): +def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['network_name'], } try: - networks = quantum.list_networks(**kwargs) + networks = neutron.list_networks(**kwargs) except Exception as e: - module.fail_json("Error in listing quantum networks: %s" % e.message) + module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] -def _get_subnet_id(module, quantum): +def _get_subnet_id(module, neutron): global _os_network_id subnet_id = None - _os_network_id = _get_net_id(quantum, module) + _os_network_id = _get_net_id(neutron, module) if not _os_network_id: module.fail_json(msg = "network id of network not found.") else: @@ -193,15 +196,15 @@ def _get_subnet_id(module, quantum): 'name': module.params['name'], } try: - subnets = quantum.list_subnets(**kwargs) + subnets = neutron.list_subnets(**kwargs) except Exception as e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return 
subnets['subnets'][0]['id'] -def _create_subnet(module, quantum): - quantum.format = 'json' +def _create_subnet(module, neutron): + neutron.format = 'json' subnet = { 'name': module.params['name'], 'ip_version': module.params['ip_version'], @@ -214,7 +217,7 @@ def _create_subnet(module, quantum): } if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: allocation_pools = [ - { + { 'start' : module.params['allocation_pool_start'], 'end' : module.params['allocation_pool_end'] } @@ -227,22 +230,22 @@ def _create_subnet(module, quantum): else: subnet.pop('dns_nameservers') try: - new_subnet = quantum.create_subnet(dict(subnet=subnet)) + new_subnet = neutron.create_subnet(dict(subnet=subnet)) except Exception, e: - module.fail_json(msg = "Failure in creating subnet: %s" % e.message) + module.fail_json(msg = "Failure in creating subnet: %s" % e.message) return new_subnet['subnet']['id'] - - -def _delete_subnet(module, quantum, subnet_id): + + +def _delete_subnet(module, neutron, subnet_id): try: - quantum.delete_subnet(subnet_id) + neutron.delete_subnet(subnet_id) except Exception as e: module.fail_json( msg = "Error in deleting subnet: %s" % e.message) return True - - + + def main(): - + module = AnsibleModule( argument_spec = dict( login_username = dict(default='admin'), @@ -263,23 +266,23 @@ def main(): allocation_pool_end = dict(default=None), ), ) - quantum = _get_quantum_client(module, module.params) + neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) if module.params['state'] == 'present': - subnet_id = _get_subnet_id(module, quantum) + subnet_id = _get_subnet_id(module, neutron) if not subnet_id: - subnet_id = _create_subnet(module, quantum) + subnet_id = _create_subnet(module, neutron) module.exit_json(changed = True, result = "Created" , id = subnet_id) else: module.exit_json(changed = False, result = "success" , id = subnet_id) else: - subnet_id = _get_subnet_id(module, quantum) + subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.exit_json(changed = False, result = "success") else: - _delete_subnet(module, quantum, subnet_id) + _delete_subnet(module, neutron, subnet_id) module.exit_json(changed = True, result = "deleted") - + # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * main() From fd6fff9d8db59964df8389584bf211affc64a6e5 Mon Sep 17 00:00:00 2001 From: "Brad P. Crochet" Date: Tue, 17 Dec 2013 14:24:30 -0500 Subject: [PATCH 07/59] Specify internal_network_name This allow one to specify a specific internal network name for the case where there is more than one nic on an instance. Without this, the list of ports may not return the correct order. Therefore, it is necessary to specify the exact internal network to attach the floating ip to --- cloud/quantum_floating_ip | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/cloud/quantum_floating_ip b/cloud/quantum_floating_ip index 54e1c68d79d..531ddfe2b82 100644 --- a/cloud/quantum_floating_ip +++ b/cloud/quantum_floating_ip @@ -67,7 +67,7 @@ options: default: present network_name: description: - - Name of the network from which IP has to be assigned to VM. Please make sure the network is an external network + - Name of the network from which IP has to be assigned to VM. 
Please make sure the network is an external network required: true default: None instance_name: @@ -75,6 +75,11 @@ options: - The name of the instance to which the IP address should be assigned required: true default: None + internal_network_name: + description: + - The name of the network of the port to associate with the floating ip. Necessary when VM multiple networks. + required: false + default: None requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"] ''' @@ -82,7 +87,7 @@ EXAMPLES = ''' # Assign a floating ip to the instance from an external network - quantum_floating_ip: state=present login_username=admin login_password=admin login_tenant_name=admin network_name=external_network - instance_name=vm1 + instance_name=vm1 internal_network_name=internal_network ''' def _get_ksclient(module, kwargs): @@ -135,7 +140,13 @@ def _get_server_state(module, nova): module.fail_json(msg = "Error in getting the server list: %s" % e.message) return server_info, server -def _get_port_info(neutron, module, instance_id): +def _get_port_info(neutron, module, instance_id, internal_network_name=None): + if internal_network_name: + kwargs = { + 'name': internal_network_name, + } + networks = neutron.list_networks(**kwargs) + subnet_id = networks['networks'][0]['subnets'][0] kwargs = { 'device_id': instance_id, } @@ -143,9 +154,16 @@ def _get_port_info(neutron, module, instance_id): ports = neutron.list_ports(**kwargs) except Exception as e: module.fail_json( msg = "Error in listing ports: %s" % e.message) + if subnet_id: + port = next(port for port in ports['ports'] if port['fixed_ips'][0]['subnet_id'] == subnet_id) + port_id = port['id'] + fixed_ip_address = port['fixed_ips'][0]['ip_address'] + else: + port_id = ports['ports'][0]['id'] + fixed_ip_address = ports['ports'][0]['fixed_ips'][0]['ip_address'] if not ports['ports']: return None, None - return ports['ports'][0]['fixed_ips'][0]['ip_address'], ports['ports'][0]['id'] + return fixed_ip_address, port_id def _get_floating_ip(module, neutron, fixed_ip_address): kwargs = { @@ -204,7 +222,8 @@ def main(): region_name = dict(default=None), network_name = dict(required=True), instance_name = dict(required=True), - state = dict(default='present', choices=['absent', 'present']) + state = dict(default='present', choices=['absent', 'present']), + internal_network_name = dict(default=None), ), ) @@ -219,7 +238,7 @@ def main(): if not server_info: module.fail_json(msg="The instance name provided cannot be found") - fixed_ip, port_id = _get_port_info(neutron, module, server_info['id']) + fixed_ip, port_id = _get_port_info(neutron, module, server_info['id'], module.params['internal_network_name']) if not port_id: module.fail_json(msg="Cannot find a port for this instance, maybe fixed ip is not assigned") From 5e77eb70e4fb510f57be551d17edb873fc6a4cc3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Roger=20L=C3=B3pez?= Date: Tue, 17 Dec 2013 14:45:42 -0500 Subject: [PATCH 08/59] django_manage need not require virtualenv in PATH The virtualenv parameter to the django_manage command is used to locate the virtualenv and build it if necessary. Access to the virtualenv executable is only needed if the virtualenv directory doesn't exist and needs to be built. This patch allows for the situation where a virtualenv that is not in the PATH was used to create a virtualenv prior to running the django_manage module. 
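For reference, a condensed sketch of the deferred-lookup behaviour this patch describes (an AnsibleModule-like object is assumed; the helper name is illustrative, the authoritative change is the diff below):

    import os

    def ensure_virtualenv(module, venv_param):
        # No virtualenv requested: nothing to check or create.
        if venv_param is None:
            return
        vbin = os.path.join(os.path.expanduser(venv_param), 'bin')
        activate = os.path.join(vbin, 'activate')
        # Resolve the virtualenv binary only when the environment is
        # missing and must be built; a pre-existing virtualenv no longer
        # requires 'virtualenv' to be on the PATH.
        if not os.path.exists(activate):
            virtualenv = module.get_bin_path('virtualenv', True)
            rc, out, err = module.run_command([virtualenv, venv_param])
            if rc != 0:
                module.fail_json(msg=err)
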
--- web_infrastructure/django_manage | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/web_infrastructure/django_manage b/web_infrastructure/django_manage index f6ea9c49149..68eb92c1bfe 100644 --- a/web_infrastructure/django_manage +++ b/web_infrastructure/django_manage @@ -129,12 +129,11 @@ def _ensure_virtualenv(module): if venv_param is None: return - virtualenv = module.get_bin_path('virtualenv', True) - vbin = os.path.join(os.path.expanduser(venv_param), 'bin') activate = os.path.join(vbin, 'activate') if not os.path.exists(activate): + virtualenv = module.get_bin_path('virtualenv', True) vcmd = '%s %s' % (virtualenv, venv_param) vcmd = [virtualenv, venv_param] rc, out_venv, err_venv = module.run_command(vcmd) From 65272f2e5cd8476449cfc36e2876fa5a50745a7c Mon Sep 17 00:00:00 2001 From: Serge van Ginderachter Date: Tue, 17 Dec 2013 21:48:29 +0100 Subject: [PATCH 09/59] bigip_monitor_http: two small bug fixes - extra properties were not set at creation, only when updating which can be overlooked when running the module from more than 1 node... - fix bas var as time_until_up didn't get used --- net_infrastructure/bigip_monitor_http | 42 +++++++++++++-------------- 1 file changed, 20 insertions(+), 22 deletions(-) diff --git a/net_infrastructure/bigip_monitor_http b/net_infrastructure/bigip_monitor_http index 924c826eaa3..7a05808e74c 100644 --- a/net_infrastructure/bigip_monitor_http +++ b/net_infrastructure/bigip_monitor_http @@ -94,19 +94,19 @@ options: required: true default: none ip: - description: + description: - IP address part of the ipport definition. The default API setting is "0.0.0.0". required: false default: none port: - description: + description: - port address part op the ipport definition. Tyhe default API setting is 0. required: false default: none interval: - description: + description: - The interval specifying how frequently the monitor instance of this template will run. By default, this interval is used for up and down states. The default API setting is 5. 
@@ -199,7 +199,7 @@ def check_monitor_exists(module, api, monitor, parent): def create_monitor(api, monitor, template_attributes): - try: + try: api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) except bigsuds.OperationFailed, e: if "already exists" in str(e): @@ -282,7 +282,7 @@ def set_ipport(api, monitor, ipport): # =========================================== # main loop # -# writing a module for other monitor types should +# writing a module for other monitor types should # only need an updated main() (and monitor specific functions) def main(): @@ -345,19 +345,19 @@ def main(): if port is None: port = cur_ipport['ipport']['port'] else: # use API defaults if not defined to create it - if interval is None: + if interval is None: interval = 5 - if timeout is None: + if timeout is None: timeout = 16 - if ip is None: + if ip is None: ip = '0.0.0.0' - if port is None: + if port is None: port = 0 - if send is None: + if send is None: send = '' - if receive is None: + if receive is None: receive = '' - if receive_disable is None: + if receive_disable is None: receive_disable = '' # define and set address type @@ -394,7 +394,7 @@ def main(): {'type': 'ITYPE_TIMEOUT', 'value': timeout}, {'type': 'ITYPE_TIME_UNTIL_UP', - 'value': interval}] + 'value': time_until_up}] # main logic, monitor generic @@ -405,7 +405,7 @@ def main(): if state == 'absent': if monitor_exists: if not module.check_mode: - # possible race condition if same task + # possible race condition if same task # on other node deleted it first result['changed'] |= delete_monitor(api, monitor) else: @@ -414,26 +414,24 @@ def main(): else: # state present ## check for monitor itself if not monitor_exists: # create it - if not module.check_mode: + if not module.check_mode: # again, check changed status here b/c race conditions # if other task already created it result['changed'] |= create_monitor(api, monitor, template_attributes) - else: + else: result['changed'] |= True ## check for monitor parameters # whether it already existed, or was just created, now update # the update functions need to check for check mode but # cannot update settings if it doesn't exist which happens in check mode - if monitor_exists and not module.check_mode: - result['changed'] |= update_monitor_properties(api, module, monitor, - template_string_properties, - template_integer_properties) - # else assume nothing changed + result['changed'] |= update_monitor_properties(api, module, monitor, + template_string_properties, + template_integer_properties) # we just have to update the ipport if monitor already exists and it's different if monitor_exists and cur_ipport != ipport: - set_ipport(api, monitor, ipport) + set_ipport(api, monitor, ipport) result['changed'] |= True #else: monitor doesn't exist (check mode) or ipport is already ok From 9845376232f6c4885227ba624b6ff75e62235fee Mon Sep 17 00:00:00 2001 From: Janitha Karunaratne Date: Fri, 20 Dec 2013 16:44:18 -0600 Subject: [PATCH 10/59] Fixes #5381 easy_install bug when used with virtualenv --- packaging/easy_install | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packaging/easy_install b/packaging/easy_install index eae361c4321..bdacf8e464b 100644 --- a/packaging/easy_install +++ b/packaging/easy_install @@ -135,7 +135,6 @@ def main(): name = module.params['name'] env = module.params['virtualenv'] executable = module.params['executable'] - easy_install = _get_easy_install(module, env, 
executable) site_packages = module.params['virtualenv_site_packages'] virtualenv_command = module.params['virtualenv_command'] @@ -159,6 +158,8 @@ def main(): out += out_venv err += err_venv + easy_install = _get_easy_install(module, env, executable) + cmd = None changed = False installed = _is_package_installed(module, name, easy_install) From 9dab01c5e274b73c0d1942124d14921185bdb502 Mon Sep 17 00:00:00 2001 From: "Jasper N. Brouwer" Date: Mon, 23 Dec 2013 09:54:16 +0100 Subject: [PATCH 11/59] Swapped conditions of the changed if statements --- system/service | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/service b/system/service index 2490c6a30fd..ed5712c09f9 100644 --- a/system/service +++ b/system/service @@ -431,10 +431,10 @@ class LinuxService(Service): if check_systemd(self.name): # service is managed by systemd self.enable_cmd = location['systemctl'] - elif os.path.exists("/etc/init/%s.conf" % self.name) and location['initctl']: + elif location['initctl'] and os.path.exists("/etc/init/%s.conf" % self.name): # service is managed by upstart self.enable_cmd = location['initctl'] - elif os.path.exists("/etc/init.d/%s" % self.name) and location['update-rc.d']: + elif location['update-rc.d'] and os.path.exists("/etc/init.d/%s" % self.name): # service is managed by with SysV init scripts, but with update-rc.d self.enable_cmd = location['update-rc.d'] else: From 0955744b3a0b32619cabc4659ef0b2dd6f461590 Mon Sep 17 00:00:00 2001 From: Stoned Elipot Date: Wed, 25 Dec 2013 12:12:11 +0100 Subject: [PATCH 12/59] shell module: document removes option and sync with command module doc --- commands/shell | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/commands/shell b/commands/shell index 5b3969a1fb1..03299b967cc 100644 --- a/commands/shell +++ b/commands/shell @@ -7,20 +7,26 @@ DOCUMENTATION = ''' module: shell short_description: Execute commands in nodes. description: - - The shell module takes the command name followed by a list of arguments, - space delimited. It is almost exactly like the M(command) module but runs + - The M(shell) module takes the command name followed by a list of space-delimited arguments. + It is almost exactly like the M(command) module but runs the command through a shell (C(/bin/sh)) on the remote node. version_added: "0.2" options: - (free form): + free_form: description: - - The command module takes a free form command to run - required: null + - The shell module takes a free form command to run + required: true default: null creates: description: - - a filename, when it already exists, this step will NOT be run - required: false + - a filename, when it already exists, this step will B(not) be run. + required: no + default: null + removes: + description: + - a filename, when it does not exist, this step will B(not) be run. + version_added: "0.8" + required: no default: null chdir: description: From 05c755f2dc043062122e30056839770af22f2ffb Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 25 Dec 2013 13:50:15 -0500 Subject: [PATCH 13/59] Fix a documentation item. --- system/setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/setup b/system/setup index 939752f1451..816039469c5 100755 --- a/system/setup +++ b/system/setup @@ -83,7 +83,7 @@ ansible all -m setup -a 'filter=ansible_*_mb' # Display only facts returned by facter. ansible all -m setup -a 'filter=facter_*' -# Display only facts returned by facter. +# Display only facts about certain interfaces. 
ansible all -m setup -a 'filter=ansible_eth[0-2]' """ From 3fd2e556197f81e45410df74935ab8c18f85c8a6 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Wed, 25 Dec 2013 13:51:56 -0500 Subject: [PATCH 14/59] User report of DO not taking underscores so tweaking docs --- cloud/digital_ocean | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cloud/digital_ocean b/cloud/digital_ocean index 400bf3bf54b..427956b5f78 100644 --- a/cloud/digital_ocean +++ b/cloud/digital_ocean @@ -112,7 +112,7 @@ EXAMPLES = ''' - digital_ocean: > state=present command=droplet - name=my_new_droplet + name=mydroplet client_id=XXX api_key=XXX size_id=1 @@ -131,7 +131,7 @@ EXAMPLES = ''' state=present command=droplet id=123 - name=my_new_droplet + name=mydroplet client_id=XXX api_key=XXX size_id=1 @@ -147,7 +147,7 @@ EXAMPLES = ''' - digital_ocean: > state=present ssh_key_ids=id1,id2 - name=my_new_droplet + name=mydroplet client_id=XXX api_key=XXX size_id=1 From 8f766bdcdd34f5402e160dbd1ef8b69ad539ff5e Mon Sep 17 00:00:00 2001 From: Maykel Moya Date: Thu, 26 Dec 2013 12:34:33 +0100 Subject: [PATCH 15/59] Missing "is" in file module description --- files/file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/file b/files/file index 8dcead1b402..86ee83a6938 100644 --- a/files/file +++ b/files/file @@ -124,7 +124,7 @@ options: choices: [ "yes", "no" ] description: - 'force the creation of the symlinks in two cases: the source file does - not exist (but will appear later); the destination exists and a file (so, we need to unlink the + not exist (but will appear later); the destination exists and is a file (so, we need to unlink the "path" file and create symlink to the "src" file in place of it).' notes: - See also M(copy), M(template), M(assemble) From bab510f0c58d41d6821ef244ddc529df54d0a3f7 Mon Sep 17 00:00:00 2001 From: lichesser Date: Thu, 26 Dec 2013 17:53:17 +0100 Subject: [PATCH 16/59] Partial fix for #4565. Works only for Debian 7 and later --- system/setup | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/system/setup b/system/setup index 816039469c5..9a31b10da4d 100755 --- a/system/setup +++ b/system/setup @@ -118,7 +118,8 @@ class Facts(object): '/etc/alpine-release': 'Alpine', '/etc/release': 'Solaris', '/etc/arch-release': 'Archlinux', - '/etc/SuSE-release': 'SuSE' } + '/etc/SuSE-release': 'SuSE', + '/etc/os-release': 'Debian' } SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' } # A list of dicts. If there is a platform with more than one @@ -328,6 +329,11 @@ class Facts(object): elif name == 'SuSE': data = get_file_content(path).splitlines() self.facts['distribution_release'] = data[2].split('=')[1].strip() + elif name == 'Debian': + data = get_file_content(path).split('\n')[0] + release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data) + if release: + self.facts['distribution_release'] = release.groups()[0] else: self.facts['distribution'] = name From 9affb6ed378e4dbbe14e10bbc3c8152c52995dbc Mon Sep 17 00:00:00 2001 From: Brian Coca Date: Thu, 26 Dec 2013 12:01:41 -0500 Subject: [PATCH 17/59] added more usefull example for jinja2 overrides (this one is actually used) Signed-off-by: Brian Coca --- files/template | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/files/template b/files/template index 35ac831e181..41dfba08042 100644 --- a/files/template +++ b/files/template @@ -48,8 +48,11 @@ options: required: false notes: - Since Ansible version 0.9, templates are loaded with C(trim_blocks=True). 
- - 'You can override jinja2 settings by adding a special header to template file. - i.e. C(#jinja2: trim_blocks: False)' + + - "Also, you can override jinja2 settings by adding a special header to template file. + i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]') + which changes the variable interpolation markers to [% var %] instead of {{ var }}." + requirements: [] author: Michael DeHaan ''' From a1db3d89ded91308d40007100a3a275e13f64d8e Mon Sep 17 00:00:00 2001 From: beefsalad Date: Thu, 26 Dec 2013 12:25:52 -0600 Subject: [PATCH 18/59] added descriptions for yum options Updated main description to state that the yum module will handle package groups. Added descriptions for each example, as it is unclear to (some) users that @Name implies a group install. --- packaging/yum | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/packaging/yum b/packaging/yum index 4c930f2f777..a33aa797f45 100644 --- a/packaging/yum +++ b/packaging/yum @@ -31,7 +31,7 @@ module: yum version_added: historical short_description: Manages packages with the I(yum) package manager description: - - Will install, upgrade, remove, and list packages with the I(yum) package manager. + - Will install, upgrade, remove, and list packages or package groups with the I(yum) package manager. options: name: description: @@ -94,12 +94,19 @@ author: Seth Vidal ''' EXAMPLES = ''' +# description: installs the latest version of httpd - yum: name=httpd state=latest +# description: removes the httpd package - yum: name=httpd state=removed +# description: installs the latest version of httpd from the testing repo - yum: name=httpd enablerepo=testing state=installed +# description: upgrades all packages - yum: name=* state=latest +# description: installs the nginx rpm from a remote repo - yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present +# description: installs nginx rpm from a local file - yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present +# description: installs the package group 'Development tool' - yum: name="@Development tools" state=present ''' From 33ff67ea2410dee3730f2062f0aab605f770c337 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 26 Dec 2013 13:46:09 -0500 Subject: [PATCH 19/59] Use task names in examples. --- packaging/yum | 38 ++++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/packaging/yum b/packaging/yum index a33aa797f45..744a876a04f 100644 --- a/packaging/yum +++ b/packaging/yum @@ -31,7 +31,7 @@ module: yum version_added: historical short_description: Manages packages with the I(yum) package manager description: - - Will install, upgrade, remove, and list packages or package groups with the I(yum) package manager. + - Installs, upgrade, removes, and lists packages and groups with the I(yum) package manager. options: name: description: @@ -41,7 +41,7 @@ options: aliases: [] list: description: - - Various non-idempotent commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. + - Various (non-idempotent) commands for usage with C(/usr/bin/ansible) and I(not) playbooks. See examples. 
required: false default: null state: @@ -94,20 +94,26 @@ author: Seth Vidal ''' EXAMPLES = ''' -# description: installs the latest version of httpd -- yum: name=httpd state=latest -# description: removes the httpd package -- yum: name=httpd state=removed -# description: installs the latest version of httpd from the testing repo -- yum: name=httpd enablerepo=testing state=installed -# description: upgrades all packages -- yum: name=* state=latest -# description: installs the nginx rpm from a remote repo -- yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present -# description: installs nginx rpm from a local file -- yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present -# description: installs the package group 'Development tool' -- yum: name="@Development tools" state=present +- name: install the latest version of Apache + yum: name=httpd state=latest + +- name: remove the Apache package + yum: name=httpd state=removed + +- name: install the latest version of Apche from the testing repo + yum: name=httpd enablerepo=testing state=installed + +- name: upgrade all packages + yum: name=* state=latest + +- name: install the nginx rpm from a remote repo + yum: name=http://nginx.org/packages/centos/6/noarch/RPMS/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + +- name: install nginx rpm from a local file + yum: name=/usr/local/src/nginx-release-centos-6-0.el6.ngx.noarch.rpm state=present + +- name: install the 'Development tools' package group + yum: name="@Development tools" state=present ''' def_qf = "%{name}-%{version}-%{release}.%{arch}" From ecf78e8c93c2e393dee83585075a11c7c3e1f4e8 Mon Sep 17 00:00:00 2001 From: Mikhail Sobolev Date: Thu, 26 Dec 2013 01:07:49 +0200 Subject: [PATCH 20/59] fix short/full description for elasticache --- cloud/elasticache | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/elasticache b/cloud/elasticache index 9b40107d981..fa175bb397e 100644 --- a/cloud/elasticache +++ b/cloud/elasticache @@ -17,9 +17,9 @@ DOCUMENTATION = """ --- module: elasticache +short_description: Manage cache clusters in Amazon Elasticache. description: - Manage cache clusters in Amazon Elasticache. -short_description: Manage cache clusters in Amazon Elasticache. - Returns information about the specified cache cluster. version_added: "1.4" requirements: [ "boto" ] From 59396d38e241edb3ef55f1916138489488f35c7a Mon Sep 17 00:00:00 2001 From: Jim Kleckner Date: Thu, 26 Dec 2013 16:21:16 -0800 Subject: [PATCH 21/59] Fix mysql_db dump and import to use port argument The code for mysql_db did not pass the port argument when state=dump or state=import. 
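A minimal sketch of the connection-option handling after the fix (simplified; the real change is the diff below): when no Unix socket is configured, both --host and --port are appended, where previously only --host was passed.

    def connection_args(host, port, socket=None):
        # Prefer the socket when one is configured; otherwise pass the
        # host together with the (previously dropped) port.
        if socket is not None:
            return " --socket=%s" % socket
        return " --host=%s --port=%s" % (host, port)

    # e.g. assembling a dump command (host name is illustrative):
    # cmd = "mysqldump --quick --user=root --password=secret" + \
    #       connection_args("db.example.com", 3307) + " mydb"
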
--- database/mysql_db | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/database/mysql_db b/database/mysql_db index f949cced301..b6fbe5f83f9 100644 --- a/database/mysql_db +++ b/database/mysql_db @@ -116,13 +116,13 @@ def db_delete(cursor, db): cursor.execute(query) return True -def db_dump(module, host, user, password, db_name, target, socket=None): +def db_dump(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysqldump', True) cmd += " --quick --user=%s --password=%s" %(user, password) if socket is not None: cmd += " --socket=%s" % socket else: - cmd += " --host=%s" % host + cmd += " --host=%s --port=%s" % (host, port) cmd += " %s" % db_name if os.path.splitext(target)[-1] == '.gz': cmd = cmd + ' | gzip > ' + target @@ -133,13 +133,13 @@ def db_dump(module, host, user, password, db_name, target, socket=None): rc, stdout, stderr = module.run_command(cmd) return rc, stdout, stderr -def db_import(module, host, user, password, db_name, target, socket=None): +def db_import(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysql', True) cmd += " --user=%s --password=%s" %(user, password) if socket is not None: cmd += " --socket=%s" % socket else: - cmd += " --host=%s" % host + cmd += " --host=%s --port=%s" % (host, port) cmd += " -D %s" % db_name if os.path.splitext(target)[-1] == '.gz': cmd = 'gunzip < ' + target + ' | ' + cmd @@ -282,6 +282,7 @@ def main(): elif state == "dump": rc, stdout, stderr = db_dump(module, login_host, login_user, login_password, db, target, + port=module.params['login_port'], socket=module.params['login_unix_socket']) if rc != 0: module.fail_json(msg="%s" % stderr) @@ -290,6 +291,7 @@ def main(): elif state == "import": rc, stdout, stderr = db_import(module, login_host, login_user, login_password, db, target, + port=module.params['login_port'], socket=module.params['login_unix_socket']) if rc != 0: module.fail_json(msg="%s" % stderr) From 2903c7b5684680a0a851cdd07fa1a08b36d52ce6 Mon Sep 17 00:00:00 2001 From: Jim Kleckner Date: Thu, 26 Dec 2013 16:32:32 -0800 Subject: [PATCH 22/59] Fix documentation example for the fail module The example for the fail module doesn't work: http://www.ansibleworks.com/docs/modules.html#fail The current text shows: - fail: msg="The system may not be provisioned according to the CMDB status." when: "{{ cmdb_status }} != 'to-be-staged'" The "when" documentation indicates that the argument is already a Jinja2 expression: http://www.ansibleworks.com/docs/playbooks_conditionals.html#the-when-statement Thus, the following is when: cmdb_status != "to-be-staged" is preferred even though the following could work but generates a deprecation warning: when: {{cmdb_status != "to-be-staged"}} --- utilities/fail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/utilities/fail b/utilities/fail index 7023f357cac..23f5b83668c 100644 --- a/utilities/fail +++ b/utilities/fail @@ -40,5 +40,5 @@ author: Dag Wieers EXAMPLES = ''' # Example playbook using fail and when together - fail: msg="The system may not be provisioned according to the CMDB status." 
- when: "{{ cmdb_status }} != 'to-be-staged'" + when: cmdb_status != "to-be-staged" ''' From 0a3293cf537e9c4a973d7c685654ddf477aa62f7 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Fri, 27 Dec 2013 15:01:27 -0500 Subject: [PATCH 23/59] add a note about raw/endraw --- files/template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/files/template b/files/template index 41dfba08042..ccc6bafadc0 100644 --- a/files/template +++ b/files/template @@ -51,7 +51,7 @@ notes: - "Also, you can override jinja2 settings by adding a special header to template file. i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]') - which changes the variable interpolation markers to [% var %] instead of {{ var }}." + which changes the variable interpolation markers to [% var %] instead of {{ var }}." This is the best way to prevent evaluation of things that look like, but should not be Jinja2. raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated. requirements: [] author: Michael DeHaan From 50c600c3610e0516cffdfdd1fa19e36a2b26827c Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Mon, 30 Dec 2013 20:21:15 -0500 Subject: [PATCH 24/59] Pass scrub_data by default, see response from DO here: https://www.digitalocean.com/blog --- cloud/digital_ocean | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cloud/digital_ocean b/cloud/digital_ocean index 427956b5f78..bd975cf1b90 100644 --- a/cloud/digital_ocean +++ b/cloud/digital_ocean @@ -221,7 +221,7 @@ class Droplet(JsonfyMixIn): raise TimeoutError('Wait for droplet running timeout', self.id) def destroy(self): - return self.manager.destroy_droplet(self.id) + return self.manager.destroy_droplet(self.id, scrub_data=True) @classmethod def setup(cls, client_id, api_key): From a471f6c993b07e2b2b985241ff189793ec2e31df Mon Sep 17 00:00:00 2001 From: Rutger Spiertz Date: Thu, 2 Jan 2014 15:41:24 +0100 Subject: [PATCH 25/59] apt_repository: don't crash if default_file doesn't exist --- packaging/apt_repository | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/packaging/apt_repository b/packaging/apt_repository index 9965bc22a38..26b18ec4bcc 100644 --- a/packaging/apt_repository +++ b/packaging/apt_repository @@ -3,6 +3,7 @@ # (c) 2012, Matt Wright # (c) 2013, Alexander Saltanov +# (c) 2014, Rutger Spiertz # # This file is part of Ansible # @@ -111,8 +112,9 @@ class SourcesList(object): self.files = {} # group sources by file self.default_file = apt_pkg.config.find_file('Dir::Etc::sourcelist') - # read sources.list - self.load(self.default_file) + # read sources.list if it exists + if os.path.isfile(self.default_file): + self.load(self.default_file) # read sources.list.d for file in glob.iglob('%s/*.list' % apt_pkg.config.find_dir('Dir::Etc::sourceparts')): From 8ba9019797e34f8f1f30f0dab5e91c69c8d16191 Mon Sep 17 00:00:00 2001 From: Jim Kleckner Date: Thu, 2 Jan 2014 12:04:03 -0800 Subject: [PATCH 26/59] Add quotes to password argument for dump/import The password is passed on a command line for dump and import and needs quoting. Ideally, this would not be passed on a command line at all - any ideas? Or at least have a stronger form of quoting so that embedded single quotes will be escaped. 
--- database/mysql_db | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/database/mysql_db b/database/mysql_db index b6fbe5f83f9..cf987011152 100644 --- a/database/mysql_db +++ b/database/mysql_db @@ -118,7 +118,7 @@ def db_delete(cursor, db): def db_dump(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysqldump', True) - cmd += " --quick --user=%s --password=%s" %(user, password) + cmd += " --quick --user=%s --password='%s'" %(user, password) if socket is not None: cmd += " --socket=%s" % socket else: @@ -135,7 +135,7 @@ def db_dump(module, host, user, password, db_name, target, port, socket=None): def db_import(module, host, user, password, db_name, target, port, socket=None): cmd = module.get_bin_path('mysql', True) - cmd += " --user=%s --password=%s" %(user, password) + cmd += " --user=%s --password='%s'" %(user, password) if socket is not None: cmd += " --socket=%s" % socket else: From e2c7aeca4c970aa76899b5491ecbd4a7db371288 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Thu, 2 Jan 2014 16:17:24 -0500 Subject: [PATCH 27/59] Fixes #5040 setup module: do not add primary interface info to the secondary interface data --- system/setup | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/system/setup b/system/setup index 816039469c5..542280624e4 100755 --- a/system/setup +++ b/system/setup @@ -1540,8 +1540,7 @@ class LinuxNetwork(Network): iface = words[-1] if iface != device: interfaces[iface] = {} - interfaces[iface].update(interfaces[device]) - if "ipv4_secondaries" not in interfaces[iface]: + if not secondary and "ipv4_secondaries" not in interfaces[iface]: interfaces[iface]["ipv4_secondaries"] = [] if not secondary or "ipv4" not in interfaces[iface]: interfaces[iface]['ipv4'] = {'address': address, @@ -1553,6 +1552,15 @@ class LinuxNetwork(Network): 'netmask': netmask, 'network': network, }) + + # add this secondary IP to the main device + if secondary: + interfaces[device]["ipv4_secondaries"].append({ + 'address': address, + 'netmask': netmask, + 'network': network, + }) + # If this is the default address, update default_ipv4 if 'address' in default_ipv4 and default_ipv4['address'] == address: default_ipv4['netmask'] = netmask From a11e5d609e3ddfb75c3f84b765c84ea15ef18608 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Thu, 2 Jan 2014 17:36:52 -0500 Subject: [PATCH 28/59] Basic docsite formatting fixups --- files/template | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/files/template b/files/template index ccc6bafadc0..5f64b6d9e60 100644 --- a/files/template +++ b/files/template @@ -47,11 +47,11 @@ options: - all arguments accepted by the M(file) module also work here required: false notes: - - Since Ansible version 0.9, templates are loaded with C(trim_blocks=True). + - "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)." - "Also, you can override jinja2 settings by adding a special header to template file. i.e. C(#jinja2:variable_start_string:'[%' , variable_end_string:'%]') - which changes the variable interpolation markers to [% var %] instead of {{ var }}." This is the best way to prevent evaluation of things that look like, but should not be Jinja2. raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated. + which changes the variable interpolation markers to [% var %] instead of {{ var }}. This is the best way to prevent evaluation of things that look like, but should not be Jinja2. 
raw/endraw in Jinja2 will not work as you expect because templates in Ansible are recursively evaluated." requirements: [] author: Michael DeHaan From bec49768d12587bec943b85b8a0c41cc7235e4dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dag=20H=C3=B8idahl?= Date: Fri, 3 Jan 2014 15:15:20 +0100 Subject: [PATCH 29/59] Use "brew list " instead of grepping the output of "brew list". Some brew packages are not listed with their package name, e.g. libjpeg. --- packaging/homebrew | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/homebrew b/packaging/homebrew index 8801a4e6c4d..ab1362acf1d 100644 --- a/packaging/homebrew +++ b/packaging/homebrew @@ -71,7 +71,7 @@ def query_package(module, brew_path, name, state="present"): """ Returns whether a package is installed or not. """ if state == "present": - rc, out, err = module.run_command("%s list -m1 | grep -q '^%s$'" % (brew_path, name)) + rc, out, err = module.run_command("%s list %s" % (brew_path, name)) if rc == 0: return True From fa5a44898367461d0e215c12061939c516a8bc51 Mon Sep 17 00:00:00 2001 From: rgbj Date: Fri, 3 Jan 2014 15:41:12 +0100 Subject: [PATCH 30/59] On OpenBSD, make user module status 'changed' only if necessary when using 'login_class' parameter --- system/user | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/system/user b/system/user index 48bcf751716..797c25e87a4 100644 --- a/system/user +++ b/system/user @@ -901,8 +901,21 @@ class OpenBSDUser(User): cmd.append(self.shell) if self.login_class is not None: - cmd.append('-L') - cmd.append(self.login_class) + # find current login class + user_login_class = None + userinfo_cmd = [self.module.get_bin_path('userinfo', True), self.name] + (rc, out, err) = self.execute_command(userinfo_cmd) + + for line in out.splitlines(): + tokens = line.split() + + if tokens[0] == 'class' and len(tokens) == 2: + user_login_class = tokens[1] + + # act only if login_class change + if self.login_class != user_login_class: + cmd.append('-L') + cmd.append(self.login_class) if self.update_password == 'always' and self.password is not None and info[1] != self.password: cmd.append('-p') From f7233116012d6c4c3aff9e23bca187b1675687da Mon Sep 17 00:00:00 2001 From: James Tanner Date: Fri, 3 Jan 2014 15:29:18 -0500 Subject: [PATCH 31/59] Resolves issues with newer versions of pip not having a --use-mirrors paramater for the install command --- packaging/pip | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/packaging/pip b/packaging/pip index 13cf162fe15..414dfcc0933 100644 --- a/packaging/pip +++ b/packaging/pip @@ -275,6 +275,7 @@ def main(): pip = _get_pip(module, env, module.params['executable']) cmd = '%s %s' % (pip, state_map[state]) + cmd_opts = None # If there's a virtualenv we want things we install to be able to use other # installations that exist as binaries within this virtualenv. 
Example: we @@ -319,7 +320,11 @@ def main(): is_local_path = True # for tarball or vcs source, applying --use-mirrors doesn't really make sense is_package = is_vcs or is_tar or is_local_path # just a shortcut for bool - if not is_package and state != 'absent' and use_mirrors: + + if cmd_opts is None: + cmd_opts = _get_cmd_options(module, cmd) + + if not is_package and state != 'absent' and use_mirrors and '--use-mirrors' in cmd_opts: cmd += ' --use-mirrors' cmd += ' %s' % _get_full_name(name, version) elif requirements: From 31f81c3a1bbba20192c7cc392263e6c81529e83e Mon Sep 17 00:00:00 2001 From: Paul Beattie Date: Sat, 4 Jan 2014 00:22:14 +0000 Subject: [PATCH 32/59] Fixed typo in example documentation --- system/sysctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/sysctl b/system/sysctl index 71320a3453c..1a4fd7fe47a 100644 --- a/system/sysctl +++ b/system/sysctl @@ -81,7 +81,7 @@ EXAMPLES = ''' # Set kernel.panic to 3 in /tmp/test_sysctl.conf, check if the sysctl key # seems writable, but do not reload sysctl, and do not check kernel value # after (not needed, because the real /etc/sysctl.conf was not updated) -- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf check=before reload=no +- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf checks=before reload=no ''' # ============================================================== From 0fdcb8ea69b266da0538e7ef218f2025de3f6390 Mon Sep 17 00:00:00 2001 From: Michael DeHaan Date: Tue, 7 Jan 2014 06:01:47 -0800 Subject: [PATCH 33/59] Merge pull request #5528 from dverhelst/devel Adding support for detecting RHEV Hypervisor in ansible_virtualization_type --- system/setup | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/setup b/system/setup index 542280624e4..c8f825b9ace 100755 --- a/system/setup +++ b/system/setup @@ -2080,6 +2080,11 @@ class LinuxVirtual(Virtual): self.facts['virtualization_role'] = 'guest' return + if product_name == 'RHEV Hypervisor': + self.facts['virtualization_type'] = 'RHEV' + self.facts['virtualization_role'] = 'guest' + return + if product_name == 'VMware Virtual Platform': self.facts['virtualization_type'] = 'VMware' self.facts['virtualization_role'] = 'guest' From 4cd821e9d960e60c0622ff1d064dd51c3d0fd74a Mon Sep 17 00:00:00 2001 From: jctanner Date: Tue, 7 Jan 2014 07:10:31 -0800 Subject: [PATCH 34/59] Merge pull request #5404 from sivel/rax-improvements rax module: improvements --- cloud/rax | 167 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 115 insertions(+), 52 deletions(-) diff --git a/cloud/rax b/cloud/rax index d67802ce1e0..03e99ea2a90 100644 --- a/cloud/rax +++ b/cloud/rax @@ -26,6 +26,13 @@ options: api_key: description: - Rackspace API key (overrides I(credentials)) + auto_increment: + description: + - Whether or not to increment a single number with the name of the + created servers. Only applicable when used with the I(group) attribute + or meta key. 
+ default: yes + version_added: 1.5 count: description: - number of instances to launch @@ -147,6 +154,26 @@ EXAMPLES = ''' networks: - private - public + register: rax + +- name: Build an exact count of cloud servers with incremented names + hosts: local + gather_facts: False + tasks: + - name: Server build requests + local_action: + module: rax + credentials: ~/.raxpub + name: test%03d.example.org + flavor: performance1-1 + image: ubuntu-1204-lts-precise-pangolin + state: present + count: 10 + count_offset: 10 + exact_count: yes + group: test + wait: yes + register: rax ''' import sys @@ -199,7 +226,7 @@ def create(module, names, flavor, image, meta, key_name, files, lpath = os.path.expanduser(files[rpath]) try: fileobj = open(lpath, 'r') - files[rpath] = fileobj + files[rpath] = fileobj.read() except Exception, e: module.fail_json(msg='Failed to load %s' % lpath) try: @@ -347,7 +374,8 @@ def delete(module, instance_ids, wait, wait_timeout): def cloudservers(module, state, name, flavor, image, meta, key_name, files, wait, wait_timeout, disk_config, count, group, - instance_ids, exact_count, networks, count_offset): + instance_ids, exact_count, networks, count_offset, + auto_increment): cs = pyrax.cloudservers cnw = pyrax.cloud_networks servers = [] @@ -358,6 +386,15 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files, elif 'group' in meta and group is None: group = meta['group'] + # When using state=absent with group, the absent block won't match the + # names properly. Use the exact_count functionality to decrease the count + # to the desired level + was_absent = False + if group is not None and state == 'absent': + exact_count = True + state = 'present' + was_absent = True + # Check if the provided image is a UUID and if not, search for an # appropriate image using human_id and name if image: @@ -416,27 +453,43 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files, module.fail_json(msg='"group" must be provided when using ' '"exact_count"') else: - numbers = set() + if auto_increment: + numbers = set() - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name + try: + name % 0 + except TypeError, e: + if e.message.startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message) + + pattern = re.sub(r'%\d+[sd]', r'(\d+)', name) + for server in cs.servers.list(): + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, count_offset + count) + available_numbers = list(set(number_range) + .difference(numbers)) + else: + for server in cs.servers.list(): + if server.metadata.get('group') == group: + servers.append(server) + + # If state was absent but the count was changed, + # assume we only wanted to remove that number of instances + if was_absent: + diff = len(servers) - count + if diff < 0: + count = 0 else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d+[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, count_offset + count) - available_numbers = list(set(number_range).difference(numbers)) + count = diff + if len(servers) > count: state = 'absent' del servers[:count] @@ -445,45 
+498,52 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files, instance_ids.append(server.id) delete(module, instance_ids, wait, wait_timeout) elif len(servers) < count: - names = [] - numbers_to_use = available_numbers[:count - len(servers)] - for number in numbers_to_use: - names.append(name % number) + if auto_increment: + names = [] + name_slice = count - len(servers) + numbers_to_use = available_numbers[:name_slice] + for number in numbers_to_use: + names.append(name % number) + else: + names = [name] * (count - len(servers)) else: module.exit_json(changed=False, action=None, instances=[], success=[], error=[], timeout=[], instance_ids={'instances': [], 'success': [], 'error': [], 'timeout': []}) - else: if group is not None: - numbers = set() + if auto_increment: + numbers = set() - try: - name % 0 - except TypeError, e: - if e.message.startswith('not all'): - name = '%s%%d' % name - else: - module.fail_json(msg=e.message) - - pattern = re.sub(r'%\d+[sd]', r'(\d+)', name) - for server in cs.servers.list(): - if server.metadata.get('group') == group: - servers.append(server) - match = re.search(pattern, server.name) - if match: - number = int(match.group(1)) - numbers.add(number) - - number_range = xrange(count_offset, - count_offset + count + len(numbers)) - available_numbers = list(set(number_range).difference(numbers)) - names = [] - numbers_to_use = available_numbers[:count] - for number in numbers_to_use: - names.append(name % number) + try: + name % 0 + except TypeError, e: + if e.message.startswith('not all'): + name = '%s%%d' % name + else: + module.fail_json(msg=e.message) + + pattern = re.sub(r'%\d+[sd]', r'(\d+)', name) + for server in cs.servers.list(): + if server.metadata.get('group') == group: + servers.append(server) + match = re.search(pattern, server.name) + if match: + number = int(match.group(1)) + numbers.add(number) + + number_range = xrange(count_offset, + count_offset + count + len(numbers)) + available_numbers = list(set(number_range) + .difference(numbers)) + names = [] + numbers_to_use = available_numbers[:count] + for number in numbers_to_use: + names.append(name % number) + else: + names = [name] * count else: search_opts = { 'name': name, @@ -552,6 +612,7 @@ def main(): argument_spec = rax_argument_spec() argument_spec.update( dict( + auto_increment=dict(choices=BOOLEANS, default=True, type='bool'), count=dict(default=1, type='int'), count_offset=dict(default=1, type='int'), disk_config=dict(default='auto', choices=['auto', 'manual']), @@ -584,6 +645,7 @@ def main(): 'please remove "service: cloudservers" from your ' 'playbook pertaining to the "rax" module') + auto_increment = module.params.get('auto_increment') count = module.params.get('count') count_offset = module.params.get('count_offset') disk_config = module.params.get('disk_config').upper() @@ -605,7 +667,8 @@ def main(): cloudservers(module, state, name, flavor, image, meta, key_name, files, wait, wait_timeout, disk_config, count, group, - instance_ids, exact_count, networks, count_offset) + instance_ids, exact_count, networks, count_offset, + auto_increment) # import module snippets From 154a99529c3d72a808866e00ee656f0ddc3c1080 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 7 Jan 2014 14:21:22 -0500 Subject: [PATCH 35/59] Fixes #5353 create etc/hostname file in debian if it does not exist --- system/hostname | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/system/hostname b/system/hostname index 9aa7c206a4b..8a9401d2436 100644 --- a/system/hostname +++ 
b/system/hostname @@ -146,6 +146,12 @@ class DebianStrategy(GenericStrategy): HOSTNAME_FILE = '/etc/hostname' def get_permanent_hostname(self): + if not os.path.isfile(self.HOSTNAME_FILE): + try: + open(self.HOSTNAME_FILE, "a").write("") + except IOError, err: + self.module.fail_json(msg="failed to write file: %s" % + str(err)) try: f = open(self.HOSTNAME_FILE) try: From c74b99ca63c0960155076d22edd17c1e6602c494 Mon Sep 17 00:00:00 2001 From: James Tanner Date: Tue, 7 Jan 2014 14:43:22 -0500 Subject: [PATCH 36/59] Fixes #5369 Do not pass --force-yes to aptitude --- packaging/apt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/packaging/apt b/packaging/apt index 409eb898e62..a7a25898899 100644 --- a/packaging/apt +++ b/packaging/apt @@ -324,13 +324,17 @@ def upgrade(m, mode="yes", force=False, upgrade_command = "safe-upgrade" if force: - force_yes = '--force-yes' + if apt_cmd == APT_GET_CMD: + force_yes = '--force-yes' + else: + force_yes = '' else: force_yes = '' apt_cmd_path = m.get_bin_path(apt_cmd, required=True) cmd = '%s %s -y %s %s %s %s' % (APT_ENVVARS, apt_cmd_path, dpkg_options, force_yes, check_arg, upgrade_command) + open("/tmp/awx.log", "a").write("cmd: %s\n" % cmd) rc, out, err = m.run_command(cmd) if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) From e0a720b8b220e5279fdfd827b53f78bb47f3e64e Mon Sep 17 00:00:00 2001 From: Joshua Lund Date: Tue, 7 Jan 2014 18:20:39 -0700 Subject: [PATCH 37/59] Improved the documentation for password generation --- system/user | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/system/user b/system/user index 48bcf751716..aa32393bd95 100644 --- a/system/user +++ b/system/user @@ -77,8 +77,8 @@ options: description: - Optionally set the user's password to this crypted value. See the user example in the github examples directory for what this looks - like in a playbook. - - Passwords values can be generated with "openssl passwd -salt -1 " + like in a playbook. The `FAQ <http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module>`_ + contains details on various ways to generate these password values. state: required: false default: "present" From 0f0edf69143fa66d192fa5991e108e27831f8f5e Mon Sep 17 00:00:00 2001 From: lichesser <lichesser@users.noreply.github.com> Date: Wed, 8 Jan 2014 02:34:59 +0100 Subject: [PATCH 38/59] Remove debugging code I guess this is left over from testing --- packaging/apt | 1 - 1 file changed, 1 deletion(-) diff --git a/packaging/apt b/packaging/apt index a7a25898899..eb64f8701fb 100644 --- a/packaging/apt +++ b/packaging/apt @@ -334,7 +334,6 @@ def upgrade(m, mode="yes", force=False, apt_cmd_path = m.get_bin_path(apt_cmd, required=True) cmd = '%s %s -y %s %s %s %s' % (APT_ENVVARS, apt_cmd_path, dpkg_options, force_yes, check_arg, upgrade_command) - open("/tmp/awx.log", "a").write("cmd: %s\n" % cmd) rc, out, err = m.run_command(cmd) if rc: m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out) From a3eb6fb532599afa98afb97d1b0859a3435b2157 Mon Sep 17 00:00:00 2001 From: "inetfuture(Aaron Wang)" <inetfuture@gmail.com> Date: Thu, 9 Jan 2014 08:53:17 +0800 Subject: [PATCH 39/59] Fix npm documentation typo. 
--- packaging/npm | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/npm b/packaging/npm index 0cdcf64c63b..3a4cd13f5d7 100644 --- a/packaging/npm +++ b/packaging/npm @@ -30,7 +30,7 @@ options: name: description: - The name of a node.js library to install - requires: false + required: false path: description: - The base path where to install the node.js libraries @@ -101,7 +101,7 @@ class Npm(object): self.version = kwargs['version'] self.path = kwargs['path'] self.production = kwargs['production'] - + if kwargs['executable']: self.executable = kwargs['executable'] else: @@ -119,7 +119,7 @@ class Npm(object): if self.glbl: cmd.append('--global') if self.production: - cmd.append('--production') + cmd.append('--production') if self.name: cmd.append(self.name_version) @@ -180,7 +180,7 @@ def main(): executable=dict(default=None), state=dict(default='present', choices=['present', 'absent', 'latest']) ) - arg_spec['global']=dict(default='no', type='bool') + arg_spec['global'] = dict(default='no', type='bool') module = AnsibleModule( argument_spec=arg_spec, supports_check_mode=True From 414855560eee0c5afdb7c3288e9f81f8f61aaefb Mon Sep 17 00:00:00 2001 From: Matt Martz <matt@sivel.net> Date: Thu, 9 Jan 2014 14:05:32 -0600 Subject: [PATCH 40/59] Ensure the id attribute is returned for a node --- cloud/rax_clb | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/cloud/rax_clb b/cloud/rax_clb index 65435a42be4..56d7f62d78e 100644 --- a/cloud/rax_clb +++ b/cloud/rax_clb @@ -118,7 +118,6 @@ EXAMPLES = ''' ''' import sys -import os from types import NoneType @@ -136,6 +135,12 @@ PROTOCOLS = ['DNS_TCP', 'DNS_UDP', 'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP'] +def node_to_dict(obj): + node = obj.to_dict() + node['id'] = obj.id + return node + + def to_dict(obj): instance = {} for key in dir(obj): @@ -151,7 +156,7 @@ def to_dict(obj): elif key == 'nodes': instance[key] = [] for node in value: - instance[key].append(node.to_dict()) + instance[key].append(node_to_dict(node)) elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')): instance[key] = value From 24eba74edddae9bb67f177461563d9790190f23d Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Sat, 11 Jan 2014 10:35:33 -0500 Subject: [PATCH 41/59] Add an "accept_hostkey" parameter to the git module to help automatically accept hostkeys for git repos and prevent task hangs when the key is unknown --- source_control/git | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/source_control/git b/source_control/git index 61ef24e1485..320414a35cf 100644 --- a/source_control/git +++ b/source_control/git @@ -43,6 +43,12 @@ options: - What version of the repository to check out. This can be the full 40-character I(SHA-1) hash, the literal string C(HEAD), a branch name, or a tag name. + accept_hostkey: + required: false + default: true + version_added: "1.5" + description: + - Add the hostkey for the repo url if not already added. 
reference: required: false default: null @@ -118,6 +124,7 @@ EXAMPLES = ''' import re import tempfile + def get_version(git_path, dest, ref="HEAD"): ''' samples the version of the git repo ''' os.chdir(dest) @@ -352,6 +359,7 @@ def main(): force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), update=dict(default='yes', type='bool'), + accept_hostkey=dict(default='yes', type='bool'), executable=dict(default=None), bare=dict(default='no', type='bool'), ), @@ -369,6 +377,10 @@ def main(): reference = module.params['reference'] git_path = module.params['executable'] or module.get_bin_path('git', True) + # add the git repo's hostkey + if module.params['accept_hostkey']: + add_git_host_key(module, repo, accept_hostkey=True) + if bare: gitconfig = os.path.join(dest, 'config') else: @@ -430,4 +442,6 @@ def main(): # import module snippets from ansible.module_utils.basic import * +from ansible.module_utils.known_hosts import * + main() From 13ce8728de747bdfbdd762319074ddd2510c539b Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Sat, 11 Jan 2014 11:02:01 -0500 Subject: [PATCH 42/59] Set accept_hostkey to False by default in the git module and fail early if the key is unknown --- source_control/git | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source_control/git b/source_control/git index 320414a35cf..41cf53133d5 100644 --- a/source_control/git +++ b/source_control/git @@ -45,7 +45,7 @@ options: branch name, or a tag name. accept_hostkey: required: false - default: true + default: false version_added: "1.5" description: - Add the hostkey for the repo url if not already added. @@ -359,7 +359,7 @@ def main(): force=dict(default='yes', type='bool'), depth=dict(default=None, type='int'), update=dict(default='yes', type='bool'), - accept_hostkey=dict(default='yes', type='bool'), + accept_hostkey=dict(default='no', type='bool'), executable=dict(default=None), bare=dict(default='no', type='bool'), ), @@ -378,8 +378,8 @@ def main(): git_path = module.params['executable'] or module.get_bin_path('git', True) # add the git repo's hostkey - if module.params['accept_hostkey']: - add_git_host_key(module, repo, accept_hostkey=True) + #if module.params['accept_hostkey']: + add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey']) if bare: gitconfig = os.path.join(dest, 'config') From f0b842b28b3e4bee9015d8058810948058fc0929 Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Mon, 13 Jan 2014 18:53:02 -0500 Subject: [PATCH 43/59] Fixes #5469 Refactor sysctl module into object oriented code, and add a sysctl_set parameter to manage the values in /proc --- system/sysctl | 425 +++++++++++++++++++++----------------------------- 1 file changed, 182 insertions(+), 243 deletions(-) diff --git a/system/sysctl b/system/sysctl index 1a4fd7fe47a..54f116c6c11 100644 --- a/system/sysctl +++ b/system/sysctl @@ -2,6 +2,7 @@ # -*- coding: utf-8 -*- # (c) 2012, David "DaviXX" CHANIAL <david.chanial@gmail.com> +# (c) 2014, James Tanner <tanner.jc@gmail.com> # # This file is part of Ansible # @@ -41,19 +42,9 @@ options: aliases: [ 'val' ] state: description: - - Whether the entry should be present or absent. + - Whether the entry should be present or absent in the sysctl file. choices: [ "present", "absent" ] default: present - checks: - description: - - If C(none), no smart/facultative checks will be made. If - C(before), some checks are performed before any update (i.e. is - the sysctl key writable?). 
If C(after), some checks are performed - after an update (i.e. does kernel return the set value?). If - C(both), all of the smart checks (C(before) and C(after)) are - performed. - choices: [ "none", "before", "after", "both" ] - default: both reload: description: - If C(yes), performs a I(/sbin/sysctl -p) if the C(sysctl_file) is @@ -66,6 +57,13 @@ options: - Specifies the absolute path to C(sysctl.conf), if not C(/etc/sysctl.conf). required: false default: /etc/sysctl.conf + sysctl_set: + description: + - Verify token value with the sysctl command and set with -w if necessary + choices: [ "yes", "no" ] + required: false + version_added: 1.5 + default: False notes: [] requirements: [] author: David "DaviXX" CHANIAL <david.chanial@gmail.com> @@ -78,10 +76,14 @@ EXAMPLES = ''' # Remove kernel.panic entry from /etc/sysctl.conf - sysctl: name=kernel.panic state=absent sysctl_file=/etc/sysctl.conf -# Set kernel.panic to 3 in /tmp/test_sysctl.conf, check if the sysctl key -# seems writable, but do not reload sysctl, and do not check kernel value -# after (not needed, because the real /etc/sysctl.conf was not updated) -- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf checks=before reload=no +# Set kernel.panic to 3 in /tmp/test_sysctl.conf +- sysctl: name=kernel.panic value=3 sysctl_file=/tmp/test_sysctl.conf reload=no + +# Set ip fowarding on in /proc and do not reload the sysctl file +- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes + +# Set ip forwarding on in /proc and in the sysctl file and reload if necessary +- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes ''' # ============================================================== @@ -90,137 +92,168 @@ import os import tempfile import re -# ============================================================== - -def reload_sysctl(module, **sysctl_args): - # update needed ? - if not sysctl_args['reload']: - return 0, '' - - # do it - if get_platform().lower() == 'freebsd': - # freebsd doesn't support -p, so reload the sysctl service - rc,out,err = module.run_command('/etc/rc.d/sysctl reload') - else: - # system supports reloading via the -p flag to sysctl, so we'll use that - sysctl_cmd = module.get_bin_path('sysctl', required=True) - rc,out,err = module.run_command([sysctl_cmd, '-p', sysctl_args['sysctl_file']]) - - return rc,out+err - -# ============================================================== - -def write_sysctl(module, lines, **sysctl_args): - # open a tmp file - fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', os.path.dirname(sysctl_args['sysctl_file'])) - f = open(tmp_path,"w") - try: - for l in lines: - f.write(l) - except IOError, e: - module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) - f.flush() - f.close() - - # replace the real one - module.atomic_move(tmp_path, sysctl_args['sysctl_file']) - - # end - return sysctl_args - -# ============================================================== - -def sysctl_args_expand(**sysctl_args): - if get_platform().lower() == 'freebsd': - # FreeBSD does not use the /proc file system, and instead - # just uses the sysctl command to set the values - sysctl_args['key_path'] = None - else: - sysctl_args['key_path'] = sysctl_args['name'].replace('.' 
,'/') - sysctl_args['key_path'] = '/proc/sys/' + sysctl_args['key_path'] - return sysctl_args - -# ============================================================== - -def sysctl_args_collapse(**sysctl_args): - # go ahead - if sysctl_args.get('key_path') is not None: - del sysctl_args['key_path'] - if sysctl_args['state'] == 'absent' and 'value' in sysctl_args: - del sysctl_args['value'] - - # end - return sysctl_args - -# ============================================================== - -def sysctl_check(module, current_step, **sysctl_args): - - # no smart checks at this step ? - if sysctl_args['checks'] == 'none': - return 0, '' - if current_step == 'before' and sysctl_args['checks'] not in ['before', 'both']: - return 0, '' - if current_step == 'after' and sysctl_args['checks'] not in ['after', 'both']: - return 0, '' - - # checking coherence - if sysctl_args['state'] == 'absent' and sysctl_args['value'] is not None: - return 1, 'value=x must not be supplied when state=absent' - - if sysctl_args['state'] == 'present' and sysctl_args['value'] is None: - return 1, 'value=x must be supplied when state=present' - - if not sysctl_args['reload'] and sysctl_args['checks'] in ['after', 'both']: - return 1, 'checks cannot be set to after or both if reload=no' - - if sysctl_args['key_path'] is not None: - # getting file stat - if not os.access(sysctl_args['key_path'], os.F_OK): - return 1, 'key_path is not an existing file, key %s seems invalid' % sysctl_args['key_path'] - if not os.access(sysctl_args['key_path'], os.R_OK): - return 1, 'key_path is not a readable file, key seems to be uncheckable' - - # checks before - if current_step == 'before' and sysctl_args['checks'] in ['before', 'both']: - if sysctl_args['key_path'] is not None and not os.access(sysctl_args['key_path'], os.W_OK): - return 1, 'key_path is not a writable file, key seems to be read only' - return 0, '' - - # checks after - if current_step == 'after' and sysctl_args['checks'] in ['after', 'both']: - if sysctl_args['value'] is not None: - if sysctl_args['key_path'] is not None: - # reading the virtual file - f = open(sysctl_args['key_path'],'r') - output = f.read() - f.close() - else: - # we're on a system without /proc (ie. 
freebsd), so just - # use the sysctl command to get the currently set value - sysctl_cmd = module.get_bin_path('sysctl', required=True) - rc,output,stderr = module.run_command("%s -n %s" % (sysctl_cmd, sysctl_args['name'])) - if rc != 0: - return 1, 'failed to lookup the value via the sysctl command' - - output = output.strip(' \t\n\r') - output = re.sub(r'\s+', ' ', output) - - # normal case, found value must be equal to the submitted value, and - # we compare the exploded values to handle any whitepsace differences - if output.split() != sysctl_args['value'].split(): - return 1, 'key seems not set to value even after update/sysctl, founded : <%s>, wanted : <%s>' % (output, sysctl_args['value']) - - return 0, '' +class SysctlModule(object): + + def __init__(self, module): + self.module = module + self.args = self.module.params + + self.sysctl_cmd = self.module.get_bin_path('sysctl', required=True) + self.sysctl_file = self.args['sysctl_file'] + + self.proc_value = None # current token value in proc fs + self.file_value = None # current token value in file + self.file_lines = [] # all lines in the file + self.file_values = {} # dict of token values + + self.changed = False # will change occur + self.set_proc = False # does sysctl need to set value + self.write_file = False # does the sysctl file need to be reloaded + + self.process() + + # ============================================================== + # LOGIC + # ============================================================== + + def process(self): + + # Whitespace is bad + self.args['name'] = self.args['name'].strip() + self.args['value'] = self.args['value'].strip() + + thisname = self.args['name'] + + # get the current proc fs value + self.proc_value = self.get_token_curr_value(thisname) + + # get the currect sysctl file value + self.read_sysctl_file() + if thisname not in self.file_values: + self.file_values[thisname] = None + + # update file contents with desired token/value + self.fix_lines() + + # what do we need to do now? 
+ if self.file_values[thisname] is None and self.args['state'] == "present": + self.changed = True + self.write_file = True + elif self.file_values[thisname] != self.args['value']: + self.changed = True + self.write_file = True + if self.args['sysctl_set']: + if self.proc_value is None: + self.changed = True + elif self.proc_value != self.args['value']: + self.changed = True + self.set_proc = True + + # Do the work + if not self.module.check_mode: + if self.write_file: + self.write_sysctl() + if self.write_file and self.args['reload']: + self.reload_sysctl() + if self.set_proc: + self.set_token_value(self.args['name'], self.args['value']) + + # ============================================================== + # SYSCTL COMMAND MANAGEMENT + # ============================================================== + + # Use the sysctl command to find the current value + def get_token_curr_value(self, token): + thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token) + rc,out,err = self.module.run_command(thiscmd) + if rc != 0: + return None + else: + return shlex.split(out)[-1] + + # Use the sysctl command to set the current value + def set_token_value(self, token, value): + thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) + rc,out,err = self.module.run_command(thiscmd) + if rc != 0: + self.module.fail_json(msg='setting %s failed: %s' (token, out + err)) + else: + return rc + + # Run sysctl -p + def reload_sysctl(self): + # do it + if get_platform().lower() == 'freebsd': + # freebsd doesn't support -p, so reload the sysctl service + rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload') else: - # no value was supplied, so we're checking to make sure - # the associated name is absent. We just fudge this since - # the sysctl isn't really gone, just removed from the conf - # file meaning it will be whatever the system default is - return 0, '' + # system supports reloading via the -p flag to sysctl, so we'll use that + rc,out,err = self.module.run_command([self.sysctl_cmd, '-p', self.sysctl_file]) + + return rc,out+err + + # ============================================================== + # SYSCTL FILE MANAGEMENT + # ============================================================== + + # Get the token value from the sysctl file + def read_sysctl_file(self): + lines = open(self.sysctl_file, "r").readlines() + for line in lines: + line = line.strip() + self.file_lines.append(line) + + # don't split empty lines or comments + if not line or line.startswith("#"): + continue + + k, v = line.split('=',1) + k = k.strip() + v = v.strip() + self.file_values[k] = v.strip() + + # Fix the value in the sysctl file content + def fix_lines(self): + checked = [] + self.fixed_lines = [] + for line in self.file_lines: + if not line.strip() or line.strip().startswith("#"): + self.fixed_lines.append(line) + continue + tmpline = line.strip() + k, v = line.split('=',1) + k = k.strip() + v = v.strip() + if k not in checked: + checked.append(k) + if k == self.args['name']: + if self.args['state'] == "present": + new_line = "%s = %s\n" % (k, self.args['value']) + self.fixed_lines.append(new_line) + else: + new_line = "%s = %s\n" % (k, v) + self.fixed_lines.append(new_line) + + if self.args['name'] not in checked and self.args['state'] == "present": + new_line = "%s = %s\n" % (self.args['name'], self.args['value']) + self.fixed_lines.append(new_line) + + # Completely rewrite the sysctl file + def write_sysctl(self): + # open a tmp file + fd, tmp_path = tempfile.mkstemp('.conf', '.ansible_m_sysctl_', 
os.path.dirname(self.sysctl_file)) + f = open(tmp_path,"w") + try: + for l in self.fixed_lines: + f.write(l) + except IOError, e: + self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) + f.flush() + f.close() + + # replace the real one + self.module.atomic_move(tmp_path, self.sysctl_file) - # weird end - return 1, 'unexpected position reached' # ============================================================== # main @@ -233,110 +266,16 @@ def main(): name = dict(aliases=['key'], required=True), value = dict(aliases=['val'], required=False), state = dict(default='present', choices=['present', 'absent']), - checks = dict(default='both', choices=['none', 'before', 'after', 'both']), reload = dict(default=True, type='bool'), + sysctl_set = dict(default=True, type='bool'), sysctl_file = dict(default='/etc/sysctl.conf') - ) + ), + supports_check_mode=True ) - # defaults - sysctl_args = { - 'changed': False, - 'name': module.params['name'], - 'state': module.params['state'], - 'checks': module.params['checks'], - 'reload': module.params['reload'], - 'value': module.params.get('value'), - 'sysctl_file': module.params['sysctl_file'] - } - - # prepare vars - sysctl_args = sysctl_args_expand(**sysctl_args) - if get_platform().lower() == 'freebsd': - # freebsd does not like spaces around the equal sign - pattern = "%s=%s\n" - else: - pattern = "%s = %s\n" - new_line = pattern % (sysctl_args['name'], sysctl_args['value']) - to_write = [] - founded = False - - # make checks before act - res,msg = sysctl_check(module, 'before', **sysctl_args) - if res != 0: - module.fail_json(msg='checks_before failed with: ' + msg) - - if not os.access(sysctl_args['sysctl_file'], os.W_OK): - try: - f = open(sysctl_args['sysctl_file'],'w') - f.close() - except IOError, e: - module.fail_json(msg='unable to create supplied sysctl file (destination directory probably missing)') - - # reading the file - for line in open(sysctl_args['sysctl_file'], 'r').readlines(): - if not line.strip(): - to_write.append(line) - continue - if line.strip().startswith('#'): - to_write.append(line) - continue - - # write line if not the one searched - ld = {} - ld['name'], ld['val'] = line.split('=',1) - ld['name'] = ld['name'].strip() - - if ld['name'] != sysctl_args['name']: - to_write.append(line) - continue - - # should be absent ? - if sysctl_args['state'] == 'absent': - # not writing the founded line - # mark as changed - sysctl_args['changed'] = True - - # should be present - if sysctl_args['state'] == 'present': - # is the founded line equal to the wanted one ? - ld['val'] = ld['val'].strip() - if ld['val'] == sysctl_args['value']: - # line is equal, writing it without update (but cancel repeats) - if sysctl_args['changed'] == False and founded == False: - to_write.append(line) - founded = True - else: - # update the line (but cancel repeats) - if sysctl_args['changed'] == False and founded == False: - to_write.append(new_line) - sysctl_args['changed'] = True - continue + result = SysctlModule(module) - # if not changed, but should be present, so we have to add it - if sysctl_args['state'] == 'present' and sysctl_args['changed'] == False and founded == False: - to_write.append(new_line) - sysctl_args['changed'] = True - - # has changed ? 
- res = 0 - if sysctl_args['changed'] == True: - sysctl_args = write_sysctl(module, to_write, **sysctl_args) - res,msg = reload_sysctl(module, **sysctl_args) - - # make checks after act - res,msg = sysctl_check(module, 'after', **sysctl_args) - if res != 0: - module.fail_json(msg='checks_after failed with: ' + msg) - - # look at the next link to avoid this workaround - # https://groups.google.com/forum/?fromgroups=#!topic/ansible-project/LMY-dwF6SQk - changed = sysctl_args['changed'] - del sysctl_args['changed'] - - # end - sysctl_args = sysctl_args_collapse(**sysctl_args) - module.exit_json(changed=changed, **sysctl_args) + module.exit_json(changed=result.changed) sys.exit(0) # import module snippets From 0d55cca4c3474909d831da0db981e65c0b8b1a73 Mon Sep 17 00:00:00 2001 From: Danilo Bargen <gezuru@gmail.com> Date: Tue, 14 Jan 2014 01:11:40 +0100 Subject: [PATCH 44/59] apt_key module: Case insensitive presence checking Right now key_present() is case sensitive, only uppercase key IDs work. By using "grep -i", lowercase IDs should work too. --- packaging/apt_key | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/apt_key b/packaging/apt_key index dee73762c62..eee86337020 100644 --- a/packaging/apt_key +++ b/packaging/apt_key @@ -124,7 +124,7 @@ def all_keys(module, keyring): return results def key_present(module, key_id): - (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -q %s" % key_id) + (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -i -q %s" % key_id) return rc == 0 def download_key(module, url): From ab8d5c5924a5246a5ee831c284628840483ea885 Mon Sep 17 00:00:00 2001 From: David Busby <oneiroi@fedoraproject.org> Date: Tue, 14 Jan 2014 12:00:25 +0000 Subject: [PATCH 45/59] Documentation update to yield example of "WITH GRANT OPTION" which is undocumented previous --- database/mysql_user | 3 +++ 1 file changed, 3 insertions(+) diff --git a/database/mysql_user b/database/mysql_user index 1eeb81f112c..e7fad3d77c6 100644 --- a/database/mysql_user +++ b/database/mysql_user @@ -114,6 +114,9 @@ EXAMPLES = """ # Create database user with name 'bob' and password '12345' with all database privileges - mysql_user: name=bob password=12345 priv=*.*:ALL state=present +# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION' +- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present + # Ensure no user named 'sally' exists, also passing in the auth credentials. - mysql_user: login_user=root login_password=123456 name=sally state=absent From beac8292d5480f151d67c81b488171d984447cf8 Mon Sep 17 00:00:00 2001 From: Rene Moser <mail@renemoser.net> Date: Tue, 14 Jan 2014 14:20:18 +0100 Subject: [PATCH 46/59] zypper_repository: claims to support check_mode, but does not. Fixes GH-5614. 
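For reviewers unfamiliar with the flag: supports_check_mode=True only advertises support; the module body still has to short-circuit on module.check_mode before shelling out to zypper. A minimal sketch of what honoring check mode would look like (illustrative only, not part of this patch; the detection logic and argument names are placeholders):

    from ansible.module_utils.basic import *   # provides AnsibleModule, as in the modules above

    module = AnsibleModule(
        argument_spec=dict(
            repo=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
        ),
        supports_check_mode=True,
    )
    # placeholder check -- a real module would query zypper for the repo's current state
    would_change = module.params['state'] == 'present'
    if module.check_mode:
        module.exit_json(changed=would_change)   # report the prospective change, modify nothing

Until the module grows that logic, declaring supports_check_mode=False (below) is the honest choice.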
--- packaging/zypper_repository | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packaging/zypper_repository b/packaging/zypper_repository index f585d0bde2e..2dc177dc7bf 100644 --- a/packaging/zypper_repository +++ b/packaging/zypper_repository @@ -120,7 +120,7 @@ def main(): description=dict(required=False), disable_gpg_check = dict(required=False, default='no', type='bool'), ), - supports_check_mode=True, + supports_check_mode=False, ) repo = module.params['repo'] From 1b712ec48de7d5c59efbdcdac0f29d582e0b04b9 Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Tue, 14 Jan 2014 10:48:57 -0500 Subject: [PATCH 47/59] sysctl module: fail if reloading the file did not succeed --- system/sysctl | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/system/sysctl b/system/sysctl index 54f116c6c11..050c2effb1c 100644 --- a/system/sysctl +++ b/system/sysctl @@ -189,8 +189,9 @@ class SysctlModule(object): else: # system supports reloading via the -p flag to sysctl, so we'll use that rc,out,err = self.module.run_command([self.sysctl_cmd, '-p', self.sysctl_file]) - - return rc,out+err + + if rc != 0: + self.module.fail_json(msg="Failed to reload sysctl: %s" % str(out) + str(err)) # ============================================================== # SYSCTL FILE MANAGEMENT From 42dab7bed8d8cebbb7beeaa38fc5de512d3a11e1 Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Tue, 14 Jan 2014 14:22:37 -0500 Subject: [PATCH 48/59] Fixes #5601 Do not append extra args when checking for pip command options --- packaging/pip | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/packaging/pip b/packaging/pip index 414dfcc0933..56775177ded 100644 --- a/packaging/pip +++ b/packaging/pip @@ -147,9 +147,8 @@ EXAMPLES = ''' def _get_cmd_options(module, cmd): thiscmd = cmd + " --help" rc, stdout, stderr = module.run_command(thiscmd) - #import epdb; epdb.serve() if rc != 0: - module.fail_json(msg="Could not get --help output from %s" % virtualenv) + module.fail_json(msg="Could not get output from %s: %s" % (thiscmd, stdout + stderr)) words = stdout.strip().split() cmd_options = [ x for x in words if x.startswith('--') ] @@ -322,7 +321,7 @@ def main(): is_package = is_vcs or is_tar or is_local_path # just a shortcut for bool if cmd_opts is None: - cmd_opts = _get_cmd_options(module, cmd) + cmd_opts = _get_cmd_options(module, '%s %s' % (pip, state_map[state])) if not is_package and state != 'absent' and use_mirrors and '--use-mirrors' in cmd_opts: cmd += ' --use-mirrors' From 587103e56446a4031a8aac73dba25520f0137f1f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ren=C3=A9=20Moser?= <mail@renemoser.net> Date: Wed, 15 Jan 2014 10:03:43 +0100 Subject: [PATCH 49/59] subversion: use LANG=C to prevent regex failures Fixes bug GH-5549. --- source_control/subversion | 1 + 1 file changed, 1 insertion(+) diff --git a/source_control/subversion b/source_control/subversion index 38417e801b5..0bb25605533 100644 --- a/source_control/subversion +++ b/source_control/subversion @@ -94,6 +94,7 @@ class Subversion(object): def _exec(self, args): bits = [ + 'LANG=C', self.svn_path, '--non-interactive', '--trust-server-cert', From cb6f7748d55e67912284b840fd1fd788a41cd1de Mon Sep 17 00:00:00 2001 From: Michael DeHaan <michael.dehaan@gmail.com> Date: Wed, 15 Jan 2014 09:52:17 -0500 Subject: [PATCH 50/59] All modules should be using /usr/bin/python in the shebang as ansible_python_interpreter processes this (see FAQ). Fixing for standardization purposes. 
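(Background, not part of this commit: Ansible rewrites a literal #!/usr/bin/python shebang to whatever interpreter the target host declares, so hosts that need a non-default Python point at it from inventory rather than from the module source, e.g.

    webserver ansible_python_interpreter=/usr/local/bin/python2.7

A hard-coded /usr/bin/env python2 shebang is not substituted, which is why these modules are being standardized.)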
--- cloud/docker | 3 +-- net_infrastructure/openvswitch_bridge | 2 +- net_infrastructure/openvswitch_port | 2 +- packaging/rpm_key | 2 +- system/modprobe | 2 +- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cloud/docker b/cloud/docker index 9abf63e18ab..d4fb1f372a9 100644 --- a/cloud/docker +++ b/cloud/docker @@ -1,5 +1,4 @@ -#!/usr/bin/env python -# +#!/usr/bin/python # (c) 2013, Cove Schneider # diff --git a/net_infrastructure/openvswitch_bridge b/net_infrastructure/openvswitch_bridge index 9e8d521d39e..4b05f4079f5 100644 --- a/net_infrastructure/openvswitch_bridge +++ b/net_infrastructure/openvswitch_bridge @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/python #coding: utf-8 -*- # This module is free software: you can redistribute it and/or modify diff --git a/net_infrastructure/openvswitch_port b/net_infrastructure/openvswitch_port index a33946e9a1b..00684496b45 100644 --- a/net_infrastructure/openvswitch_port +++ b/net_infrastructure/openvswitch_port @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/python #coding: utf-8 -*- # This module is free software: you can redistribute it and/or modify diff --git a/packaging/rpm_key b/packaging/rpm_key index a1c4b036a62..82532477348 100644 --- a/packaging/rpm_key +++ b/packaging/rpm_key @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python # -*- coding: utf-8 -*- """ diff --git a/system/modprobe b/system/modprobe index 80ec66d9b18..82ca86b9bd5 100755 --- a/system/modprobe +++ b/system/modprobe @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/python #coding: utf-8 -*- # This module is free software: you can redistribute it and/or modify From 272c3634cd3332565fde22555e3f52efa3e1bdcb Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Wed, 15 Jan 2014 10:25:27 -0500 Subject: [PATCH 51/59] sysctl module: append newline character to lines in the file --- system/sysctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/sysctl b/system/sysctl index 050c2effb1c..c16056937d7 100644 --- a/system/sysctl +++ b/system/sysctl @@ -246,7 +246,7 @@ class SysctlModule(object): f = open(tmp_path,"w") try: for l in self.fixed_lines: - f.write(l) + f.write(l.strip() + "\n") except IOError, e: self.module.fail_json(msg="Failed to write to file %s: %s" % (tmp_path, str(e))) f.flush() From 30f7b2d298a3a7b5458c6ae9652905151a191a55 Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Wed, 15 Jan 2014 17:10:10 -0500 Subject: [PATCH 52/59] Fixes #5486 Keep authorized key options in tact and ordered --- system/authorized_key | 38 +++++++++++++++++++++++++++++++------- 1 file changed, 31 insertions(+), 7 deletions(-) diff --git a/system/authorized_key b/system/authorized_key index 7626a9a07cb..d6ebfc0fcf1 100644 --- a/system/authorized_key +++ b/system/authorized_key @@ -114,6 +114,27 @@ import tempfile import re import shlex +class keydict(dict): + + """ a dictionary that maintains the order of keys as they are added """ + + # http://stackoverflow.com/questions/2328235/pythonextend-the-dict-class + + def __init__(self, *args, **kw): + super(keydict,self).__init__(*args, **kw) + self.itemlist = super(keydict,self).keys() + def __setitem__(self, key, value): + self.itemlist.append(key) + super(keydict,self).__setitem__(key, value) + def __iter__(self): + return iter(self.itemlist) + def keys(self): + return self.itemlist + def values(self): + return [self[key] for key in self] + def itervalues(self): + return (self[key] for key in self) + def keyfile(module, user, write=False, path=None, 
manage_dir=True): """ Calculate name of authorized keys file, optionally creating the @@ -176,7 +197,8 @@ def parseoptions(module, options): reads a string containing ssh-key options and returns a dictionary of those options ''' - options_dict = {} + options_dict = keydict() #ordered dict + key_order = [] if options: token_exp = [ # matches separator @@ -198,8 +220,10 @@ def parseoptions(module, options): if is_valid_option: if len(match.groups()) == 2: options_dict[match.group(1)] = match.group(2) + key_order.append(match.group(1)) else: options_dict[text] = None + key_order.append(text) break if not match: module.fail_json(msg="invalid option string: %s" % options) @@ -246,9 +270,8 @@ def parsekey(module, raw_key): # check for options if type_index is None: return None - elif type_index == 1: - # parse the options and store them - options = key_parts[0] + elif type_index > 0: + options = " ".join(key_parts[:type_index]) # parse the options (if any) options = parseoptions(module, options) @@ -292,7 +315,7 @@ def writekeys(module, filename, keys): option_str = "" if options: option_strings = [] - for option_key in sorted(options.keys()): + for option_key in options.keys(): if options[option_key]: option_strings.append("%s=\"%s\"" % (option_key, options[option_key])) else: @@ -330,10 +353,11 @@ def enforce_state(module, params): # Check our new keys, if any of them exist we'll continue. for new_key in key: + parsed_new_key = parsekey(module, new_key) if key_options is not None: - new_key = "%s %s" % (key_options, new_key) + parsed_options = parseoptions(module, key_options) + parsed_new_key = (parsed_new_key[0], parsed_new_key[1], parsed_options, parsed_new_key[3]) - parsed_new_key = parsekey(module, new_key) if not parsed_new_key: module.fail_json(msg="invalid key specified: %s" % new_key) From cf359df1c5bbebae4eb2091d9b613f94a0a205dd Mon Sep 17 00:00:00 2001 From: willthames <will@thames.id.au> Date: Thu, 16 Jan 2014 13:14:44 +1000 Subject: [PATCH 53/59] ec2_elb module: Minor improvements to documentation ELB(s) cause a weird formatting behaviour - it appears as EL**s** Added choices documentation to the state option --- cloud/ec2_elb | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/cloud/ec2_elb b/cloud/ec2_elb index 4488cf84608..6a8a25986ec 100644 --- a/cloud/ec2_elb +++ b/cloud/ec2_elb @@ -17,9 +17,9 @@ DOCUMENTATION = """ --- module: ec2_elb -short_description: De-registers or registers instances from EC2 ELB(s) +short_description: De-registers or registers instances from EC2 ELBs description: - - This module de-registers or registers an AWS EC2 instance from the ELB(s) + - This module de-registers or registers an AWS EC2 instance from the ELBs that it belongs to. - Returns fact "ec2_elbs" which is a list of elbs attached to the instance if state=absent is passed as an argument. 
@@ -32,6 +32,7 @@ options: description: - register or deregister the instance required: true + choices: ['present', 'absent'] instance_id: description: From a9c5aa8f78bb42d693b661f8e108cefe27f5a51b Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Thu, 16 Jan 2014 11:46:06 -0500 Subject: [PATCH 54/59] Remove unused variable from authorized_key --- system/authorized_key | 3 --- 1 file changed, 3 deletions(-) diff --git a/system/authorized_key b/system/authorized_key index d6ebfc0fcf1..ee613a90422 100644 --- a/system/authorized_key +++ b/system/authorized_key @@ -198,7 +198,6 @@ def parseoptions(module, options): and returns a dictionary of those options ''' options_dict = keydict() #ordered dict - key_order = [] if options: token_exp = [ # matches separator @@ -220,10 +219,8 @@ def parseoptions(module, options): if is_valid_option: if len(match.groups()) == 2: options_dict[match.group(1)] = match.group(2) - key_order.append(match.group(1)) else: options_dict[text] = None - key_order.append(text) break if not match: module.fail_json(msg="invalid option string: %s" % options) From df72690e6cbf4033e96d99af8f4f18c3bcafa8bb Mon Sep 17 00:00:00 2001 From: Scott Gilbert <scott@workdesktop.(none)> Date: Thu, 16 Jan 2014 12:14:37 -0600 Subject: [PATCH 55/59] Allow keystring passed to authorized_key to contain blank lines and comments --- system/authorized_key | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/authorized_key b/system/authorized_key index ee613a90422..c9b178c3742 100644 --- a/system/authorized_key +++ b/system/authorized_key @@ -341,7 +341,9 @@ def enforce_state(module, params): state = params.get("state", "present") key_options = params.get("key_options", None) - key = key.split('\n') + # extract indivial keys into an array, skipping blank lines and comments + key = [s for s in key.splitlines() if s and not s.startswith('#')] + # check current state -- just get the filename, don't create file do_write = False From 4ef5b60f3d5ff41fb4a6cc037c6c3f0745794b63 Mon Sep 17 00:00:00 2001 From: rishid <rishid@gmail.com> Date: Thu, 16 Jan 2014 16:08:51 -0500 Subject: [PATCH 56/59] Add support for Scientific Linux for hostname module hostname module was lacking support for Scientific Linux, this commit adds it. --- system/hostname | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/system/hostname b/system/hostname index 8a9401d2436..781bdcd08aa 100644 --- a/system/hostname +++ b/system/hostname @@ -256,6 +256,11 @@ class AmazonLinuxHostname(Hostname): distribution = 'Amazon' strategy_class = RedHatStrategy +class ScientificLinuxHostname(Hostname): + platform = 'Linux' + distribution = 'Scientific' + strategy_class = RedHatStrategy + # =========================================== class FedoraStrategy(GenericStrategy): From 03aba39b593f49b4156f7a30e38a8e7b6475eadf Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Thu, 16 Jan 2014 16:30:13 -0500 Subject: [PATCH 57/59] Fixes #5656 quote values with multiple columns before calling the sysctl command. 
Calling sysctl should also not be true by default --- system/sysctl | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/system/sysctl b/system/sysctl index c16056937d7..407d36286dc 100644 --- a/system/sysctl +++ b/system/sysctl @@ -173,6 +173,8 @@ class SysctlModule(object): # Use the sysctl command to set the current value def set_token_value(self, token, value): + if len(value.split()) > 0: + value = '"' + value + '"' thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) rc,out,err = self.module.run_command(thiscmd) if rc != 0: @@ -268,7 +270,7 @@ def main(): value = dict(aliases=['val'], required=False), state = dict(default='present', choices=['present', 'absent']), reload = dict(default=True, type='bool'), - sysctl_set = dict(default=True, type='bool'), + sysctl_set = dict(default=False, type='bool'), sysctl_file = dict(default='/etc/sysctl.conf') ), supports_check_mode=True From f3b86abc30c66b80bee3aafcc549a5ffc88d4ab8 Mon Sep 17 00:00:00 2001 From: James Tanner <tanner.jc@gmail.com> Date: Fri, 17 Jan 2014 11:30:07 -0500 Subject: [PATCH 58/59] Fixes #5661 Handle null values for sysctl parameters --- system/sysctl | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/system/sysctl b/system/sysctl index 407d36286dc..59b92eb6f48 100644 --- a/system/sysctl +++ b/system/sysctl @@ -120,7 +120,10 @@ class SysctlModule(object): # Whitespace is bad self.args['name'] = self.args['name'].strip() - self.args['value'] = self.args['value'].strip() + if self.args['value'] is not None: + self.args['value'] = self.args['value'].strip() + else: + self.args['value'] = "" thisname = self.args['name'] @@ -169,7 +172,7 @@ class SysctlModule(object): if rc != 0: return None else: - return shlex.split(out)[-1] + return out # Use the sysctl command to set the current value def set_token_value(self, token, value): From 624c563e4382dc1bd9d0147d328a82fdc109bb5c Mon Sep 17 00:00:00 2001 From: Simon Zimmermann <simon@insmo.com> Date: Fri, 17 Jan 2014 22:31:12 +0100 Subject: [PATCH 59/59] fix str format error due to missing '%' in sysctl module --- system/sysctl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/system/sysctl b/system/sysctl index 59b92eb6f48..3541a45aee9 100644 --- a/system/sysctl +++ b/system/sysctl @@ -181,7 +181,7 @@ class SysctlModule(object): thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value) rc,out,err = self.module.run_command(thiscmd) if rc != 0: - self.module.fail_json(msg='setting %s failed: %s' (token, out + err)) + self.module.fail_json(msg='setting %s failed: %s' % (token, out + err)) else: return rc
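Why the missing % mattered (illustration, not part of the patch): without the operator the parenthesized tuple is treated as a call on the string object, so the failure path raised its own exception instead of reporting the sysctl error. A quick interpreter session shows the difference (token and error text are made up for the example):

    >>> 'setting %s failed: %s' ('net.ipv4.ip_forward', 'permission denied')
    TypeError: 'str' object is not callable
    >>> 'setting %s failed: %s' % ('net.ipv4.ip_forward', 'permission denied')
    'setting net.ipv4.ip_forward failed: permission denied'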