diff --git a/cloud/cloudformation b/cloud/cloudformation
index e072f3923f8..02132f56325 100644
--- a/cloud/cloudformation
+++ b/cloud/cloudformation
@@ -196,7 +196,7 @@ def main():
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
template=dict(default=None, required=True),
- disable_rollback=dict(default=False),
+ disable_rollback=dict(default=False, type='bool'),
tags=dict(default=None)
)
)
@@ -250,7 +250,7 @@ def main():
operation = 'CREATE'
except Exception, err:
error_msg = boto_exception(err)
- if 'AlreadyExistsException' in error_msg:
+ if 'AlreadyExistsException' in error_msg or 'already exists' in error_msg:
update = True
else:
module.fail_json(msg=error_msg)
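Context for the broadened check above: `boto_exception` flattens a boto error into whatever message attribute it happens to carry, so the exception class name is not always part of the string — hence also matching on the plainer 'already exists' phrasing. A minimal sketch of that kind of helper, assuming the usual boto error attributes (the module's real implementation may differ in detail):

```python
def boto_exception(err):
    # Sketch of a generic boto error-to-string helper
    if hasattr(err, 'error_message'):
        return err.error_message      # BotoServerError carries this
    elif hasattr(err, 'message'):
        return err.message            # plain Exception subclasses
    return str(err)
```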
diff --git a/cloud/digital_ocean b/cloud/digital_ocean
index a6721a55da1..efebf5f1bcf 100644
--- a/cloud/digital_ocean
+++ b/cloud/digital_ocean
@@ -20,7 +20,7 @@ DOCUMENTATION = '''
module: digital_ocean
short_description: Create/delete a droplet/SSH_key in DigitalOcean
description:
- - Create/delete a droplet in DigitalOcean and optionally waits for it to be 'running', or deploy an SSH key.
+ - Create/delete a droplet in DigitalOcean and optionally wait for it to be 'running', or deploy an SSH key.
version_added: "1.3"
options:
command:
@@ -35,10 +35,10 @@ options:
choices: ['present', 'active', 'absent', 'deleted']
client_id:
description:
- - Digital Ocean manager id.
+ - DigitalOcean manager id.
api_key:
description:
- - Digital Ocean api key.
+ - DigitalOcean api key.
id:
description:
- Numeric, the droplet id you want to operate on.
@@ -47,34 +47,40 @@ options:
- String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key.
unique_name:
description:
- - Bool, require unique hostnames. By default, digital ocean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
+ - Bool, require unique hostnames. By default, DigitalOcean allows multiple hosts with the same name. Setting this to "yes" allows only one host per name. Useful for idempotence.
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
size_id:
description:
- - Numeric, this is the id of the size you would like the droplet created at.
+ - Numeric, this is the id of the size you would like the droplet created with.
image_id:
description:
- Numeric, this is the id of the image you would like the droplet created with.
region_id:
description:
- - "Numeric, this is the id of the region you would like your server"
+ - "Numeric, this is the id of the region you would like your server to be created in."
ssh_key_ids:
description:
- - Optional, comma separated list of ssh_key_ids that you would like to be added to the server
+ - Optional, comma separated list of ssh_key_ids that you would like to be added to the server.
virtio:
description:
- - "Bool, turn on virtio driver in droplet for improved network and storage I/O"
+ - "Bool, turn on virtio driver in droplet for improved network and storage I/O."
version_added: "1.4"
default: "yes"
choices: [ "yes", "no" ]
private_networking:
description:
- - "Bool, add an additional, private network interface to droplet for inter-droplet communication"
+ - "Bool, add an additional, private network interface to droplet for inter-droplet communication."
version_added: "1.4"
default: "no"
choices: [ "yes", "no" ]
+ backups_enabled:
+ description:
+ - Optional, Boolean, enables backups for your droplet.
+ version_added: "1.6"
+ default: "no"
+ choices: [ "yes", "no" ]
wait:
description:
- Wait for the droplet to be in state 'running' before returning. If wait is "no" an ip_address may not be returned.
@@ -164,11 +170,11 @@ try:
import dopy
from dopy.manager import DoError, DoManager
except ImportError, e:
- print "failed=True msg='dopy >= 0.2.2 required for this module'"
+ print "failed=True msg='dopy >= 0.2.3 required for this module'"
sys.exit(1)
-if dopy.__version__ < '0.2.2':
- print "failed=True msg='dopy >= 0.2.2 required for this module'"
+if dopy.__version__ < '0.2.3':
+ print "failed=True msg='dopy >= 0.2.3 required for this module'"
sys.exit(1)
class TimeoutError(DoError):
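A side note on the version gate above: comparing `dopy.__version__ < '0.2.3'` lexicographically happens to work for these values, but string comparison misorders versions once a component reaches two digits ('0.10.0' sorts before '0.2.3'). A tuple comparison is the safer pattern; a small illustrative sketch:

```python
def version_tuple(v):
    # '0.2.3' -> (0, 2, 3); assumes purely numeric dotted versions
    return tuple(int(part) for part in v.split('.'))

assert '0.10.0' < '0.2.3'                                 # lexicographic: wrong order
assert version_tuple('0.10.0') > version_tuple('0.2.3')   # numeric: correct
```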
@@ -229,8 +235,8 @@ class Droplet(JsonfyMixIn):
cls.manager = DoManager(client_id, api_key)
@classmethod
- def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False):
- json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking)
+ def add(cls, name, size_id, image_id, region_id, ssh_key_ids=None, virtio=True, private_networking=False, backups_enabled=False):
+ json = cls.manager.new_droplet(name, size_id, image_id, region_id, ssh_key_ids, virtio, private_networking, backups_enabled)
droplet = cls(json)
return droplet
@@ -333,7 +339,8 @@ def core(module):
region_id=getkeyordie('region_id'),
ssh_key_ids=module.params['ssh_key_ids'],
virtio=module.params['virtio'],
- private_networking=module.params['private_networking']
+ private_networking=module.params['private_networking'],
+ backups_enabled=module.params['backups_enabled'],
)
if droplet.is_powered_on():
@@ -348,7 +355,7 @@ def core(module):
elif state in ('absent', 'deleted'):
# First, try to find a droplet by id.
- droplet = Droplet.find(id=getkeyordie('id'))
+ droplet = Droplet.find(module.params['id'])
# If we couldn't find the droplet and the user is allowing unique
# hostnames, then check to see if a droplet with the specified
@@ -392,8 +399,9 @@ def main():
image_id = dict(type='int'),
region_id = dict(type='int'),
ssh_key_ids = dict(default=''),
- virtio = dict(type='bool', choices=BOOLEANS, default='yes'),
- private_networking = dict(type='bool', choices=BOOLEANS, default='no'),
+ virtio = dict(type='bool', default='yes'),
+ private_networking = dict(type='bool', default='no'),
+ backups_enabled = dict(type='bool', default='no'),
id = dict(aliases=['droplet_id'], type='int'),
unique_name = dict(type='bool', default='no'),
wait = dict(type='bool', default=True),
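Dropping `choices=BOOLEANS` from the argument spec is safe because `type='bool'` already normalizes the usual yes/no spellings before any choices check would run. Roughly what that coercion does — a sketch, not Ansible's exact implementation:

```python
BOOLEANS_TRUE = ('yes', 'on', 'true', '1', 1, True)
BOOLEANS_FALSE = ('no', 'off', 'false', '0', 0, False)

def coerce_bool(value):
    # Normalize a module parameter the way type='bool' roughly does
    if isinstance(value, str):
        value = value.lower()
    if value in BOOLEANS_TRUE:
        return True
    if value in BOOLEANS_FALSE:
        return False
    raise ValueError('cannot interpret %r as a boolean' % (value,))
```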
diff --git a/cloud/digital_ocean_domain b/cloud/digital_ocean_domain
new file mode 100644
index 00000000000..ef9338c1765
--- /dev/null
+++ b/cloud/digital_ocean_domain
@@ -0,0 +1,242 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: digital_ocean_domain
+short_description: Create/delete a DNS record in DigitalOcean
+description:
+ - Create/delete a DNS record in DigitalOcean.
+version_added: "1.6"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'active', 'absent', 'deleted']
+ client_id:
+ description:
+ - DigitalOcean manager id.
+ api_key:
+ description:
+ - DigitalOcean api key.
+ id:
+ description:
+ - Numeric, the droplet id you want to operate on.
+ name:
+ description:
+ - String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key, or the name of a domain.
+ ip:
+ description:
+ - The IP address to point a domain at.
+
+notes:
+ - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
+'''
+
+
+EXAMPLES = '''
+# Create a domain record
+
+- digital_ocean_domain: >
+ state=present
+ name=my.digitalocean.domain
+ ip=127.0.0.1
+
+# Create a droplet and a corresponding domain record
+
+- digital_ocean: >
+ state=present
+ name=test_droplet
+ size_id=1
+ region_id=2
+ image_id=3
+ register: test_droplet
+
+- digital_ocean_domain: >
+ state=present
+ name={{ test_droplet.name }}.my.domain
+ ip={{ test_droplet.ip_address }}
+'''
+
+import sys
+import os
+import time
+
+try:
+ from dopy.manager import DoError, DoManager
+except ImportError as e:
+ print "failed=True msg='dopy required for this module'"
+ sys.exit(1)
+
+class TimeoutError(DoError):
+ def __init__(self, msg, id):
+ super(TimeoutError, self).__init__(msg)
+ self.id = id
+
+class JsonfyMixIn(object):
+ def to_json(self):
+ return self.__dict__
+
+class DomainRecord(JsonfyMixIn):
+ manager = None
+
+ def __init__(self, json):
+ self.__dict__.update(json)
+ update_attr = __init__
+
+ def update(self, data = None, record_type = None):
+ json = self.manager.edit_domain_record(self.domain_id,
+ self.id,
+ record_type if record_type is not None else self.record_type,
+ data if data is not None else self.data)
+ self.__dict__.update(json)
+ return self
+
+ def destroy(self):
+ json = self.manager.destroy_domain_record(self.domain_id, self.id)
+ return json
+
+class Domain(JsonfyMixIn):
+ manager = None
+
+ def __init__(self, domain_json):
+ self.__dict__.update(domain_json)
+
+ def destroy(self):
+ self.manager.destroy_domain(self.id)
+
+ def records(self):
+ json = self.manager.all_domain_records(self.id)
+ return map(DomainRecord, json)
+
+ @classmethod
+ def add(cls, name, ip):
+ json = cls.manager.new_domain(name, ip)
+ return cls(json)
+
+ @classmethod
+ def setup(cls, client_id, api_key):
+ cls.manager = DoManager(client_id, api_key)
+ DomainRecord.manager = cls.manager
+
+ @classmethod
+ def list_all(cls):
+ domains = cls.manager.all_domains()
+ return map(cls, domains)
+
+ @classmethod
+ def find(cls, name=None, id=None):
+ if name is None and id is None:
+ return False
+
+ domains = Domain.list_all()
+
+ if id is not None:
+ for domain in domains:
+ if domain.id == id:
+ return domain
+
+ if name is not None:
+ for domain in domains:
+ if domain.name == name:
+ return domain
+
+ return False
+
+def core(module):
+ def getkeyordie(k):
+ v = module.params[k]
+ if v is None:
+ module.fail_json(msg='Unable to load %s' % k)
+ return v
+
+ try:
+ # params['client_id'] will be None even if client_id is not passed in
+ client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
+ api_key = module.params['api_key'] or os.environ['DO_API_KEY']
+ except KeyError, e:
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+ changed = True
+ state = module.params['state']
+
+ Domain.setup(client_id, api_key)
+ if state in ('present', 'active'):
+ domain = Domain.find(id=module.params["id"])
+
+ if not domain:
+ domain = Domain.find(name=getkeyordie("name"))
+
+ if not domain:
+ domain = Domain.add(getkeyordie("name"),
+ getkeyordie("ip"))
+ module.exit_json(changed=True, domain=domain.to_json())
+ else:
+ records = domain.records()
+ at_record = None
+ for record in records:
+ if record.name == "@":
+ at_record = record
+
+ if at_record and at_record.data != getkeyordie("ip"):
+ at_record.update(data=getkeyordie("ip"), record_type='A')
+ module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json())
+
+ module.exit_json(changed=False, domain=domain.to_json())
+
+ elif state in ('absent', 'deleted'):
+ domain = None
+ if "id" in module.params:
+ domain = Domain.find(id=module.params["id"])
+
+ if not domain and "name" in module.params:
+ domain = Domain.find(name=module.params["name"])
+
+ if not domain:
+ module.exit_json(changed=False, msg="Domain not found.")
+
+ event_json = domain.destroy()
+ module.exit_json(changed=True, event=event_json)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
+ client_id = dict(aliases=['CLIENT_ID'], no_log=True),
+ api_key = dict(aliases=['API_KEY'], no_log=True),
+ name = dict(type='str'),
+ id = dict(aliases=['droplet_id'], type='int'),
+ ip = dict(type='str'),
+ ),
+ required_one_of = (
+ ['id', 'name'],
+ ),
+ )
+
+ try:
+ core(module)
+ except TimeoutError as e:
+ module.fail_json(msg=str(e), id=e.id)
+ except (DoError, Exception) as e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
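The DoManager calls this new module leans on (`new_domain`, `all_domain_records`, `edit_domain_record`) can be exercised outside Ansible as well. A minimal standalone sketch, assuming valid credentials in the environment and the same JSON shapes the module consumes above (the domain name and IPs here are hypothetical):

```python
import os
from dopy.manager import DoManager

mgr = DoManager(os.environ['DO_CLIENT_ID'], os.environ['DO_API_KEY'])
domain = mgr.new_domain('example.test', '127.0.0.1')     # hypothetical domain
for record in mgr.all_domain_records(domain['id']):
    if record['name'] == '@':
        # Re-point the apex A record, mirroring the module's update path
        mgr.edit_domain_record(domain['id'], record['id'], 'A', '10.0.0.1')
```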
diff --git a/cloud/digital_ocean_sshkey b/cloud/digital_ocean_sshkey
new file mode 100644
index 00000000000..8ae7af47793
--- /dev/null
+++ b/cloud/digital_ocean_sshkey
@@ -0,0 +1,178 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: digital_ocean_sshkey
+short_description: Create/delete an SSH key in DigitalOcean
+description:
+ - Create/delete an SSH key.
+version_added: "1.6"
+options:
+ state:
+ description:
+ - Indicate desired state of the target.
+ default: present
+ choices: ['present', 'absent']
+ client_id:
+ description:
+ - DigitalOcean manager id.
+ api_key:
+ description:
+ - DigitalOcean api key.
+ id:
+ description:
+ - Numeric, the SSH key id you want to operate on.
+ name:
+ description:
+ - String, this is the name of an SSH key to create or destroy.
+ ssh_pub_key:
+ description:
+ - The public SSH key you want to add to your account.
+
+notes:
+ - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
+'''
+
+
+EXAMPLES = '''
+# Ensure an SSH key is present
+# If a key matches this name, will return the ssh key id and changed = False
+# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True
+
+- digital_ocean_sshkey: >
+ state=present
+ name=my_ssh_key
+ ssh_pub_key='ssh-rsa AAAA...'
+ client_id=XXX
+ api_key=XXX
+
+'''
+
+import sys
+import os
+import time
+
+try:
+ from dopy.manager import DoError, DoManager
+except ImportError as e:
+ print "failed=True msg='dopy required for this module'"
+ sys.exit(1)
+
+class TimeoutError(DoError):
+ def __init__(self, msg, id):
+ super(TimeoutError, self).__init__(msg)
+ self.id = id
+
+class JsonfyMixIn(object):
+ def to_json(self):
+ return self.__dict__
+
+class SSH(JsonfyMixIn):
+ manager = None
+
+ def __init__(self, ssh_key_json):
+ self.__dict__.update(ssh_key_json)
+ update_attr = __init__
+
+ def destroy(self):
+ self.manager.destroy_ssh_key(self.id)
+ return True
+
+ @classmethod
+ def setup(cls, client_id, api_key):
+ cls.manager = DoManager(client_id, api_key)
+
+ @classmethod
+ def find(cls, name):
+ if not name:
+ return False
+ keys = cls.list_all()
+ for key in keys:
+ if key.name == name:
+ return key
+ return False
+
+ @classmethod
+ def list_all(cls):
+ json = cls.manager.all_ssh_keys()
+ return map(cls, json)
+
+ @classmethod
+ def add(cls, name, key_pub):
+ json = cls.manager.new_ssh_key(name, key_pub)
+ return cls(json)
+
+def core(module):
+ def getkeyordie(k):
+ v = module.params[k]
+ if v is None:
+ module.fail_json(msg='Unable to load %s' % k)
+ return v
+
+ try:
+ # params['client_id'] will be None even if client_id is not passed in
+ client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
+ api_key = module.params['api_key'] or os.environ['DO_API_KEY']
+ except KeyError, e:
+ module.fail_json(msg='Unable to load %s' % e.message)
+
+ changed = True
+ state = module.params['state']
+
+ SSH.setup(client_id, api_key)
+ name = getkeyordie('name')
+ if state == 'present':
+ key = SSH.find(name)
+ if key:
+ module.exit_json(changed=False, ssh_key=key.to_json())
+ key = SSH.add(name, getkeyordie('ssh_pub_key'))
+ module.exit_json(changed=True, ssh_key=key.to_json())
+
+ elif state == 'absent':
+ key = SSH.find(name)
+ if not key:
+ module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
+ key.destroy()
+ module.exit_json(changed=True)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(choices=['present', 'absent'], default='present'),
+ client_id = dict(aliases=['CLIENT_ID'], no_log=True),
+ api_key = dict(aliases=['API_KEY'], no_log=True),
+ name = dict(type='str'),
+ id = dict(aliases=['droplet_id'], type='int'),
+ ssh_pub_key = dict(type='str'),
+ ),
+ required_one_of = (
+ ['id', 'name'],
+ ),
+ )
+
+ try:
+ core(module)
+ except TimeoutError as e:
+ module.fail_json(msg=str(e), id=e.id)
+ except (DoError, Exception) as e:
+ module.fail_json(msg=str(e))
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/cloud/docker b/cloud/docker
index a1e9a5074c8..3fb82fd7dc5 100644
--- a/cloud/docker
+++ b/cloud/docker
@@ -148,7 +148,7 @@ options:
- Set the state of the container
required: false
default: present
- choices: [ "present", "stopped", "absent", "killed", "restarted" ]
+ choices: [ "present", "running", "stopped", "absent", "killed", "restarted" ]
aliases: []
privileged:
description:
@@ -169,6 +169,20 @@ options:
default: null
aliases: []
version_added: "1.5"
+ stdin_open:
+ description:
+ - Keep stdin open
+ required: false
+ default: false
+ aliases: []
+ version_added: "1.6"
+ tty:
+ description:
+ - Allocate a pseudo-tty
+ required: false
+ default: false
+ aliases: []
+ version_added: "1.6"
author: Cove Schneider, Joshua Conner, Pavel Antonov
requirements: [ "docker-py >= 0.3.0" ]
'''
@@ -287,6 +301,7 @@ import sys
from urlparse import urlparse
try:
import docker.client
+ import docker.utils
from requests.exceptions import *
except ImportError, e:
HAS_DOCKER_PY = False
@@ -331,7 +346,7 @@ class DockerManager:
if self.module.params.get('volumes'):
self.binds = {}
self.volumes = {}
- vols = self.parse_list_from_param('volumes')
+ vols = self.module.params.get('volumes')
for vol in vols:
parts = vol.split(":")
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
@@ -345,46 +360,32 @@ class DockerManager:
self.lxc_conf = None
if self.module.params.get('lxc_conf'):
self.lxc_conf = []
- options = self.parse_list_from_param('lxc_conf')
+ options = self.module.params.get('lxc_conf')
for option in options:
parts = option.split(':')
self.lxc_conf.append({"Key": parts[0], "Value": parts[1]})
self.exposed_ports = None
if self.module.params.get('expose'):
- expose = self.parse_list_from_param('expose')
- self.exposed_ports = self.get_exposed_ports(expose)
+ self.exposed_ports = self.get_exposed_ports(self.module.params.get('expose'))
self.port_bindings = None
if self.module.params.get('ports'):
- ports = self.parse_list_from_param('ports')
- self.port_bindings = self.get_port_bindings(ports)
+ self.port_bindings = self.get_port_bindings(self.module.params.get('ports'))
self.links = None
if self.module.params.get('links'):
- links = self.parse_list_from_param('links')
- self.links = dict(map(lambda x: x.split(':'), links))
+ self.links = dict(map(lambda x: x.split(':'), self.module.params.get('links')))
self.env = None
if self.module.params.get('env'):
- env = self.parse_list_from_param('env')
- self.env = dict(map(lambda x: x.split("="), env))
+ self.env = dict(map(lambda x: x.split("="), self.module.params.get('env')))
# connect to docker server
docker_url = urlparse(module.params.get('docker_url'))
self.client = docker.Client(base_url=docker_url.geturl())
- def parse_list_from_param(self, param_name, delimiter=','):
- """
- Get a list from a module parameter, whether it's specified as a delimiter-separated string or is already in list form.
- """
- param_list = self.module.params.get(param_name)
- if not isinstance(param_list, list):
- param_list = param_list.split(delimiter)
- return param_list
-
-
def get_exposed_ports(self, expose_list):
"""
Parse the ports and protocols (TCP/UDP) to expose in the docker-py `create_container` call from the docker CLI-style syntax.
@@ -409,7 +410,9 @@ class DockerManager:
"""
binds = {}
for port in ports:
- parts = port.split(':')
+ # ports could potentially be an array like [80, 443], so we make sure they're strings
+ # before splitting
+ parts = str(port).split(':')
container_port = parts[-1]
if '/' not in container_port:
container_port = int(parts[-1])
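The `str(port)` cast above exists because YAML gives you integers for bare entries like `- 8080`, while colon forms arrive as strings; casting first makes the split uniform. A condensed, self-contained sketch of the same parsing rule:

```python
def split_port(port):
    # Accepts 8080, '8080', '8080:80' or '8080:80/udp' style specs
    parts = str(port).split(':')
    container_port = parts[-1]
    if '/' not in container_port:
        container_port = int(container_port)      # bare ports become ints
    host_port = parts[0] if len(parts) > 1 else None
    return host_port, container_port

print(split_port(8080))            # -> (None, 8080)
print(split_port('80:8080/udp'))   # -> ('80', '8080/udp')
```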
@@ -522,15 +525,19 @@ class DockerManager:
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
- 'volumes_from': self.module.params.get('volumes_from'),
'mem_limit': _human_to_bytes(self.module.params.get('memory_limit')),
'environment': self.env,
- 'dns': self.module.params.get('dns'),
'hostname': self.module.params.get('hostname'),
'detach': self.module.params.get('detach'),
'name': self.module.params.get('name'),
+ 'stdin_open': self.module.params.get('stdin_open'),
+ 'tty': self.module.params.get('tty'),
}
+ if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) < 0:
+ params['dns'] = self.module.params.get('dns')
+ params['volumes_from'] = self.module.params.get('volumes_from')
+
def do_create(count, params):
results = []
for _ in range(count):
@@ -558,6 +565,11 @@ class DockerManager:
'privileged': self.module.params.get('privileged'),
'links': self.links,
}
+
+ if docker.utils.compare_version('1.10', self.client.version()['ApiVersion']) >= 0:
+ params['dns'] = self.module.params.get('dns')
+ params['volumes_from'] = self.module.params.get('volumes_from')
+
for i in containers:
self.client.start(i['Id'], **params)
self.increment_counter('started')
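The gating in the two hunks above hinges on `docker.utils.compare_version(v1, v2)`, which is positive when `v2` is newer than `v1`; so `< 0` selects remote APIs older than 1.10 (where `dns`/`volumes_from` still belong to create) and `>= 0` selects 1.10+ (where they moved to start). A rough stand-in for readers without docker-py at hand — an illustrative sketch, not docker-py's code:

```python
from distutils.version import StrictVersion

def compare_version(v1, v2):
    # Positive if v2 is newer than v1, negative if older, 0 if equal
    s1, s2 = StrictVersion(v1), StrictVersion(v2)
    return 0 if s1 == s2 else (1 if s2 > s1 else -1)

for api in ('1.9', '1.10'):
    where = 'create' if compare_version('1.10', api) < 0 else 'start'
    print('API %s: dns/volumes_from go to %s()' % (api, where))
```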
@@ -616,12 +628,12 @@ def main():
count = dict(default=1),
image = dict(required=True),
command = dict(required=False, default=None),
- expose = dict(required=False, default=None),
- ports = dict(required=False, default=None),
+ expose = dict(required=False, default=None, type='list'),
+ ports = dict(required=False, default=None, type='list'),
publish_all_ports = dict(default=False, type='bool'),
- volumes = dict(default=None),
+ volumes = dict(default=None, type='list'),
volumes_from = dict(default=None),
- links = dict(default=None),
+ links = dict(default=None, type='list'),
memory_limit = dict(default=0),
memory_swap = dict(default=0),
docker_url = dict(default='unix://var/run/docker.sock'),
@@ -629,13 +641,15 @@ def main():
password = dict(),
email = dict(),
hostname = dict(default=None),
- env = dict(),
+ env = dict(type='list'),
dns = dict(),
detach = dict(default=True, type='bool'),
- state = dict(default='present', choices=['absent', 'present', 'stopped', 'killed', 'restarted']),
+ state = dict(default='running', choices=['absent', 'present', 'running', 'stopped', 'killed', 'restarted']),
debug = dict(default=False, type='bool'),
privileged = dict(default=False, type='bool'),
- lxc_conf = dict(default=None),
+ stdin_open = dict(default=False, type='bool'),
+ tty = dict(default=False, type='bool'),
+ lxc_conf = dict(default=None, type='list'),
name = dict(default=None)
)
)
@@ -662,25 +676,35 @@ def main():
changed = False
# start/stop containers
- if state == "present":
-
- # make sure a container with `name` is running
- if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
+ if state in [ "running", "present" ]:
+
+ # make sure a container with `name` exists, if not create and start it
+ if name and "/" + name not in map(lambda x: x.get('Name'), deployed_containers):
containers = manager.create_containers(1)
- manager.start_containers(containers)
-
- # start more containers if we don't have enough
- elif delta > 0:
- containers = manager.create_containers(delta)
- manager.start_containers(containers)
-
- # stop containers if we have too many
- elif delta < 0:
- containers_to_stop = running_containers[0:abs(delta)]
- containers = manager.stop_containers(containers_to_stop)
- manager.remove_containers(containers_to_stop)
-
- facts = manager.get_running_containers()
+ if state == "present": #otherwise it get (re)started later anyways..
+ manager.start_containers(containers)
+ running_containers = manager.get_running_containers()
+ deployed_containers = manager.get_deployed_containers()
+
+ if state == "running":
+ # make sure a container with `name` is running
+ if name and "/" + name not in map(lambda x: x.get('Name'), running_containers):
+ manager.start_containers(deployed_containers)
+
+ # start more containers if we don't have enough
+ elif delta > 0:
+ containers = manager.create_containers(delta)
+ manager.start_containers(containers)
+
+ # stop containers if we have too many
+ elif delta < 0:
+ containers_to_stop = running_containers[0:abs(delta)]
+ containers = manager.stop_containers(containers_to_stop)
+ manager.remove_containers(containers_to_stop)
+
+ facts = manager.get_running_containers()
+ else:
+ facts = manager.get_deployed_containers()
# stop and remove containers
elif state == "absent":
diff --git a/cloud/docker_image b/cloud/docker_image
index 5fcdfad573c..2f5a02b4521 100644
--- a/cloud/docker_image
+++ b/cloud/docker_image
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
#
# (c) 2014, Pavel Antonov
@@ -137,6 +137,9 @@ class DockerImageManager:
self.changed = True
for chunk in stream:
+ if not chunk:
+ continue
+
chunk_json = json.loads(chunk)
if 'error' in chunk_json:
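The new guard matters because the pull/build stream can yield empty chunks, and `json.loads('')` raises `ValueError`. A tiny self-contained illustration with fabricated chunks:

```python
import json

stream = ['{"status": "Pulling image"}', '', '{"error": "boom"}']  # fabricated
for chunk in stream:
    if not chunk:
        continue                 # json.loads('') would raise ValueError
    data = json.loads(chunk)
    if 'error' in data:
        print('failed: %s' % data['error'])
```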
diff --git a/cloud/ec2 b/cloud/ec2
index e050611fcf8..5935b7dc578 100644
--- a/cloud/ec2
+++ b/cloud/ec2
@@ -67,6 +67,13 @@ options:
required: true
default: null
aliases: []
+ spot_price:
+ version_added: "1.5"
+ description:
+ - Maximum spot price to bid. If not set, a regular on-demand instance is requested. A spot request is made with this maximum bid; when it is filled, the instance is started.
+ required: false
+ default: null
+ aliases: []
image:
description:
- I(emi) (or I(ami)) to use for the instance
@@ -97,24 +104,12 @@ options:
- how long before wait gives up, in seconds
default: 300
aliases: []
- ec2_url:
+ spot_wait_timeout:
+ version_added: "1.5"
description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
- required: false
- default: null
+ - how long to wait for the spot instance request to be fulfilled
+ default: 600
aliases: []
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_access_key', 'access_key' ]
count:
description:
- number of instances to launch
@@ -157,7 +152,7 @@ options:
default: null
aliases: []
assign_public_ip:
- version_added: "1.4"
+ version_added: "1.5"
description:
- when provisioning within vpc, assign a public IP address. Boto library must be 2.13.0+
required: false
@@ -184,6 +179,12 @@ options:
required: false
default: null
aliases: []
+ source_dest_check:
+ version_added: "1.6"
+ description:
+ - Enable or Disable the Source/Destination checks (for NAT instances and Virtual Routers)
+ required: false
+ default: true
state:
version_added: "1.3"
description:
@@ -198,6 +199,12 @@ options:
required: false
default: null
aliases: []
+ ebs_optimized:
+ version_added: "1.6"
+ description:
+ - whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
+ required: false
+ default: false
exact_count:
version_added: "1.5"
description:
@@ -212,17 +219,9 @@ options:
required: false
default: null
aliases: []
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
-requirements: [ "boto" ]
author: Seth Vidal, Tim Gerla, Lester Wade
+extends_documentation_fragment: aws
'''
EXAMPLES = '''
@@ -253,7 +252,7 @@ EXAMPLES = '''
db: postgres
monitoring: yes
-# Single instance with additional IOPS volume from snapshot
+# Single instance with additional IOPS volume from snapshot and volume delete on termination
local_action:
module: ec2
key_name: mykey
@@ -268,6 +267,7 @@ local_action:
device_type: io1
iops: 1000
volume_size: 100
+ delete_on_termination: true
monitoring: yes
# Multiple groups example
@@ -311,6 +311,19 @@ local_action:
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
+# Spot instance example
+- local_action:
+ module: ec2
+ spot_price: 0.24
+ spot_wait_timeout: 600
+ keypair: mykey
+ group_id: sg-1dc53f72
+ instance_type: m1.small
+ image: ami-6e649707
+ wait: yes
+ vpc_subnet_id: subnet-29e63245
+ assign_public_ip: yes
+
# Launch instances, runs some tasks
# and then terminate them
@@ -557,7 +570,8 @@ def get_instance_info(inst):
'root_device_type': inst.root_device_type,
'root_device_name': inst.root_device_name,
'state': inst.state,
- 'hypervisor': inst.hypervisor}
+ 'hypervisor': inst.hypervisor,
+ 'ebs_optimized': inst.ebs_optimized}
try:
instance_info['virtualization_type'] = getattr(inst,'virtualization_type')
except AttributeError:
@@ -620,6 +634,17 @@ def create_block_device(module, ec2, volume):
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'))
+def boto_supports_param_in_spot_request(ec2, param):
+ """
+ Check if the Boto library has a named parameter in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
+
+ ec2: authenticated ec2 connection object
+
+ Returns:
+ True if boto library has the named param as an argument on the request_spot_instances method, else False
+ """
+ method = getattr(ec2, 'request_spot_instances')
+ return param in method.func_code.co_varnames
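The probe above relies on CPython introspection: a function's named parameters show up in its code object's `co_varnames` (exposed as `func_code` on Python 2, `__code__` on Python 3). A standalone sketch of the same idea against a stand-in function (not boto's real method; this version also narrows the check to declared arguments):

```python
def supports_param(func, param):
    # True if `param` is a declared argument of `func`
    code = getattr(func, 'func_code', None) or func.__code__
    return param in code.co_varnames[:code.co_argcount]

def request_spot_instances(price, placement_group=None):   # stand-in only
    pass

print(supports_param(request_spot_instances, 'placement_group'))     # True
print(supports_param(request_spot_instances, 'network_interfaces'))  # False
```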
def enforce_count(module, ec2):
@@ -644,7 +669,6 @@ def enforce_count(module, ec2):
for inst in instance_dict_array:
instances.append(inst)
-
elif len(instances) > exact_count:
changed = True
to_remove = len(instances) - exact_count
@@ -690,6 +714,7 @@ def create_instances(module, ec2, override_count=None):
group_id = module.params.get('group_id')
zone = module.params.get('zone')
instance_type = module.params.get('instance_type')
+ spot_price = module.params.get('spot_price')
image = module.params.get('image')
if override_count:
count = override_count
@@ -700,6 +725,7 @@ def create_instances(module, ec2, override_count=None):
ramdisk = module.params.get('ramdisk')
wait = module.params.get('wait')
wait_timeout = int(module.params.get('wait_timeout'))
+ spot_wait_timeout = int(module.params.get('spot_wait_timeout'))
placement_group = module.params.get('placement_group')
user_data = module.params.get('user_data')
instance_tags = module.params.get('instance_tags')
@@ -708,8 +734,10 @@ def create_instances(module, ec2, override_count=None):
private_ip = module.params.get('private_ip')
instance_profile_name = module.params.get('instance_profile_name')
volumes = module.params.get('volumes')
+ ebs_optimized = module.params.get('ebs_optimized')
exact_count = module.params.get('exact_count')
count_tag = module.params.get('count_tag')
+ source_dest_check = module.boolean(module.params.get('source_dest_check'))
# group_id and group_name are exclusive of each other
if group_id and group_name:
@@ -760,18 +788,16 @@ def create_instances(module, ec2, override_count=None):
try:
params = {'image_id': image,
'key_name': key_name,
- 'client_token': id,
- 'min_count': count_remaining,
- 'max_count': count_remaining,
'monitoring_enabled': monitoring,
'placement': zone,
- 'placement_group': placement_group,
'instance_type': instance_type,
'kernel_id': kernel,
'ramdisk_id': ramdisk,
- 'private_ip_address': private_ip,
'user_data': user_data}
+ if ebs_optimized:
+ params['ebs_optimized'] = ebs_optimized
+
if boto_supports_profile_name_arg(ec2):
params['instance_profile_name'] = instance_profile_name
else:
@@ -788,13 +814,19 @@ def create_instances(module, ec2, override_count=None):
msg="assign_public_ip only available with vpc_subnet_id")
else:
- interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
- subnet_id=vpc_subnet_id,
- groups=group_id,
- associate_public_ip_address=assign_public_ip)
+ if private_ip:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ private_ip_address=private_ip,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
+ else:
+ interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
+ subnet_id=vpc_subnet_id,
+ groups=group_id,
+ associate_public_ip_address=assign_public_ip)
interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)
- params['network_interfaces'] = interfaces
-
+ params['network_interfaces'] = interfaces
else:
params['subnet_id'] = vpc_subnet_id
if vpc_subnet_id:
@@ -814,38 +846,88 @@ def create_instances(module, ec2, override_count=None):
params['block_device_map'] = bdm
- res = ec2.run_instances(**params)
- except boto.exception.BotoServerError, e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
-
- instids = [ i.id for i in res.instances ]
- while True:
- try:
- res.connection.get_all_instances(instids)
- break
- except boto.exception.EC2ResponseError, e:
- if "InvalidInstanceID.NotFound
" in str(e):
- # there's a race between start and get an instance
- continue
+ # check to see if we're using spot pricing first before starting instances
+ if not spot_price:
+ if assign_public_ip and private_ip:
+ params.update(dict(
+ min_count = count_remaining,
+ max_count = count_remaining,
+ client_token = id,
+ placement_group = placement_group,
+ ))
else:
- module.fail_json(msg = str(e))
+ params.update(dict(
+ min_count = count_remaining,
+ max_count = count_remaining,
+ client_token = id,
+ placement_group = placement_group,
+ private_ip_address = private_ip,
+ ))
+
+ res = ec2.run_instances(**params)
+ instids = [ i.id for i in res.instances ]
+ while True:
+ try:
+ ec2.get_all_instances(instids)
+ break
+ except boto.exception.EC2ResponseError as e:
+ if "InvalidInstanceID.NotFound
" in str(e):
+ # there's a race between start and get an instance
+ continue
+ else:
+ module.fail_json(msg = str(e))
+ else:
+ if private_ip:
+ module.fail_json(
+ msg='private_ip only available with on-demand (non-spot) instances')
+ if boto_supports_param_in_spot_request(ec2, 'placement_group'):
+ params['placement_group'] = placement_group
+ elif placement_group:
+ module.fail_json(
+ msg="placement_group parameter requires Boto version 2.3.0 or higher.")
+
+ params.update(dict(
+ count = count_remaining,
+ ))
+ res = ec2.request_spot_instances(spot_price, **params)
+
+ # Now we have to do the intermediate waiting
+ if wait:
+ spot_req_inst_ids = dict()
+ spot_wait_timeout = time.time() + spot_wait_timeout
+ while spot_wait_timeout > time.time():
+ reqs = ec2.get_all_spot_instance_requests()
+ for sirb in res:
+ if sirb.id in spot_req_inst_ids:
+ continue
+ for sir in reqs:
+ if sir.id == sirb.id and sir.instance_id is not None:
+ spot_req_inst_ids[sirb.id] = sir.instance_id
+ if len(spot_req_inst_ids) < count:
+ time.sleep(5)
+ else:
+ break
+ if spot_wait_timeout <= time.time():
+ module.fail_json(msg = "wait for spot requests timeout on %s" % time.asctime())
+ instids = spot_req_inst_ids.values()
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg = "Instance creation failed => %s: %s" % (e.error_code, e.error_message))
if instance_tags:
try:
ec2.create_tags(instids, instance_tags)
except boto.exception.EC2ResponseError, e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ module.fail_json(msg = "Instance tagging failed => %s: %s" % (e.error_code, e.error_message))
# wait here until the instances are up
- this_res = []
num_running = 0
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time() and num_running < len(instids):
- res_list = res.connection.get_all_instances(instids)
- if len(res_list) > 0:
- this_res = res_list[0]
- num_running = len([ i for i in this_res.instances if i.state=='running' ])
- else:
+ res_list = ec2.get_all_instances(instids)
+ num_running = 0
+ for res in res_list:
+ num_running += len([ i for i in res.instances if i.state=='running' ])
+ if len(res_list) <= 0:
# got a bad response of some sort, possibly due to
# stale/cached data. Wait a second and then try again
time.sleep(1)
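One detail worth calling out in the rewritten wait loop: `get_all_instances` returns reservations, not instances, so the running count has to be summed across every reservation rather than read from `res_list[0]` as before. The pattern in isolation, assuming a boto EC2 connection object:

```python
def count_running(ec2, instance_ids):
    # get_all_instances yields reservations; instances hang off each one
    running = 0
    for reservation in ec2.get_all_instances(instance_ids):
        running += len([i for i in reservation.instances
                        if i.state == 'running'])
    return running
```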
@@ -859,8 +941,14 @@ def create_instances(module, ec2, override_count=None):
# waiting took too long
module.fail_json(msg = "wait for instances running timeout on %s" % time.asctime())
- for inst in this_res.instances:
- running_instances.append(inst)
+ #We do this after the loop ends so that we end up with one list
+ for res in res_list:
+ running_instances.extend(res.instances)
+
+ # Enabled by default by Amazon
+ if not source_dest_check:
+ for inst in running_instances:
+ inst.modify_attribute('sourceDestCheck', False)
instance_dict_array = []
created_instance_ids = []
@@ -1020,13 +1108,15 @@ def main():
group_id = dict(type='list'),
zone = dict(aliases=['aws_zone', 'ec2_zone']),
instance_type = dict(aliases=['type']),
+ spot_price = dict(),
image = dict(),
kernel = dict(),
- count = dict(default='1'),
+ count = dict(type='int', default='1'),
monitoring = dict(type='bool', default=False),
ramdisk = dict(),
wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
+ spot_wait_timeout = dict(default=600),
placement_group = dict(),
user_data = dict(),
instance_tags = dict(type='dict'),
@@ -1035,10 +1125,12 @@ def main():
private_ip = dict(),
instance_profile_name = dict(),
instance_ids = dict(type='list'),
+ source_dest_check = dict(type='bool', default=True),
state = dict(default='present'),
exact_count = dict(type='int', default=None),
count_tag = dict(),
volumes = dict(type='list'),
+ ebs_optimized = dict(),
)
)
diff --git a/cloud/ec2_ami b/cloud/ec2_ami
index 866f2caf767..3baf70a438f 100644
--- a/cloud/ec2_ami
+++ b/cloud/ec2_ami
@@ -22,24 +22,6 @@ short_description: create or destroy an image in ec2, return imageid
description:
- Creates or deletes ec2 images. This module has a dependency on python-boto >= 2.5
options:
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
- required: false
- default: null
- aliases: []
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: null
- aliases: ['ec2_access_key', 'access_key' ]
instance_id:
description:
- instance id of the image to create
@@ -101,17 +83,9 @@ options:
required: false
default: null
aliases: []
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
-requirements: [ "boto" ]
author: Evan Duffield
+extends_documentation_fragment: aws
'''
# Thank you to iAcquire for sponsoring development of this module.
diff --git a/cloud/ec2_ami_search b/cloud/ec2_ami_search
new file mode 100644
index 00000000000..932dca855a8
--- /dev/null
+++ b/cloud/ec2_ami_search
@@ -0,0 +1,196 @@
+#!/usr/bin/python
+#
+# (c) 2013, Nimbis Services
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+DOCUMENTATION = '''
+---
+module: ec2_ami_search
+short_description: Retrieve AWS AMI for a given operating system.
+version_added: "1.6"
+description:
+ - Look up the most recent AMI on AWS for a given operating system.
+ - Returns C(ami), C(aki), C(ari), C(serial), C(tag)
+ - If there is no AKI or ARI associated with an image, these will be C(null).
+ - Only supports images from cloud-images.ubuntu.com
+ - 'Example output: C({"ami": "ami-69f5a900", "changed": false, "aki": "aki-88aa75e1", "tag": "release", "ari": null, "serial": "20131024"})'
+version_added: "1.6"
+options:
+ distro:
+ description: Linux distribution (e.g., C(ubuntu))
+ required: true
+ choices: ["ubuntu"]
+ release:
+ description: short name of the release (e.g., C(precise))
+ required: true
+ stream:
+ description: Type of release.
+ required: false
+ default: "server"
+ choices: ["server", "desktop"]
+ store:
+ description: Back-end store for instance
+ required: false
+ default: "ebs"
+ choices: ["ebs", "instance-store"]
+ arch:
+ description: CPU architecture
+ required: false
+ default: "amd64"
+ choices: ["i386", "amd64"]
+ region:
+ description: EC2 region
+ required: false
+ default: us-east-1
+ choices: ["ap-northeast-1", "ap-southeast-1", "ap-southeast-2",
+ "eu-west-1", "sa-east-1", "us-east-1", "us-west-1", "us-west-2"]
+ virt:
+ description: virtualization type
+ required: false
+ default: paravirtual
+ choices: ["paravirtual", "hvm"]
+
+author: Lorin Hochstein
+'''
+
+EXAMPLES = '''
+- name: Launch an Ubuntu 12.04 (Precise Pangolin) EC2 instance
+ hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Get the Ubuntu precise AMI
+ ec2_ami_search: distro=ubuntu release=precise region=us-west-1 store=instance-store
+ register: ubuntu_image
+ - name: Start the EC2 instance
+ ec2: image={{ ubuntu_image.ami }} instance_type=m1.small key_name=mykey
+'''
+
+import csv
+import json
+import urllib2
+import urlparse
+
+SUPPORTED_DISTROS = ['ubuntu']
+
+AWS_REGIONS = ['ap-northeast-1',
+ 'ap-southeast-1',
+ 'ap-southeast-2',
+ 'eu-west-1',
+ 'sa-east-1',
+ 'us-east-1',
+ 'us-west-1',
+ 'us-west-2']
+
+
+def get_url(module, url):
+ """ Get url and return response """
+ try:
+ r = urllib2.urlopen(url)
+ except (urllib2.HTTPError, urllib2.URLError), e:
+ code = getattr(e, 'code', -1)
+ module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
+ return r
+
+
+def ubuntu(module):
+ """ Get the ami for ubuntu """
+
+ release = module.params['release']
+ stream = module.params['stream']
+ store = module.params['store']
+ arch = module.params['arch']
+ region = module.params['region']
+ virt = module.params['virt']
+
+ url = get_ubuntu_url(release, stream)
+
+ req = get_url(module, url)
+ reader = csv.reader(req, delimiter='\t')
+ try:
+ ami, aki, ari, tag, serial = lookup_ubuntu_ami(reader, release, stream,
+ store, arch, region, virt)
+ module.exit_json(changed=False, ami=ami, aki=aki, ari=ari, tag=tag,
+ serial=serial)
+ except KeyError:
+ module.fail_json(msg="No matching AMI found")
+
+
+def lookup_ubuntu_ami(table, release, stream, store, arch, region, virt):
+ """ Look up the Ubuntu AMI that matches query given a table of AMIs
+
+ table: an iterable that returns a row of
+ (release, stream, tag, serial, region, ami, aki, ari, virt)
+ release: ubuntu release name
+ stream: 'server' or 'desktop'
+ store: 'ebs' or 'instance-store'
+ arch: 'i386' or 'amd64'
+ region: EC2 region
+ virt: 'paravirtual' or 'hvm'
+
+ Returns (ami, aki, ari, tag, serial)"""
+ expected = (release, stream, store, arch, region, virt)
+
+ for row in table:
+ (actual_release, actual_stream, tag, serial,
+ actual_store, actual_arch, actual_region, ami, aki, ari,
+ actual_virt) = row
+ actual = (actual_release, actual_stream, actual_store, actual_arch,
+ actual_region, actual_virt)
+ if actual == expected:
+ # aki and ari are sometimes blank
+ if aki == '':
+ aki = None
+ if ari == '':
+ ari = None
+ return (ami, aki, ari, tag, serial)
+
+ raise KeyError()
+
+
+def get_ubuntu_url(release, stream):
+ url = "https://cloud-images.ubuntu.com/query/%s/%s/released.current.txt"
+ return url % (release, stream)
+
+
+def main():
+ arg_spec = dict(
+ distro=dict(required=True, choices=SUPPORTED_DISTROS),
+ release=dict(required=True),
+ stream=dict(required=False, default='server',
+ choices=['desktop', 'server']),
+ store=dict(required=False, default='ebs',
+ choices=['ebs', 'instance-store']),
+ arch=dict(required=False, default='amd64',
+ choices=['i386', 'amd64']),
+ region=dict(required=False, default='us-east-1', choices=AWS_REGIONS),
+ virt=dict(required=False, default='paravirtual',
+ choices=['paravirtual', 'hvm'])
+ )
+ module = AnsibleModule(argument_spec=arg_spec)
+ distro = module.params['distro']
+
+ if distro == 'ubuntu':
+ ubuntu(module)
+ else:
+ module.fail_json(msg="Unsupported distro: %s" % distro)
+
+
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+
+if __name__ == '__main__':
+ main()
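Because `lookup_ubuntu_ami` just scans rows, it can be exercised without network access. A usage sketch, assuming the function above is in scope, with one fabricated row laid out in the released.current.txt column order the function unpacks:

```python
row = ('precise', 'server', 'release', '20131024', 'ebs',
       'amd64', 'us-west-1', 'ami-69f5a900', 'aki-88aa75e1', '',
       'paravirtual')   # fabricated values in the expected column order
print(lookup_ubuntu_ami([row], 'precise', 'server', 'ebs',
                        'amd64', 'us-west-1', 'paravirtual'))
# -> ('ami-69f5a900', 'aki-88aa75e1', None, 'release', '20131024')
```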
diff --git a/cloud/ec2_asg b/cloud/ec2_asg
new file mode 100644
index 00000000000..6528d951180
--- /dev/null
+++ b/cloud/ec2_asg
@@ -0,0 +1,219 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: ec2_asg
+short_description: Create or delete AWS Autoscaling Groups
+description:
+ - Can create or delete AWS Autoscaling Groups
+ - Works with the ec2_lc module to manage Launch Configurations
+version_added: "1.6"
+author: Gareth Rushgrove
+options:
+ state:
+ description:
+ - register or deregister the instance
+ required: true
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Unique name for group to be created or deleted
+ required: true
+ load_balancers:
+ description:
+ - List of ELB names to use for the group
+ required: false
+ availability_zones:
+ description:
+ - List of availability zone names in which to create the group.
+ required: false
+ launch_config_name:
+ description:
+ - Name of the Launch configuration to use for the group. See the ec2_lc module for managing these.
+ required: false
+ min_size:
+ description:
+ - Minimum number of instances in group
+ required: false
+ max_size:
+ description:
+ - Maximum number of instances in group
+ required: false
+ desired_capacity:
+ description:
+ - Desired number of instances in group
+ required: false
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+ vpc_zone_identifier:
+ description:
+ - List of VPC subnets to use
+ required: false
+ default: None
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+- ec2_asg:
+ name: special
+ load_balancers: 'lb1,lb2'
+ availability_zones: 'eu-west-1a,eu-west-1b'
+ launch_config_name: 'lc-1'
+ min_size: 1
+ max_size: 10
+ desired_capacity: 5
+ vpc_zone_identifier: 'subnet-abcd1234,subnet-1a2b3c4d'
+'''
+
+import sys
+import time
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ import boto.ec2.autoscale
+ from boto.ec2.autoscale import AutoScaleConnection, AutoScalingGroup
+ from boto.exception import BotoServerError
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+
+def enforce_required_arguments(module):
+ ''' Since many arguments are not required for autoscale group deletion,
+ they cannot be mandatory arguments for the module, so we enforce
+ them here for create/update '''
+ missing_args = []
+ for arg in ('min_size', 'max_size', 'launch_config_name', 'availability_zones'):
+ if module.params[arg] is None:
+ missing_args.append(arg)
+ if missing_args:
+ module.fail_json(msg="Missing required arguments for autoscaling group create/update: %s" % ",".join(missing_args))
+
+
+def create_autoscaling_group(connection, module):
+ enforce_required_arguments(module)
+
+ group_name = module.params.get('name')
+ load_balancers = module.params['load_balancers']
+ availability_zones = module.params['availability_zones']
+ launch_config_name = module.params.get('launch_config_name')
+ min_size = module.params['min_size']
+ max_size = module.params['max_size']
+ desired_capacity = module.params.get('desired_capacity')
+ vpc_zone_identifier = module.params.get('vpc_zone_identifier')
+
+ launch_configs = connection.get_all_launch_configurations(names=[launch_config_name])
+
+ as_groups = connection.get_all_groups(names=[group_name])
+
+ if not as_groups:
+ ag = AutoScalingGroup(
+ group_name=group_name,
+ load_balancers=load_balancers,
+ availability_zones=availability_zones,
+ launch_config=launch_configs[0],
+ min_size=min_size,
+ max_size=max_size,
+ desired_capacity=desired_capacity,
+ vpc_zone_identifier=vpc_zone_identifier,
+ connection=connection)
+
+ try:
+ connection.create_auto_scaling_group(ag)
+ module.exit_json(changed=True)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+ else:
+ as_group = as_groups[0]
+ changed = False
+ for attr in ('launch_config_name', 'max_size', 'min_size', 'desired_capacity',
+ 'vpc_zone_identifier', 'availability_zones'):
+ if getattr(as_group, attr) != module.params.get(attr):
+ changed = True
+ setattr(as_group, attr, module.params.get(attr))
+ # handle loadbalancers separately because None != []
+ load_balancers = module.params.get('load_balancers') or []
+ if as_group.load_balancers != load_balancers:
+ changed = True
+ as_group.load_balancers = module.params.get('load_balancers')
+
+ try:
+ if changed:
+ as_group.update()
+ module.exit_json(changed=changed)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+
+def delete_autoscaling_group(connection, module):
+ group_name = module.params.get('name')
+ groups = connection.get_all_groups(names=[group_name])
+ if groups:
+ group = groups[0]
+ group.shutdown_instances()
+
+ instances = True
+ while instances:
+ groups = connection.get_all_groups(names=[group_name])
+ for group in groups:
+ if group.name == group_name:
+ if not group.instances:
+ instances = False
+ time.sleep(10)
+
+ group.delete()
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type='str'),
+ load_balancers=dict(type='list'),
+ availability_zones=dict(type='list'),
+ launch_config_name=dict(type='str'),
+ min_size=dict(type='int'),
+ max_size=dict(type='int'),
+ desired_capacity=dict(type='int'),
+ vpc_zone_identifier=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params.get('state')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ try:
+ connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg=str(e))
+
+ if state == 'present':
+ create_autoscaling_group(connection, module)
+ elif state == 'absent':
+ delete_autoscaling_group(connection, module)
+
+main()
diff --git a/cloud/ec2_eip b/cloud/ec2_eip
index de041f42227..e1182108097 100644
--- a/cloud/ec2_eip
+++ b/cloud/ec2_eip
@@ -23,24 +23,6 @@ options:
required: false
choices: ['present', 'absent']
default: present
- ec2_url:
- description:
- - URL to use to connect to EC2-compatible cloud (by default the module will use EC2 endpoints)
- required: false
- default: null
- aliases: [ EC2_URL ]
- ec2_access_key:
- description:
- - EC2 access key. If not specified then the EC2_ACCESS_KEY environment variable is used.
- required: false
- default: null
- aliases: [ EC2_ACCESS_KEY ]
- ec2_secret_key:
- description:
- - EC2 secret key. If not specified then the EC2_SECRET_KEY environment variable is used.
- required: false
- default: null
- aliases: [ EC2_SECRET_KEY ]
region:
description:
- the EC2 region to use
@@ -53,16 +35,14 @@ options:
required: false
default: false
version_added: "1.4"
- validate_certs:
+ reuse_existing_ip_allowed:
description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+ - Reuse an EIP that is not associated with an instance (when available), instead of allocating a new one.
required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
+ default: false
+ version_added: "1.6"
-requirements: [ "boto" ]
+extends_documentation_fragment: aws
author: Lorin Hochstein
notes:
- This module will return C(public_ip) on success, which will contain the
@@ -175,13 +155,27 @@ def ip_is_associated_with_instance(ec2, public_ip, instance_id, module):
return False
-def allocate_address(ec2, domain, module):
- """ Allocate a new elastic IP address and return it """
+def allocate_address(ec2, domain, module, reuse_existing_ip_allowed):
+ """ Allocate a new elastic IP address (when needed) and return it """
# If we're in check mode, nothing else to do
if module.check_mode:
module.exit_json(change=True)
- address = ec2.allocate_address(domain=domain)
+ if reuse_existing_ip_allowed:
+ if domain:
+ domain_filter = { 'domain' : domain }
+ else:
+ domain_filter = { 'domain' : 'standard' }
+ all_addresses = ec2.get_all_addresses(filters=domain_filter)
+
+ unassociated_addresses = filter(lambda a: a.instance_id is None, all_addresses)
+ if unassociated_addresses:
+ address = unassociated_addresses[0]
+ else:
+ address = ec2.allocate_address(domain=domain)
+ else:
+ address = ec2.allocate_address(domain=domain)
+
return address
@@ -224,7 +218,8 @@ def main():
public_ip = dict(required=False, aliases= ['ip']),
state = dict(required=False, default='present',
choices=['present', 'absent']),
- in_vpc = dict(required=False, choices=BOOLEANS, default=False),
+ in_vpc = dict(required=False, type='bool', default=False),
+ reuse_existing_ip_allowed = dict(required=False, type='bool', default=False),
)
)
@@ -243,18 +238,19 @@ def main():
state = module.params.get('state')
in_vpc = module.params.get('in_vpc')
domain = "vpc" if in_vpc else None
+ reuse_existing_ip_allowed = module.params.get('reuse_existing_ip_allowed')
if state == 'present':
if public_ip is None:
if instance_id is None:
- address = allocate_address(ec2, domain, module)
+ address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
module.exit_json(changed=True, public_ip=address.public_ip)
else:
# Determine if the instance is inside a VPC or not
instance = find_instance(ec2, instance_id, module)
if instance.vpc_id != None:
domain = "vpc"
- address = allocate_address(ec2, domain, module)
+ address = allocate_address(ec2, domain, module, reuse_existing_ip_allowed)
else:
address = find_address(ec2, public_ip, module)
associate_ip_and_instance(ec2, address, instance_id, module)
diff --git a/cloud/ec2_elb b/cloud/ec2_elb
index ebd90aeda82..e76816fbca3 100644
--- a/cloud/ec2_elb
+++ b/cloud/ec2_elb
@@ -25,7 +25,6 @@ description:
if state=absent is passed as an argument.
- Will be marked changed when called only if there are ELBs found to operate on.
version_added: "1.2"
-requirements: [ "boto" ]
author: John Jarvis
options:
state:
@@ -33,29 +32,15 @@ options:
- register or deregister the instance
required: true
choices: ['present', 'absent']
-
instance_id:
description:
- EC2 Instance ID
required: true
-
ec2_elbs:
description:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_access_key', 'access_key' ]
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
@@ -82,7 +67,13 @@ options:
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
-
+ wait_timeout:
+ description:
+ - Number of seconds to wait for an instance to change state. If 0 then this module may return an error if a transient error occurs. If non-zero then any transient errors are ignored until the timeout is reached. Ignored when wait=no.
+ required: false
+ default: 0
+ version_added: "1.6"
+extends_documentation_fragment: aws
"""
EXAMPLES = """
@@ -124,16 +115,15 @@ class ElbManager:
"""Handles EC2 instance ELB registration and de-registration"""
def __init__(self, module, instance_id=None, ec2_elbs=None,
- aws_access_key=None, aws_secret_key=None, region=None):
- self.aws_access_key = aws_access_key
- self.aws_secret_key = aws_secret_key
+ region=None, **aws_connect_params):
self.module = module
self.instance_id = instance_id
self.region = region
+ self.aws_connect_params = aws_connect_params
self.lbs = self._get_instance_lbs(ec2_elbs)
self.changed = False
- def deregister(self, wait):
+ def deregister(self, wait, timeout):
"""De-register the instance from all ELBs and wait for the ELB
to report it out-of-service"""
@@ -146,18 +136,17 @@ class ElbManager:
return
if wait:
- self._await_elb_instance_state(lb, 'OutOfService', initial_state)
+ self._await_elb_instance_state(lb, 'OutOfService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
self.changed = True
- def register(self, wait, enable_availability_zone):
+ def register(self, wait, enable_availability_zone, timeout):
"""Register the instance for all ELBs and wait for the ELB
to report the instance in-service"""
for lb in self.lbs:
- if wait:
- initial_state = self._get_instance_health(lb)
+ initial_state = self._get_instance_health(lb)
if enable_availability_zone:
self._enable_availailability_zone(lb)
@@ -165,7 +154,7 @@ class ElbManager:
lb.register_instances([self.instance_id])
if wait:
- self._await_elb_instance_state(lb, 'InService', initial_state)
+ self._await_elb_instance_state(lb, 'InService', initial_state, timeout)
else:
# We cannot assume no change was made if we don't wait
# to find out
@@ -195,10 +184,12 @@ class ElbManager:
# lb.availability_zones
return instance.placement in lb.availability_zones
- def _await_elb_instance_state(self, lb, awaited_state, initial_state):
+ def _await_elb_instance_state(self, lb, awaited_state, initial_state, timeout):
"""Wait for an ELB to change state
lb: load balancer
awaited_state : state to poll for (string)"""
+
+ wait_timeout = time.time() + timeout
while True:
instance_state = self._get_instance_health(lb)
@@ -217,7 +208,8 @@ class ElbManager:
# If it's pending, we'll skip further checks and continue waiting
pass
elif (awaited_state == 'InService'
- and instance_state.reason_code == "Instance"):
+ and instance_state.reason_code == "Instance"
+ and time.time() >= wait_timeout):
# If the reason_code for the instance being out of service is
# "Instance" this indicates a failure state, e.g. the instance
# has failed a health check or the ELB does not have the
@@ -262,9 +254,8 @@ class ElbManager:
are attached to self.instance_id"""
try:
- endpoint="elasticloadbalancing.%s.amazonaws.com" % self.region
- connect_region = RegionInfo(name=self.region, endpoint=endpoint)
- elb = boto.ec2.elb.ELBConnection(self.aws_access_key, self.aws_secret_key, region=connect_region)
+ elb = connect_to_aws(boto.ec2.elb, self.region,
+ **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
@@ -283,23 +274,22 @@ class ElbManager:
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
- endpoint = "ec2.%s.amazonaws.com" % self.region
- connect_region = RegionInfo(name=self.region, endpoint=endpoint)
- ec2_conn = boto.ec2.EC2Connection(self.aws_access_key, self.aws_secret_key, region=connect_region)
+ ec2 = connect_to_aws(boto.ec2, self.region,
+ **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
- return ec2_conn.get_only_instances(instance_ids=[self.instance_id])[0]
+ return ec2.get_only_instances(instance_ids=[self.instance_id])[0]
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
- state={'required': True,
- 'choices': ['present', 'absent']},
+ state={'required': True},
instance_id={'required': True},
ec2_elbs={'default': None, 'required': False, 'type':'list'},
- enable_availability_zone={'default': True, 'required': False, 'choices': BOOLEANS, 'type': 'bool'},
- wait={'required': False, 'choices': BOOLEANS, 'default': True, 'type': 'bool'}
+ enable_availability_zone={'default': True, 'required': False, 'type': 'bool'},
+ wait={'required': False, 'default': True, 'type': 'bool'},
+ wait_timeout={'required': False, 'default': 0, 'type': 'int'}
)
)
@@ -307,21 +297,22 @@ def main():
argument_spec=argument_spec,
)
- # def get_ec2_creds(module):
- # return ec2_url, ec2_access_key, ec2_secret_key, region
- ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
ec2_elbs = module.params['ec2_elbs']
- region = module.params['region']
wait = module.params['wait']
enable_availability_zone = module.params['enable_availability_zone']
+ timeout = module.params['wait_timeout']
if module.params['state'] == 'present' and 'ec2_elbs' not in module.params:
module.fail_json(msg="ELBs are required for registration")
instance_id = module.params['instance_id']
- elb_man = ElbManager(module, instance_id, ec2_elbs, aws_access_key,
- aws_secret_key, region=region)
+ elb_man = ElbManager(module, instance_id, ec2_elbs,
+ region=region, **aws_connect_params)
if ec2_elbs is not None:
for elb in ec2_elbs:
@@ -330,9 +321,9 @@ def main():
module.fail_json(msg=msg)
if module.params['state'] == 'present':
- elb_man.register(wait, enable_availability_zone)
+ elb_man.register(wait, enable_availability_zone, timeout)
elif module.params['state'] == 'absent':
- elb_man.deregister(wait)
+ elb_man.deregister(wait, timeout)
ansible_facts = {'ec2_elbs': [lb.name for lb in elb_man.lbs]}
ec2_facts_result = dict(changed=elb_man.changed, ansible_facts=ansible_facts)
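
The wait_timeout plumbing added above follows a plain deadline idiom; as a minimal sketch (the helper name is illustrative, not part of the module):

    import time

    def wait_until(predicate, timeout):
        """Poll once a second until predicate() is true or the deadline passes.
        A timeout of 0 puts the deadline in the past, so there are no retries."""
        deadline = time.time() + timeout
        while True:
            if predicate():
                return True
            if time.time() >= deadline:
                return False
            time.sleep(1)
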
diff --git a/cloud/ec2_elb_lb b/cloud/ec2_elb_lb
index cc2c1454876..5de76cb5df0 100644
--- a/cloud/ec2_elb_lb
+++ b/cloud/ec2_elb_lb
@@ -22,7 +22,6 @@ short_description: Creates or destroys Amazon ELB.
- Returns information about the load balancer.
- Will be marked changed when called only if state is changed.
version_added: "1.5"
-requirements: [ "boto" ]
author: Jim Dalton
options:
state:
@@ -51,37 +50,23 @@ options:
- Purge existing availability zones on ELB that are not found in zones
required: false
default: false
- health_check:
+ security_group_ids:
description:
- - An associative array of health check configuration settigs (see example)
+ - A list of security groups to apply to the ELB
required: false
default: None
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_secret_key', 'secret_key']
- aws_access_key:
+ version_added: "1.6"
+ health_check:
description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
+ - An associative array of health check configuration settings (see example)
+ required: false
default: None
- aliases: ['ec2_access_key', 'access_key']
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
-
+extends_documentation_fragment: aws
"""
EXAMPLES = """
@@ -183,18 +168,18 @@ class ElbManager(object):
"""Handles ELB creation and destruction"""
def __init__(self, module, name, listeners=None, purge_listeners=None,
- zones=None, purge_zones=None, health_check=None,
- aws_access_key=None, aws_secret_key=None, region=None):
+ zones=None, purge_zones=None, security_group_ids=None, health_check=None,
+ region=None, **aws_connect_params):
self.module = module
self.name = name
self.listeners = listeners
self.purge_listeners = purge_listeners
self.zones = zones
self.purge_zones = purge_zones
+ self.security_group_ids = security_group_ids
self.health_check = health_check
- self.aws_access_key = aws_access_key
- self.aws_secret_key = aws_secret_key
+ self.aws_connect_params = aws_connect_params
self.region = region
self.changed = False
@@ -209,6 +194,7 @@ class ElbManager(object):
self._create_elb()
else:
self._set_zones()
+ self._set_security_groups()
self._set_elb_listeners()
self._set_health_check()
@@ -228,6 +214,7 @@ class ElbManager(object):
'name': self.elb.name,
'dns_name': self.elb.dns_name,
'zones': self.elb.availability_zones,
+ 'security_group_ids': self.elb.security_groups,
'status': self.status
}
@@ -262,11 +249,8 @@ class ElbManager(object):
def _get_elb_connection(self):
try:
- endpoint = "elasticloadbalancing.%s.amazonaws.com" % self.region
- connect_region = RegionInfo(name=self.region, endpoint=endpoint)
- return boto.ec2.elb.ELBConnection(self.aws_access_key,
- self.aws_secret_key,
- region=connect_region)
+ return connect_to_aws(boto.ec2.elb, self.region,
+ **self.aws_connect_params)
except boto.exception.NoAuthHandlerFound, e:
self.module.fail_json(msg=str(e))
@@ -281,6 +265,7 @@ class ElbManager(object):
listeners = [self._listener_as_tuple(l) for l in self.listeners]
self.elb = self.elb_conn.create_load_balancer(name=self.name,
zones=self.zones,
+ security_groups=self.security_group_ids,
complex_listeners=listeners)
if self.elb:
self.changed = True
@@ -405,6 +390,11 @@ class ElbManager(object):
if zones_to_disable:
self._disable_zones(zones_to_disable)
+ def _set_security_groups(self):
+ if self.security_group_ids is not None and set(self.elb.security_groups) != set(self.security_group_ids):
+ self.elb_conn.apply_security_groups_to_lb(self.name, self.security_group_ids)
+ self.changed = True
+
def _set_health_check(self):
"""Set health check values on ELB as needed"""
if self.health_check:
@@ -452,11 +442,10 @@ def main():
state={'required': True, 'choices': ['present', 'absent']},
name={'required': True},
listeners={'default': None, 'required': False, 'type': 'list'},
- purge_listeners={'default': True, 'required': False,
- 'choices': BOOLEANS, 'type': 'bool'},
+ purge_listeners={'default': True, 'required': False, 'type': 'bool'},
zones={'default': None, 'required': False, 'type': 'list'},
- purge_zones={'default': False, 'required': False,
- 'choices': BOOLEANS, 'type': 'bool'},
+ purge_zones={'default': False, 'required': False, 'type': 'bool'},
+ security_group_ids={'default': None, 'required': False, 'type': 'list'},
health_check={'default': None, 'required': False, 'type': 'dict'},
)
)
@@ -465,9 +454,9 @@ def main():
argument_spec=argument_spec,
)
- # def get_ec2_creds(module):
- # return ec2_url, ec2_access_key, ec2_secret_key, region
- ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
name = module.params['name']
state = module.params['state']
@@ -475,6 +464,7 @@ def main():
purge_listeners = module.params['purge_listeners']
zones = module.params['zones']
purge_zones = module.params['purge_zones']
+ security_group_ids = module.params['security_group_ids']
health_check = module.params['health_check']
if state == 'present' and not listeners:
@@ -484,8 +474,8 @@ def main():
module.fail_json(msg="At least one availability zone is required for ELB creation")
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
- purge_zones, health_check, aws_access_key,
- aws_secret_key, region=region)
+ purge_zones, security_group_ids, health_check,
+ region=region, **aws_connect_params)
if state == 'present':
elb_man.ensure_ok()
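
The _set_security_groups change above only calls out to AWS when the desired set differs from what the ELB currently reports; a minimal sketch of that comparison (assuming an existing boto ELB connection elb_conn and load balancer object elb; the group ids are illustrative):

    desired = ['sg-11111111', 'sg-22222222']
    if desired is not None and set(elb.security_groups) != set(desired):
        # apply_security_groups_to_lb replaces the ELB's security groups wholesale
        elb_conn.apply_security_groups_to_lb(elb.name, desired)
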
diff --git a/cloud/ec2_facts b/cloud/ec2_facts
index 1c17fa5b717..3fade4d1a05 100644
--- a/cloud/ec2_facts
+++ b/cloud/ec2_facts
@@ -21,7 +21,15 @@ DOCUMENTATION = '''
module: ec2_facts
short_description: Gathers facts about remote hosts within ec2 (aws)
version_added: "1.0"
-options: {}
+options:
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
description:
- This module fetches data from the metadata servers in ec2 (aws).
Eucalyptus cloud provides a similar service and this module should
@@ -41,7 +49,6 @@ EXAMPLES = '''
when: ansible_ec2_instance_type == "t1.micro"
'''
-import urllib2
import socket
import re
@@ -62,7 +69,8 @@ class Ec2Metadata(object):
'us-west-1',
'us-west-2')
- def __init__(self, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
+ def __init__(self, module, ec2_metadata_uri=None, ec2_sshdata_uri=None, ec2_userdata_uri=None):
+ self.module = module
self.uri_meta = ec2_metadata_uri or self.ec2_metadata_uri
self.uri_user = ec2_userdata_uri or self.ec2_userdata_uri
self.uri_ssh = ec2_sshdata_uri or self.ec2_sshdata_uri
@@ -70,12 +78,12 @@ class Ec2Metadata(object):
self._prefix = 'ansible_ec2_%s'
def _fetch(self, url):
- try:
- return urllib2.urlopen(url).read()
- except urllib2.HTTPError:
- return
- except urllib2.URLError:
- return
+ (response, info) = fetch_url(self.module, url, force=True)
+ if response:
+ data = response.read()
+ else:
+ data = None
+ return data
def _mangle_fields(self, fields, uri, filter_patterns=['public-keys-0']):
new_fields = {}
@@ -150,17 +158,20 @@ class Ec2Metadata(object):
return data
def main():
-
- ec2_facts = Ec2Metadata().run()
- ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
+ argument_spec = url_argument_spec()
module = AnsibleModule(
- argument_spec = dict(),
+ argument_spec = argument_spec,
supports_check_mode = True,
)
+
+ ec2_facts = Ec2Metadata(module).run()
+ ec2_facts_result = dict(changed=False, ansible_facts=ec2_facts)
+
module.exit_json(**ec2_facts_result)
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
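
fetch_url returns a (response, info) pair rather than raising on HTTP errors, which is what lets _fetch above drop the old urllib2 try/except; a sketch of the pattern (assuming module is an AnsibleModule built with url_argument_spec()):

    from ansible.module_utils.urls import fetch_url

    response, info = fetch_url(module, 'http://169.254.169.254/latest/meta-data/',
                               force=True)
    # response is None on failure; info carries the status code and error message
    data = response.read() if response else None
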
diff --git a/cloud/ec2_group b/cloud/ec2_group
index bbbb0fc24e0..56581ecd778 100644
--- a/cloud/ec2_group
+++ b/cloud/ec2_group
@@ -24,32 +24,19 @@ options:
required: false
rules:
description:
- - List of firewall rules to enforce in this group (see example).
- required: true
- region:
- description:
- - the EC2 region to use
- required: false
- default: null
- aliases: []
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints)
+ - List of firewall inbound rules to enforce in this group (see example).
required: false
- default: null
- aliases: []
- ec2_secret_key:
+ rules_egress:
description:
- - EC2 secret key
+ - List of firewall outbound rules to enforce in this group (see example).
required: false
- default: null
- aliases: ['aws_secret_key']
- ec2_access_key:
+ version_added: "1.6"
+ region:
description:
- - EC2 access key
+ - the EC2 region to use
required: false
default: null
- aliases: ['aws_access_key']
+ aliases: []
state:
version_added: "1.4"
description:
@@ -57,16 +44,13 @@ options:
required: false
default: 'present'
aliases: []
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
-requirements: [ "boto" ]
+extends_documentation_fragment: aws
+
+notes:
+ - If a rule declares a group_name and that group doesn't exist, it will be
+ automatically created. In that case, group_desc should be provided as well.
+ The module will refuse to create a depended-on group without a description.
'''
EXAMPLES = '''
@@ -99,6 +83,13 @@ EXAMPLES = '''
- proto: all
# the containing group name may be specified here
group_name: example
+ rules_egress:
+ - proto: tcp
+ from_port: 80
+ to_port: 80
+ group_name: example-other
+ # description to use if example-other needs to be created
+ group_desc: other example EC2 group
'''
try:
@@ -114,6 +105,55 @@ def addRulesToLookup(rules, prefix, dict):
dict["%s-%s-%s-%s-%s-%s" % (prefix, rule.ip_protocol, rule.from_port, rule.to_port,
grant.group_id, grant.cidr_ip)] = rule
+
+def get_target_from_rule(rule, name, groups):
+ """
+ Returns tuple of (group_id, ip) after validating rule params.
+
+ rule: Dict describing a rule.
+ name: Name of the security group being managed.
+ groups: Dict of all available security groups.
+
+ AWS accepts an ip range or a security group as the target of a rule. This
+ function validates the rule specification and returns either a non-None
+ group_id or a non-None ip range.
+ """
+
+ group_id = None
+ group_name = None
+ ip = None
+ target_group_created = False
+ if 'group_id' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_id OR cidr_ip, not both")
+ elif 'group_name' in rule and 'cidr_ip' in rule:
+ module.fail_json(msg="Specify group_name OR cidr_ip, not both")
+ elif 'group_id' in rule and 'group_name' in rule:
+ module.fail_json(msg="Specify group_id OR group_name, not both")
+ elif 'group_id' in rule:
+ group_id = rule['group_id']
+ elif 'group_name' in rule:
+ group_name = rule['group_name']
+ if group_name in groups:
+ group_id = groups[group_name].id
+ elif group_name == name:
+ group_id = group.id
+ groups[group_id] = group
+ groups[group_name] = group
+ else:
+ if not rule.get('group_desc', '').strip():
+ module.fail_json(msg="group %s will be automatically created by rule %s and no description was provided" % (group_name, rule))
+ if not module.check_mode:
+ auto_group = ec2.create_security_group(group_name, rule['group_desc'], vpc_id=vpc_id)
+ group_id = auto_group.id
+ groups[group_id] = auto_group
+ groups[group_name] = auto_group
+ target_group_created = True
+ elif 'cidr_ip' in rule:
+ ip = rule['cidr_ip']
+
+ return group_id, ip, target_group_created
+
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@@ -121,6 +161,7 @@ def main():
description=dict(required=True),
vpc_id=dict(),
rules=dict(),
+ rules_egress=dict(),
state = dict(default='present', choices=['present', 'absent']),
)
)
@@ -133,6 +174,7 @@ def main():
description = module.params['description']
vpc_id = module.params['vpc_id']
rules = module.params['rules']
+ rules_egress = module.params['rules_egress']
state = module.params.get('state')
changed = False
@@ -183,39 +225,29 @@ def main():
'''no match found, create it'''
if not module.check_mode:
group = ec2.create_security_group(name, description, vpc_id=vpc_id)
+
+ # When a group is created, an egress_rule ALLOW ALL
+ # to 0.0.0.0/0 is added automatically but it's not
+ # reflected in the object returned by the AWS API
+ # call. We re-read the group for getting an updated object
+ group = ec2.get_all_security_groups(group_ids=(group.id,))[0]
changed = True
else:
module.fail_json(msg="Unsupported state requested: %s" % state)
# create a lookup for all existing rules on the group
if group:
+
+ # Manage ingress rules
groupRules = {}
addRulesToLookup(group.rules, 'in', groupRules)
# Now, go through all provided rules and ensure they are there.
if rules:
for rule in rules:
- group_id = None
- group_name = None
- ip = None
- if 'group_id' in rule and 'cidr_ip' in rule:
- module.fail_json(msg="Specify group_id OR cidr_ip, not both")
- elif 'group_name' in rule and 'cidr_ip' in rule:
- module.fail_json(msg="Specify group_name OR cidr_ip, not both")
- elif 'group_id' in rule and 'group_name' in rule:
- module.fail_json(msg="Specify group_id OR group_name, not both")
- elif 'group_id' in rule:
- group_id = rule['group_id']
- elif 'group_name' in rule:
- group_name = rule['group_name']
- if group_name in groups:
- group_id = groups[group_name].id
- elif group_name == name:
- group_id = group.id
- groups[group_id] = group
- groups[group_name] = group
- elif 'cidr_ip' in rule:
- ip = rule['cidr_ip']
+ group_id, ip, target_group_created = get_target_from_rule(rule, name, groups)
+ if target_group_created:
+ changed = True
if rule['proto'] == 'all':
rule['proto'] = -1
@@ -246,6 +278,58 @@ def main():
group.revoke(rule.ip_protocol, rule.from_port, rule.to_port, grant.cidr_ip, grantGroup)
changed = True
+ # Manage egress rules
+ groupRules = {}
+ addRulesToLookup(group.rules_egress, 'out', groupRules)
+
+ # Now, go through all provided rules and ensure they are there.
+ if rules_egress:
+ for rule in rules_egress:
+ group_id, ip, target_group_created = get_target_from_rule(rule, name, groups)
+ if target_group_created:
+ changed = True
+
+ if rule['proto'] == 'all':
+ rule['proto'] = -1
+ rule['from_port'] = None
+ rule['to_port'] = None
+
+ # If rule already exists, don't later delete it
+ ruleId = "%s-%s-%s-%s-%s-%s" % ('out', rule['proto'], rule['from_port'], rule['to_port'], group_id, ip)
+ if ruleId in groupRules:
+ del groupRules[ruleId]
+ # Otherwise, add new rule
+ else:
+ grantGroup = None
+ if group_id:
+ grantGroup = groups[group_id].id
+
+ if not module.check_mode:
+ ec2.authorize_security_group_egress(
+ group_id=group.id,
+ ip_protocol=rule['proto'],
+ from_port=rule['from_port'],
+ to_port=rule['to_port'],
+ src_group_id=grantGroup,
+ cidr_ip=ip)
+ changed = True
+
+ # Finally, remove anything left in the groupRules -- these will be defunct rules
+ for rule in groupRules.itervalues():
+ for grant in rule.grants:
+ grantGroup = None
+ if grant.group_id:
+ grantGroup = groups[grant.group_id].id
+ if not module.check_mode:
+ ec2.revoke_security_group_egress(
+ group_id=group.id,
+ ip_protocol=rule.ip_protocol,
+ from_port=rule.from_port,
+ to_port=rule.to_port,
+ src_group_id=grantGroup,
+ cidr_ip=grant.cidr_ip)
+ changed = True
+
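For reference, the ruleId keys used for the add/remove bookkeeping above flatten a rule into a single string; e.g. an egress rule opening TCP 80 to 10.0.0.0/8 (illustrative values) keys as:

    prefix, proto, from_port, to_port, group_id, ip = 'out', 'tcp', 80, 80, None, '10.0.0.0/8'
    ruleId = "%s-%s-%s-%s-%s-%s" % (prefix, proto, from_port, to_port, group_id, ip)
    # -> 'out-tcp-80-80-None-10.0.0.0/8'
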
if group:
module.exit_json(changed=changed, group_id=group.id)
else:
diff --git a/cloud/ec2_key b/cloud/ec2_key
index 5e6950d2c8b..9c8274f764a 100644
--- a/cloud/ec2_key
+++ b/cloud/ec2_key
@@ -24,40 +24,28 @@ options:
required: false
default: null
aliases: []
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints)
- required: false
- default: null
- aliases: []
- ec2_secret_key:
- description:
- - EC2 secret key
- required: false
- default: null
- aliases: ['aws_secret_key', 'secret_key']
- ec2_access_key:
- description:
- - EC2 access key
- required: false
- default: null
- aliases: ['aws_access_key', 'access_key']
state:
description:
- create or delete keypair
required: false
default: 'present'
aliases: []
- validate_certs:
+ wait:
+ description:
+ - Wait for the specified action to complete before returning.
+ required: false
+ default: false
+ aliases: []
+ version_added: "1.6"
+ wait_timeout:
description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
+ - How long before wait gives up, in seconds
required: false
- default: "yes"
- choices: ["yes", "no"]
+ default: 300
aliases: []
- version_added: "1.5"
+ version_added: "1.6"
-requirements: [ "boto" ]
+extends_documentation_fragment: aws
author: Vincent Viallet
'''
@@ -104,12 +92,18 @@ except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
+import random
+import string
+
+
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
key_material=dict(required=False),
state = dict(default='present', choices=['present', 'absent']),
+ wait = dict(type='bool', default=False),
+ wait_timeout = dict(default=300),
)
)
module = AnsibleModule(
@@ -120,6 +114,8 @@ def main():
name = module.params['name']
state = module.params.get('state')
key_material = module.params.get('key_material')
+ wait = module.params.get('wait')
+ wait_timeout = int(module.params.get('wait_timeout'))
changed = False
@@ -134,6 +130,16 @@ def main():
'''found a match, delete it'''
try:
key.delete()
+ if wait:
+ start = time.time()
+ action_complete = False
+ while (time.time() - start) < wait_timeout:
+ if not ec2.get_key_pair(name):
+ action_complete = True
+ break
+ time.sleep(1)
+ if not action_complete:
+ module.fail_json(msg="timed out while waiting for the key to be removed")
except Exception, e:
module.fail_json(msg="Unable to delete key pair '%s' - %s" % (key, e))
else:
@@ -145,10 +151,45 @@ def main():
# Ensure requested key is present
elif state == 'present':
if key:
- '''existing key found'''
- # Should check if the fingerprint is the same - but lack of info
- # and different fingerprint provided (pub or private) depending if
- # the key has been created of imported.
+ # existing key found
+ if key_material:
+ # EC2's fingerprints are non-trivial to generate, so push this key
+ # to a temporary name and make ec2 calculate the fingerprint for us.
+ #
+ # http://blog.jbrowne.com/?p=23
+ # https://forums.aws.amazon.com/thread.jspa?messageID=352828
+
+ # find an unused name
+ test = 'empty'
+ while test:
+ randomchars = [random.choice(string.ascii_letters + string.digits) for x in range(0,10)]
+ tmpkeyname = "ansible-" + ''.join(randomchars)
+ test = ec2.get_key_pair(tmpkeyname)
+
+ # create tmp key
+ tmpkey = ec2.import_key_pair(tmpkeyname, key_material)
+ # get tmp key fingerprint
+ tmpfingerprint = tmpkey.fingerprint
+ # delete tmp key
+ tmpkey.delete()
+
+ if key.fingerprint != tmpfingerprint:
+ if not module.check_mode:
+ key.delete()
+ key = ec2.import_key_pair(name, key_material)
+
+ if wait:
+ start = time.time()
+ action_complete = False
+ while (time.time() - start) < wait_timeout:
+ if ec2.get_key_pair(name):
+ action_complete = True
+ break
+ time.sleep(1)
+ if not action_complete:
+ module.fail_json(msg="timed out while waiting for the key to be re-created")
+
+ changed = True
pass
# if the key doesn't exist, create it now
@@ -164,6 +205,18 @@ def main():
retrieve the private key
'''
key = ec2.create_key_pair(name)
+
+ if wait:
+ start = time.time()
+ action_complete = False
+ while (time.time() - start) < wait_timeout:
+ if ec2.get_key_pair(name):
+ action_complete = True
+ break
+ time.sleep(1)
+ if not action_complete:
+ module.fail_json(msg="timed out while waiting for the key to be created")
+
changed = True
if key:
diff --git a/cloud/ec2_lc b/cloud/ec2_lc
new file mode 100644
index 00000000000..91905a38894
--- /dev/null
+++ b/cloud/ec2_lc
@@ -0,0 +1,199 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+---
+module: ec2_lc
+short_description: Create or delete AWS Autoscaling Launch Configurations
+description:
+ - Can create or delete AWS Autoscaling Launch Configurations
+ - Works with the ec2_asg module to manage Autoscaling Groups
+version_added: "1.6"
+author: Gareth Rushgrove
+options:
+ state:
+ description:
+ - create or delete the launch configuration
+ required: true
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Unique name for configuration
+ required: true
+ image_id:
+ description:
+ - The AMI unique identifier to be used for the group
+ required: false
+ key_name:
+ description:
+ - The SSH key name to be used for access to managed instances
+ required: false
+ security_groups:
+ description:
+ - A list of security groups to apply to the instances
+ required: false
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+ volumes:
+ description:
+ - a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
+ required: false
+ default: null
+ aliases: []
+ user_data:
+ description:
+ - opaque blob of data which is made available to the ec2 instance
+ required: false
+ default: null
+ aliases: []
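+ instance_type:
+ description:
+ - The EC2 instance type to launch with this configuration
+ required: false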
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+- ec2_lc:
+ name: special
+ image_id: ami-XXX
+ key_name: default
+ security_groups: 'group,group2'
+
+'''
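
The volumes parameter is translated into boto block-device classes by create_block_device; roughly, one volume dict becomes the following (a sketch; the device name and sizes are illustrative):

    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping

    bdm = BlockDeviceMapping()
    # equivalent of {'device_name': '/dev/sdb', 'volume_size': 10,
    #                'device_type': 'io1', 'iops': 300, 'delete_on_termination': True}
    bdm['/dev/sdb'] = BlockDeviceType(size=10, volume_type='io1', iops=300,
                                      delete_on_termination=True)
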
+
+import sys
+import time
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
+ import boto.ec2.autoscale
+ from boto.ec2.autoscale import LaunchConfiguration
+ from boto.exception import BotoServerError
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+
+def create_block_device(module, volume):
+ # Not aware of a way to determine this programmatically
+ # http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
+ MAX_IOPS_TO_SIZE_RATIO = 30
+ if 'snapshot' not in volume and 'ephemeral' not in volume:
+ if 'volume_size' not in volume:
+ module.fail_json(msg='Size must be specified when creating a new volume or modifying the root volume')
+ if 'snapshot' in volume:
+ if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
+ module.fail_json(msg='io1 volumes must have an iops value set')
+ if 'ephemeral' in volume:
+ if 'snapshot' in volume:
+ module.fail_json(msg='Cannot set both ephemeral and snapshot')
+ return BlockDeviceType(snapshot_id=volume.get('snapshot'),
+ ephemeral_name=volume.get('ephemeral'),
+ size=volume.get('volume_size'),
+ volume_type=volume.get('device_type'),
+ delete_on_termination=volume.get('delete_on_termination', False),
+ iops=volume.get('iops'))
+
+
+def create_launch_config(connection, module):
+ name = module.params.get('name')
+ image_id = module.params.get('image_id')
+ key_name = module.params.get('key_name')
+ security_groups = module.params['security_groups']
+ user_data = module.params.get('user_data')
+ volumes = module.params['volumes']
+ instance_type = module.params.get('instance_type')
+ bdm = BlockDeviceMapping()
+
+ if volumes:
+ for volume in volumes:
+ if 'device_name' not in volume:
+ module.fail_json(msg='Device name must be set for volume')
+ # Minimum volume size is 1GB. We'll use volume size explicitly set to 0
+ # to be a signal not to create this volume
+ if 'volume_size' not in volume or int(volume['volume_size']) > 0:
+ bdm[volume['device_name']] = create_block_device(module, volume)
+
+ lc = LaunchConfiguration(
+ name=name,
+ image_id=image_id,
+ key_name=key_name,
+ security_groups=security_groups,
+ user_data=user_data,
+ block_device_mappings=[bdm],
+ instance_type=instance_type)
+
+ launch_configs = connection.get_all_launch_configurations(names=[name])
+ changed = False
+ if not launch_configs:
+ try:
+ connection.create_launch_configuration(lc)
+ launch_configs = connection.get_all_launch_configurations(names=[name])
+ changed = True
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+ result = launch_configs[0]
+
+ module.exit_json(changed=changed, name=result.name, created_time=str(result.created_time),
+ image_id=result.image_id, arn=result.launch_configuration_arn,
+ security_groups=result.security_groups, instance_type=instance_type)
+
+
+def delete_launch_config(connection, module):
+ name = module.params.get('name')
+ launch_configs = connection.get_all_launch_configurations(names=[name])
+ if launch_configs:
+ launch_configs[0].delete()
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type='str'),
+ image_id=dict(type='str'),
+ key_name=dict(type='str'),
+ security_groups=dict(type='list'),
+ user_data=dict(type='str'),
+ volumes=dict(type='list'),
+ instance_type=dict(type='str'),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ try:
+ connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg=str(e))
+
+ state = module.params.get('state')
+
+ if state == 'present':
+ create_launch_config(connection, module)
+ elif state == 'absent':
+ delete_launch_config(connection, module)
+
+main()
diff --git a/cloud/ec2_metric_alarm b/cloud/ec2_metric_alarm
new file mode 100644
index 00000000000..4791330dbe2
--- /dev/null
+++ b/cloud/ec2_metric_alarm
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: ec2_metric_alarm
+short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
+description:
+ - Can create or delete AWS metric alarms
+ - Metrics you wish to alarm on must already exist
+version_added: "1.6"
+author: Zacharie Eakin
+options:
+ state:
+ description:
+ - create or delete the alarm
+ required: true
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Unique name for the alarm
+ required: true
+ metric:
+ description:
+ - Name of the monitored metric (e.g. CPUUtilization)
+ - Metric must already exist
+ required: false
+ namespace:
+ description:
+ - Name of the appropriate namespace, which determines the category it will appear under in cloudwatch
+ required: false
+ options: ['AWS/AutoScaling','AWS/Billing','AWS/DynamoDB','AWS/ElastiCache','AWS/EBS','AWS/EC2','AWS/ELB','AWS/ElasticMapReduce','AWS/OpsWorks','AWS/Redshift','AWS/RDS','AWS/Route53','AWS/SNS','AWS/SQS','AWS/StorageGateway']
+ statistic:
+ description:
+ - Operation applied to the metric
+ - Works in conjunction with period and evaluation_periods to determine the comparison value
+ required: false
+ options: ['SampleCount','Average','Sum','Minimum','Maximum']
+ comparison:
+ description:
+ - Determines how the threshold value is compared
+ required: false
+ options: ['<=','<','>','>=']
+ threshold:
+ description:
+ - Sets the min/max bound for triggering the alarm
+ required: false
+ period:
+ description:
+ - The time (in seconds) between metric evaluations
+ required: false
+ evaluation_periods:
+ description:
+ - The number of times the metric is evaluated before the alarm state is determined
+ required: false
+ unit:
+ description:
+ - The threshold's unit of measurement
+ required: false
+ options: ['Seconds','Microseconds','Milliseconds','Bytes','Kilobytes','Megabytes','Gigabytes','Terabytes','Bits','Kilobits','Megabits','Gigabits','Terabits','Percent','Count','Bytes/Second','Kilobytes/Second','Megabytes/Second','Gigabytes/Second','Terabytes/Second','Bits/Second','Kilobits/Second','Megabits/Second','Gigabits/Second','Terabits/Second','Count/Second','None']
+ description:
+ description:
+ - A longer description of the alarm
+ required: false
+ dimensions:
+ description:
+ - The dimensions of the metric the alarm is applied to
+ required: false
+ alarm_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the 'alarm' status
+ required: false
+ insufficient_data_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
+ required: false
+ ok_actions:
+ description:
+ - A list of the names of action(s) to take when the alarm is in the 'ok' status
+ required: false
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+ - name: create alarm
+ ec2_metric_alarm:
+ state: present
+ region: ap-southeast-2
+ name: "cpu-low"
+ metric: "CPUUtilization"
+ namespace: "AWS/EC2"
+ statistic: Average
+ comparison: "<="
+ threshold: 5.0
+ period: 300
+ evaluation_periods: 3
+ unit: "Percent"
+ description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
+ dimensions: {'InstanceId':'i-XXX'}
+ alarm_actions: ["action1","action2"]
+
+
+'''
+
+import sys
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ import boto.ec2.cloudwatch
+ from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
+ from boto.exception import BotoServerError
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+
+def create_metric_alarm(connection, module):
+
+ name = module.params.get('name')
+ metric = module.params.get('metric')
+ namespace = module.params.get('namespace')
+ statistic = module.params.get('statistic')
+ comparison = module.params.get('comparison')
+ threshold = module.params.get('threshold')
+ period = module.params.get('period')
+ evaluation_periods = module.params.get('evaluation_periods')
+ unit = module.params.get('unit')
+ description = module.params.get('description')
+ dimensions = module.params.get('dimensions')
+ alarm_actions = module.params.get('alarm_actions')
+ insufficient_data_actions = module.params.get('insufficient_data_actions')
+ ok_actions = module.params.get('ok_actions')
+
+ alarms = connection.describe_alarms(alarm_names=[name])
+
+ if not alarms:
+
+ alm = MetricAlarm(
+ name=name,
+ metric=metric,
+ namespace=namespace,
+ statistic=statistic,
+ comparison=comparison,
+ threshold=threshold,
+ period=period,
+ evaluation_periods=evaluation_periods,
+ unit=unit,
+ description=description,
+ dimensions=dimensions,
+ alarm_actions=alarm_actions,
+ insufficient_data_actions=insufficient_data_actions,
+ ok_actions=ok_actions
+ )
+ try:
+ connection.create_alarm(alm)
+ module.exit_json(changed=True)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+ else:
+ alarm = alarms[0]
+ changed = False
+
+ for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
+ if getattr(alarm, attr) != module.params.get(attr):
+ changed = True
+ setattr(alarm, attr, module.params.get(attr))
+ #this is to deal with a current bug where you cannot assign the symbolic comparators ('<=', '<', '>', '>=') when modifying an existing alarm
+ comparison = alarm.comparison
+ comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
+ alarm.comparison = comparisons[comparison]
+
+ dim1 = module.params.get('dimensions')
+ dim2 = alarm.dimensions
+
+ for keys in dim1:
+ if not isinstance(dim1[keys], list):
+ dim1[keys] = [dim1[keys]]
+ if dim1[keys] != dim2[keys]:
+ changed=True
+ setattr(alarm, 'dimensions', dim1)
+
+ for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
+ action = module.params.get(attr) or []
+ if getattr(alarm, attr) != action:
+ changed = True
+ setattr(alarm, attr, module.params.get(attr))
+
+ try:
+ if changed:
+ connection.create_alarm(alarm)
+ module.exit_json(changed=changed)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+
+def delete_metric_alarm(connection, module):
+ name = module.params.get('name')
+
+ alarms = connection.describe_alarms(alarm_names=[name])
+
+ if alarms:
+ try:
+ connection.delete_alarms([name])
+ module.exit_json(changed=True)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name=dict(required=True, type='str'),
+ metric=dict(type='str'),
+ namespace=dict(type='str', choices=['AWS/AutoScaling', 'AWS/Billing', 'AWS/DynamoDB', 'AWS/ElastiCache', 'AWS/EBS', 'AWS/EC2',
+ 'AWS/ELB', 'AWS/ElasticMapReduce', 'AWS/OpsWorks', 'AWS/Redshift', 'AWS/RDS', 'AWS/Route53', 'AWS/SNS', 'AWS/SQS', 'AWS/StorageGateway']),
+ statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
+ comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
+ threshold=dict(type='float'),
+ period=dict(type='int'),
+ unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
+ evaluation_periods=dict(type='int'),
+ description=dict(type='str'),
+ dimensions=dict(type='dict'),
+ alarm_actions=dict(type='list'),
+ insufficient_data_actions=dict(type='list'),
+ ok_actions=dict(type='list'),
+ state=dict(default='present', choices=['present', 'absent']),
+ region=dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ state = module.params.get('state')
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ try:
+ connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg=str(e))
+
+ if state == 'present':
+ create_metric_alarm(connection, module)
+ elif state == 'absent':
+ delete_metric_alarm(connection, module)
+
+main()
diff --git a/cloud/ec2_scaling_policy b/cloud/ec2_scaling_policy
new file mode 100755
index 00000000000..4e66f463063
--- /dev/null
+++ b/cloud/ec2_scaling_policy
@@ -0,0 +1,180 @@
+#!/usr/bin/python
+
+DOCUMENTATION = """
+module: ec2_scaling_policy
+short_description: Create or delete AWS scaling policies for Autoscaling groups
+description:
+ - Can create or delete scaling policies for autoscaling groups
+ - Referenced autoscaling groups must already exist
+version_added: "1.6"
+author: Zacharie Eakin
+options:
+ state:
+ description:
+ - create or delete the scaling policy
+ required: true
+ choices: ['present', 'absent']
+ name:
+ description:
+ - Unique name for the scaling policy
+ required: true
+ asg_name:
+ description:
+ - Name of the associated autoscaling group
+ required: true
+ adjustment_type:
+ description:
+ - The type of change in capacity of the autoscaling group
+ required: false
+ choices: ['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']
+ scaling_adjustment:
+ description:
+ - The amount by which the autoscaling group is adjusted by the policy
+ required: false
+ min_adjustment_step:
+ description:
+ - Minimum amount of adjustment when policy is triggered
+ required: false
+ cooldown:
+ description:
+ - The minimum period of time (in seconds) between autoscaling actions
+ required: false
+extends_documentation_fragment: aws
+"""
+
+EXAMPLES = '''
+- ec2_scaling_policy:
+ state: present
+ region: US-XXX
+ name: "scaledown-policy"
+ adjustment_type: "ChangeInCapacity"
+ asg_name: "slave-pool"
+ scaling_adjustment: -1
+ min_adjustment_step: 1
+ cooldown: 300
+'''
+
+
+import sys
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ import boto.ec2.autoscale
+ from boto.ec2.autoscale import ScalingPolicy
+ from boto.exception import BotoServerError
+
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+
+def create_scaling_policy(connection, module):
+ sp_name = module.params.get('name')
+ adjustment_type = module.params.get('adjustment_type')
+ asg_name = module.params.get('asg_name')
+ scaling_adjustment = module.params.get('scaling_adjustment')
+ min_adjustment_step = module.params.get('min_adjustment_step')
+ cooldown = module.params.get('cooldown')
+
+ scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+
+ if not scalingPolicies:
+ sp = ScalingPolicy(
+ name=sp_name,
+ adjustment_type=adjustment_type,
+ as_name=asg_name,
+ scaling_adjustment=scaling_adjustment,
+ min_adjustment_step=min_adjustment_step,
+ cooldown=cooldown)
+
+ try:
+ connection.create_scaling_policy(sp)
+ module.exit_json(changed=True)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+ else:
+ policy = scalingPolicies[0]
+ changed = False
+
+ #min_adjustment_step attribute is only relevant if the adjustment_type
+ #is set to percentage change in capacity, so it is a special case
+ if getattr(policy, 'adjustment_type') == 'PercentChangeInCapacity':
+ if getattr(policy, 'min_adjustment_step') != module.params.get('min_adjustment_step'):
+ changed = True
+
+ #set the min adjustment step in case the user decided to change their adjustment type to percentage
+ setattr(policy, 'min_adjustment_step', module.params.get('min_adjustment_step'))
+
+ #check the remaining attributes
+ for attr in ('adjustment_type','scaling_adjustment','cooldown'):
+ if getattr(policy, attr) != module.params.get(attr):
+ changed = True
+ setattr(policy, attr, module.params.get(attr))
+
+ try:
+ if changed:
+ connection.create_scaling_policy(policy)
+ policy = connection.get_all_policies(policy_names=[sp_name])[0]
+ module.exit_json(changed=changed, name=policy.name, arn=policy.policy_arn, as_name=policy.as_name, scaling_adjustment=policy.scaling_adjustment, cooldown=policy.cooldown, adjustment_type=policy.adjustment_type, min_adjustment_step=policy.min_adjustment_step)
+ module.exit_json(changed=changed)
+ except BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+
+def delete_scaling_policy(connection, module):
+ sp_name = module.params.get('name')
+ asg_name = module.params.get('asg_name')
+
+ scalingPolicies = connection.get_all_policies(as_group=asg_name,policy_names=[sp_name])
+
+ if scalingPolicies:
+ try:
+ connection.delete_policy(sp_name, asg_name)
+ module.exit_json(changed=True)
+ except BotoServerError, e:
+ module.exit_json(changed=False, msg=str(e))
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ name = dict(required=True, type='str'),
+ adjustment_type = dict(type='str', choices=['ChangeInCapacity','ExactCapacity','PercentChangeInCapacity']),
+ asg_name = dict(required=True, type='str'),
+ scaling_adjustment = dict(type='int'),
+ min_adjustment_step = dict(type='int'),
+ cooldown = dict(type='int'),
+ region = dict(aliases=['aws_region', 'ec2_region'], choices=AWS_REGIONS),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+
+ state = module.params.get('state')
+
+ try:
+ connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg = str(e))
+
+ if state == 'present':
+ create_scaling_policy(connection, module)
+ elif state == 'absent':
+ delete_scaling_policy(connection, module)
+
+
+main()
diff --git a/cloud/ec2_snapshot b/cloud/ec2_snapshot
index b5d9df3b525..10aba7963c6 100644
--- a/cloud/ec2_snapshot
+++ b/cloud/ec2_snapshot
@@ -22,24 +22,6 @@ description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
- ec2_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: None
- aliases: ['aws_secret_key', 'secret_key' ]
- ec2_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: None
- aliases: ['aws_access_key', 'access_key' ]
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
- required: false
- default: null
- aliases: []
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
@@ -59,19 +41,20 @@ options:
default: null
aliases: []
instance_id:
- description:
- - instance that has a the required volume to snapshot mounted
+ description:
+ - instance that has the required volume to snapshot mounted
required: false
default: null
aliases: []
device_name:
- description:
+ description:
- device name of a mounted volume to be snapshotted
required: false
default: null
aliases: []
-requirements: [ "boto" ]
+
author: Will Thames
+extends_documentation_fragment: aws
'''
EXAMPLES = '''
@@ -109,6 +92,9 @@ def main():
ec2_url = dict(),
ec2_secret_key = dict(aliases=['aws_secret_key', 'secret_key'], no_log=True),
ec2_access_key = dict(aliases=['aws_access_key', 'access_key']),
+ wait = dict(type='bool', default=True),
+ wait_timeout = dict(default=0),
+ snapshot_tags = dict(type='dict', default=dict()),
)
)
@@ -116,6 +102,9 @@ def main():
description = module.params.get('description')
instance_id = module.params.get('instance_id')
device_name = module.params.get('device_name')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+ snapshot_tags = module.params.get('snapshot_tags')
if not volume_id and not instance_id or volume_id and instance_id:
module.fail_json(msg='One and only one of volume_id or instance_id must be specified')
@@ -135,10 +124,22 @@ def main():
try:
snapshot = ec2.create_snapshot(volume_id, description=description)
+ time_waited = 0
+ if wait:
+ snapshot.update()
+ while snapshot.status != 'completed':
+ time.sleep(3)
+ snapshot.update()
+ time_waited += 3
+ if wait_timeout and time_waited > wait_timeout:
+ module.fail_json(msg='Timed out while creating snapshot.')
+ for k, v in snapshot_tags.items():
+ snapshot.add_tag(k, v)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
- module.exit_json(changed=True, snapshot_id=snapshot.id)
+ module.exit_json(changed=True, snapshot_id=snapshot.id, volume_id=snapshot.volume_id,
+ volume_size=snapshot.volume_size, tags=snapshot.tags.copy())
# import module snippets
from ansible.module_utils.basic import *
diff --git a/cloud/ec2_tag b/cloud/ec2_tag
index ca5a337646f..6c6eb94d218 100644
--- a/cloud/ec2_tag
+++ b/cloud/ec2_tag
@@ -19,7 +19,7 @@ DOCUMENTATION = '''
module: ec2_tag
short_description: create and remove tag(s) to ec2 resources.
description:
- - Creates and removes tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
+ - Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
resource:
@@ -30,7 +30,7 @@ options:
aliases: []
state:
description:
- - Whether the tags should be present or absent on the resource.
+ - Whether the tags should be present or absent on the resource. Use list to interrogate the tags of an instance.
required: false
default: present
choices: ['present', 'absent']
@@ -41,35 +41,9 @@ options:
required: false
default: null
aliases: ['aws_region', 'ec2_region']
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_access_key', 'access_key' ]
- ec2_url:
- description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
- required: false
- default: null
- aliases: []
- validate_certs:
- description:
- - When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
- required: false
- default: "yes"
- choices: ["yes", "no"]
- aliases: []
- version_added: "1.5"
-requirements: [ "boto" ]
author: Lester Wade
+extends_documentation_fragment: aws
'''
EXAMPLES = '''
@@ -115,14 +89,14 @@ def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
resource = dict(required=True),
- tags = dict(required=True),
- state = dict(default='present', choices=['present', 'absent']),
+ tags = dict(),
+ state = dict(default='present', choices=['present', 'absent', 'list']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
resource = module.params.get('resource')
- tags = module.params['tags']
+ tags = module.params.get('tags')
state = module.params.get('state')
ec2 = ec2_connect(module)
@@ -140,6 +114,8 @@ def main():
tagdict[tag.name] = tag.value
if state == 'present':
+ if not tags:
+ module.fail_json(msg="tags argument is required when state is present")
if set(tags.items()).issubset(set(tagdict.items())):
module.exit_json(msg="Tags already exists in %s." %resource, changed=False)
else:
@@ -151,6 +127,8 @@ def main():
module.exit_json(msg="Tags %s created for resource %s." % (dictadd,resource), changed=True)
if state == 'absent':
+ if not tags:
+ module.fail_json(msg="tags argument is required when state is absent")
for (key, value) in set(tags.items()):
if (key, value) not in set(tagdict.items()):
baddict[key] = value
@@ -162,10 +140,9 @@ def main():
tagger = ec2.delete_tags(resource, dictremove)
gettags = ec2.get_all_tags(filters=filters)
module.exit_json(msg="Tags %s removed for resource %s." % (dictremove,resource), changed=True)
-
-# print json.dumps({
-# "current_resource_tags": gettags,
-# })
+
+ if state == 'list':
+ module.exit_json(changed=False, **tagdict)
sys.exit(0)
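
Note that state=list spreads the resource's tags into the top level of the module result via **tagdict; e.g. (illustrative values):

    tagdict = {'Name': 'web', 'env': 'prod'}   # tags read back from EC2
    module.exit_json(changed=False, **tagdict)
    # => {"changed": false, "Name": "web", "env": "prod"}
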
# import module snippets
diff --git a/cloud/ec2_vol b/cloud/ec2_vol
index bdd2eae3822..152094d9b9b 100644
--- a/cloud/ec2_vol
+++ b/cloud/ec2_vol
@@ -22,34 +22,30 @@ description:
- creates an EBS volume and optionally attaches it to an instance. If both an instance ID and a device name is given and the instance has a device at the device name, then no volume is created and no attachment is made. This module has a dependency on python-boto.
version_added: "1.1"
options:
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: None
- aliases: ['ec2_secret_key', 'secret_key' ]
- aws_access_key:
+ instance:
description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ - instance ID if you wish to attach the volume.
required: false
- default: None
- aliases: ['ec2_access_key', 'access_key' ]
- ec2_url:
+ default: null
+ aliases: []
+ name:
description:
- - Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used
+ - volume Name tag if you wish to attach an existing volume (requires instance)
required: false
default: null
aliases: []
- instance:
+ version_added: "1.6"
+ id:
description:
- - instance ID if you wish to attach the volume.
+ - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
required: false
- default: null
+ default: null
aliases: []
+ version_added: "1.6"
volume_size:
description:
- size of volume (in GB) to create.
- required: true
+ required: false
default: null
aliases: []
iops:
@@ -82,6 +78,7 @@ options:
- snapshot ID on which to base the volume
required: false
default: null
+ version_added: "1.5"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
@@ -90,9 +87,15 @@ options:
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
-
-requirements: [ "boto" ]
+ state:
+ description:
+ - whether to ensure the volume is present or absent
+ required: false
+ default: present
+ choices: ['absent', 'present']
+ version_added: "1.6"
author: Lester Wade
+extends_documentation_fragment: aws
'''
EXAMPLES = '''
@@ -131,6 +134,34 @@ EXAMPLES = '''
volume_size: 5
with_items: ec2.instances
register: ec2_vol
+
+# Example: Launch an instance and then add a volume if not already present
+# * Nothing will happen if the volume is already attached.
+# * Volume must exist in the same zone.
+
+- local_action:
+ module: ec2
+ keypair: "{{ keypair }}"
+ image: "{{ image }}"
+ zone: YYYYYY
+ id: my_instance
+ wait: yes
+ count: 1
+ register: ec2
+
+- local_action:
+ module: ec2_vol
+ instance: "{{ item.id }}"
+ name: my_existing_volume_Name_tag
+ device_name: /dev/xvdf
+ with_items: ec2.instances
+ register: ec2_vol
+
+# Remove a volume
+- local_action:
+ module: ec2_vol
+ id: vol-XXXXXXXX
+ state: absent
'''
# Note: this module needs to be made idempotent. Possible solution is to use resource tags with the volumes.
@@ -147,82 +178,104 @@ except ImportError:
print "failed=True msg='boto required for this module'"
sys.exit(1)
-def main():
- argument_spec = ec2_argument_spec()
- argument_spec.update(dict(
- instance = dict(),
- volume_size = dict(required=True),
- iops = dict(),
- device_name = dict(),
- zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
- snapshot = dict(),
- )
- )
- module = AnsibleModule(argument_spec=argument_spec)
-
- instance = module.params.get('instance')
- volume_size = module.params.get('volume_size')
- iops = module.params.get('iops')
- device_name = module.params.get('device_name')
+def get_volume(module, ec2):
+ name = module.params.get('name')
+ id = module.params.get('id')
zone = module.params.get('zone')
- snapshot = module.params.get('snapshot')
-
- ec2 = ec2_connect(module)
+ filters = {}
+ volume_ids = None
+ if zone:
+ filters['availability_zone'] = zone
+ if name:
+        filters['tag:Name'] = name
+ if id:
+ volume_ids = [id]
+ try:
+ vols = ec2.get_all_volumes(volume_ids=volume_ids, filters=filters)
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
- # Here we need to get the zone info for the instance. This covers situation where
- # instance is specified but zone isn't.
- # Useful for playbooks chaining instance launch with volume create + attach and where the
- # zone doesn't matter to the user.
+ if not vols:
+ module.fail_json(msg="Could not find volume in zone (if specified): %s" % name or id)
+ if len(vols) > 1:
+ module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
+ return vols[0]
- if instance:
- reservation = ec2.get_all_instances(instance_ids=instance)
- inst = reservation[0].instances[0]
- zone = inst.placement
- # Check if there is a volume already mounted there.
- if device_name:
- if device_name in inst.block_device_mapping:
- module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
- volume_id=inst.block_device_mapping[device_name].volume_id,
- device=device_name,
- changed=False)
+def delete_volume(module, ec2):
+ vol = get_volume(module, ec2)
+ if not vol:
+ module.exit_json(changed=False)
+ else:
+ if vol.attachment_state() is not None:
+ adata = vol.attach_data
+ module.fail_json(msg="Volume %s is attached to an instance %s." % (vol.id, adata.instance_id))
+ ec2.delete_volume(vol.id)
+ module.exit_json(changed=True)
- # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
+def create_volume(module, ec2, zone):
+ name = module.params.get('name')
+ id = module.params.get('id')
+ instance = module.params.get('instance')
+ iops = module.params.get('iops')
+ volume_size = module.params.get('volume_size')
+ snapshot = module.params.get('snapshot')
+ # If custom iops is defined we use volume_type "io1" rather than the default of "standard"
if iops:
volume_type = 'io1'
else:
volume_type = 'standard'
# If no instance supplied, try volume creation based on module parameters.
+ if name or id:
+ if not instance:
+ module.fail_json(msg = "If name or id is specified, instance must also be specified")
+ if iops or volume_size:
+ module.fail_json(msg = "Parameters are not compatible: [id or name] and [iops or volume_size]")
- try:
- volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
- while volume.status != 'available':
- time.sleep(3)
- volume.update()
- except boto.exception.BotoServerError, e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ volume = get_volume(module, ec2)
+ if volume.attachment_state() is not None:
+ adata = volume.attach_data
+ if adata.instance_id != instance:
+ module.fail_json(msg = "Volume %s is already attached to another instance: %s"
+ % (name or id, adata.instance_id))
+ else:
+ module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
+ (name or id, adata.instance_id, adata.device),
+                             volume_id=volume.id,
+ device=adata.device,
+ changed=False)
+ else:
+ try:
+ volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
+ while volume.status != 'available':
+ time.sleep(3)
+ volume.update()
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+ return volume
- # Attach the created volume.
+
+def attach_volume(module, ec2, volume, instance):
+ device_name = module.params.get('device_name')
if device_name and instance:
try:
- attach = volume.attach(inst.id, device_name)
+ attach = volume.attach(instance.id, device_name)
while volume.attachment_state() != 'attached':
time.sleep(3)
volume.update()
except boto.exception.BotoServerError, e:
- module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
-
+ module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
+
# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
-
+
# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)
# Use password data attribute to tell whether the instance is Windows or Linux
-
if device_name is None and instance:
try:
-            if not ec2.get_password_data(inst.id):
+            if not ec2.get_password_data(instance.id):
@@ -240,11 +293,65 @@ def main():
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
- print json.dumps({
- "volume_id": volume.id,
- "device": device_name
- })
- sys.exit(0)
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(dict(
+ instance = dict(),
+ id = dict(),
+ name = dict(),
+ volume_size = dict(),
+ iops = dict(),
+ device_name = dict(),
+ zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
+ snapshot = dict(),
+ state = dict(choices=['absent', 'present'], default='present')
+ )
+ )
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ id = module.params.get('id')
+ name = module.params.get('name')
+ instance = module.params.get('instance')
+ volume_size = module.params.get('volume_size')
+ iops = module.params.get('iops')
+ device_name = module.params.get('device_name')
+ zone = module.params.get('zone')
+ snapshot = module.params.get('snapshot')
+ state = module.params.get('state')
+
+ ec2 = ec2_connect(module)
+
+ if id and name:
+ module.fail_json(msg="Both id and name cannot be specified")
+
+ if not (id or name or volume_size):
+ module.fail_json(msg="Cannot specify volume_size and either one of name or id")
+
+ # Here we need to get the zone info for the instance. This covers situation where
+ # instance is specified but zone isn't.
+ # Useful for playbooks chaining instance launch with volume create + attach and where the
+ # zone doesn't matter to the user.
+ if instance:
+ reservation = ec2.get_all_instances(instance_ids=instance)
+ inst = reservation[0].instances[0]
+ zone = inst.placement
+
+ # Check if there is a volume already mounted there.
+ if device_name:
+ if device_name in inst.block_device_mapping:
+ module.exit_json(msg="Volume mapping for %s already exists on instance %s" % (device_name, instance),
+ volume_id=inst.block_device_mapping[device_name].volume_id,
+ device=device_name,
+ changed=False)
+
+ if state == 'absent':
+ delete_volume(module, ec2)
+ else:
+ volume = create_volume(module, ec2, zone)
+ if instance:
+ attach_volume(module, ec2, volume, inst)
+ module.exit_json(volume_id=volume.id, device=device_name)
# import module snippets
from ansible.module_utils.basic import *
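
For reference, a standalone sketch of the lookup get_volume() performs, assuming boto
is installed and AWS credentials are configured; the region, tag and id values are
illustrative:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')

    name, vol_id, zone = 'my-data-volume', None, 'us-east-1a'

    # an explicit volume id wins; otherwise filter on the Name tag,
    # optionally narrowed to an availability zone
    filters = {}
    volume_ids = None
    if zone:
        filters['availability_zone'] = zone
    if name:
        filters['tag:Name'] = name
    if vol_id:
        volume_ids = [vol_id]

    vols = conn.get_all_volumes(volume_ids=volume_ids, filters=filters)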
diff --git a/cloud/ec2_vpc b/cloud/ec2_vpc
index 9b9fb95a0b2..1bd569f478c 100644
--- a/cloud/ec2_vpc
+++ b/cloud/ec2_vpc
@@ -46,7 +46,7 @@ options:
choices: [ "yes", "no" ]
subnets:
description:
- - "A dictionary array of subnets to add of the form: { cidr: ..., az: ... }. Where az is the desired availability zone of the subnet, but it is not required. All VPC subnets not in this list will be removed."
+ - 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed.'
required: false
default: null
aliases: []
@@ -56,6 +56,13 @@ options:
required: false
default: null
aliases: []
+ resource_tags:
+ description:
+      - 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if the CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored.'
+ required: false
+ default: null
+ aliases: []
+ version_added: "1.6"
internet_gateway:
description:
- Toggle whether there should be an Internet gateway attached to the VPC
@@ -65,7 +72,7 @@ options:
aliases: []
route_tables:
description:
- - "A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. This module is currently unable to affect the 'main' route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly."
+      - 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},] }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword igw for gw specifies that the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition to igw. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly.'
required: false
default: null
aliases: []
@@ -127,6 +134,7 @@ EXAMPLES = '''
module: ec2_vpc
state: present
cidr_block: 172.23.0.0/16
+ resource_tags: { "Environment":"Development" }
region: us-west-2
# Full creation example with subnets and optional availability zones.
 # The absence or presence of subnets deletes or creates them respectively.
@@ -134,13 +142,17 @@ EXAMPLES = '''
module: ec2_vpc
state: present
cidr_block: 172.22.0.0/16
+ resource_tags: { "Environment":"Development" }
subnets:
- cidr: 172.22.1.0/24
az: us-west-2c
+ resource_tags: { "Environment":"Dev", "Tier" : "Web" }
- cidr: 172.22.2.0/24
az: us-west-2b
+ resource_tags: { "Environment":"Dev", "Tier" : "App" }
- cidr: 172.22.3.0/24
az: us-west-2a
+ resource_tags: { "Environment":"Dev", "Tier" : "DB" }
internet_gateway: True
route_tables:
- subnets:
@@ -193,9 +205,54 @@ def get_vpc_info(vpc):
'state': vpc.state,
})
+def find_vpc(module, vpc_conn, vpc_id=None, cidr=None):
+ """
+ Finds a VPC that matches a specific id or cidr + tags
+
+ module : AnsibleModule object
+ vpc_conn: authenticated VPCConnection connection object
+
+ Returns:
+ A VPC object that matches either an ID or CIDR and one or more tag values
+ """
+
+    if vpc_id is None and cidr is None:
+ module.fail_json(
+ msg='You must specify either a vpc id or a cidr block + list of unique tags, aborting'
+ )
+
+ found_vpcs = []
+
+ resource_tags = module.params.get('resource_tags')
+
+ # Check for existing VPC by cidr_block or id
+ if vpc_id is not None:
+ found_vpcs = vpc_conn.get_all_vpcs(None, {'vpc-id': vpc_id, 'state': 'available',})
+
+ else:
+ previous_vpcs = vpc_conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'})
+
+ for vpc in previous_vpcs:
+ # Get all tags for each of the found VPCs
+ vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
+
+ # If the supplied list of ID Tags match a subset of the VPC Tags, we found our VPC
+ if resource_tags and set(resource_tags.items()).issubset(set(vpc_tags.items())):
+ found_vpcs.append(vpc)
+
+ found_vpc = None
+
+ if len(found_vpcs) == 1:
+ found_vpc = found_vpcs[0]
+
+ if len(found_vpcs) > 1:
+ module.fail_json(msg='Found more than one vpc based on the supplied criteria, aborting')
+
+    return found_vpc
+
def create_vpc(module, vpc_conn):
"""
- Creates a new VPC
+ Creates a new or modifies an existing VPC.
module : AnsibleModule object
vpc_conn: authenticated VPCConnection connection object
@@ -217,20 +274,12 @@ def create_vpc(module, vpc_conn):
wait_timeout = int(module.params.get('wait_timeout'))
changed = False
- # Check for existing VPC by cidr_block or id
- if id != None:
- filter_dict = {'vpc-id':id, 'state': 'available',}
- previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict)
- else:
- filter_dict = {'cidr': cidr_block, 'state': 'available'}
- previous_vpcs = vpc_conn.get_all_vpcs(None, filter_dict)
+ # Check for existing VPC by cidr_block + tags or id
+ previous_vpc = find_vpc(module, vpc_conn, id, cidr_block)
- if len(previous_vpcs) > 1:
- module.fail_json(msg='EC2 returned more than one VPC, aborting')
-
- if len(previous_vpcs) == 1:
+ if previous_vpc is not None:
changed = False
- vpc = previous_vpcs[0]
+ vpc = previous_vpc
else:
changed = True
try:
@@ -255,7 +304,21 @@ def create_vpc(module, vpc_conn):
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
-
+
+ # Add resource tags
+ vpc_spec_tags = module.params.get('resource_tags')
+ vpc_tags = dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': vpc.id}))
+
+ if vpc_spec_tags and not set(vpc_spec_tags.items()).issubset(set(vpc_tags.items())):
+ new_tags = {}
+
+ for (key, value) in set(vpc_spec_tags.items()):
+ if (key, value) not in set(vpc_tags.items()):
+ new_tags[key] = value
+
+ if new_tags:
+ vpc_conn.create_tags(vpc.id, new_tags)
+
# boto doesn't appear to have a way to determine the existing
# value of the dns attributes, so we just set them.
@@ -269,6 +332,7 @@ def create_vpc(module, vpc_conn):
module.fail_json(msg='subnets needs to be a list of cidr blocks')
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+
# First add all new subnets
for subnet in subnets:
add_subnet = True
@@ -277,10 +341,22 @@ def create_vpc(module, vpc_conn):
add_subnet = False
if add_subnet:
try:
- vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
+ new_subnet = vpc_conn.create_subnet(vpc.id, subnet['cidr'], subnet.get('az', None))
+ new_subnet_tags = subnet.get('resource_tags', None)
+ if new_subnet_tags:
+                    # Sometimes AWS takes its time to create a subnet, and using the new
+                    # subnet's id to create tags results in an exception.
+                    # boto doesn't seem to refresh the 'state' of the newly created subnet
+                    # (it stays 'pending'), so we poll vpc_conn.get_all_subnets with the id
+                    # of the newly added subnet until it shows up.
+ while len(vpc_conn.get_all_subnets(filters={ 'subnet-id': new_subnet.id })) == 0:
+ time.sleep(0.1)
+
+ vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
+
changed = True
except EC2ResponseError, e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
+
# Now delete all absent subnets
for csubnet in current_subnets:
delete_subnet = True
@@ -332,7 +408,7 @@ def create_vpc(module, vpc_conn):
if not isinstance(route_tables, list):
module.fail_json(msg='route tables need to be a list of dictionaries')
     # Work through each route table and update/create to match dictionary array
all_route_tables = []
for rt in route_tables:
try:
@@ -350,7 +426,7 @@ def create_vpc(module, vpc_conn):
# Associate with subnets
for sn in rt['subnets']:
- rsn = vpc_conn.get_all_subnets(filters={'cidr': sn})
+ rsn = vpc_conn.get_all_subnets(filters={'cidr': sn, 'vpc_id': vpc.id })
if len(rsn) != 1:
module.fail_json(
msg='The subnet {0} to associate with route_table {1} ' \
@@ -360,7 +436,7 @@ def create_vpc(module, vpc_conn):
# Disassociate then associate since we don't have replace
old_rt = vpc_conn.get_all_route_tables(
- filters={'association.subnet_id': rsn.id}
+ filters={'association.subnet_id': rsn.id, 'vpc_id': vpc.id}
)
if len(old_rt) == 1:
old_rt = old_rt[0]
@@ -405,14 +481,15 @@ def create_vpc(module, vpc_conn):
created_vpc_id = vpc.id
returned_subnets = []
current_subnets = vpc_conn.get_all_subnets(filters={ 'vpc_id': vpc.id })
+
for sn in current_subnets:
returned_subnets.append({
+ 'resource_tags': dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': sn.id})),
'cidr': sn.cidr_block,
'az': sn.availability_zone,
'id': sn.id,
})
-
return (vpc_dict, created_vpc_id, returned_subnets, changed)
def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
@@ -434,23 +511,10 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
vpc_dict = {}
terminated_vpc_id = ''
changed = False
-
- if vpc_id == None and cidr == None:
- module.fail_json(
- msg='You must either specify a vpc id or a cidr '\
- 'block to terminate a VPC, aborting'
- )
- if vpc_id is not None:
- vpc_rs = vpc_conn.get_all_vpcs(vpc_id)
- else:
- vpc_rs = vpc_conn.get_all_vpcs(filters={'cidr': cidr})
- if len(vpc_rs) > 1:
- module.fail_json(
- msg='EC2 returned more than one VPC for id {0} ' \
- 'or cidr {1}, aborting'.format(vpc_id,vidr)
- )
- if len(vpc_rs) == 1:
- vpc = vpc_rs[0]
+
+ vpc = find_vpc(module, vpc_conn, vpc_id, cidr)
+
+ if vpc is not None:
if vpc.state == 'available':
terminated_vpc_id=vpc.id
vpc_dict=get_vpc_info(vpc)
@@ -491,13 +555,14 @@ def main():
argument_spec.update(dict(
cidr_block = dict(),
instance_tenancy = dict(choices=['default', 'dedicated'], default='default'),
- wait = dict(choices=BOOLEANS, default=False),
+ wait = dict(type='bool', default=False),
wait_timeout = dict(default=300),
- dns_support = dict(choices=BOOLEANS, default=True),
- dns_hostnames = dict(choices=BOOLEANS, default=True),
+ dns_support = dict(type='bool', default=True),
+ dns_hostnames = dict(type='bool', default=True),
subnets = dict(type='list'),
vpc_id = dict(),
- internet_gateway = dict(choices=BOOLEANS, default=False),
+ internet_gateway = dict(type='bool', default=False),
+ resource_tags = dict(type='dict'),
route_tables = dict(type='list'),
state = dict(choices=['present', 'absent'], default='present'),
)
@@ -527,11 +592,6 @@ def main():
if module.params.get('state') == 'absent':
vpc_id = module.params.get('vpc_id')
cidr = module.params.get('cidr_block')
- if vpc_id == None and cidr == None:
- module.fail_json(
- msg='You must either specify a vpc id or a cidr '\
- 'block to terminate a VPC, aborting'
- )
(changed, vpc_dict, new_vpc_id) = terminate_vpc(module, vpc_conn, vpc_id, cidr)
subnets_changed = None
elif module.params.get('state') == 'present':
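
A standalone sketch of the cidr-plus-tags matching that find_vpc() introduces,
assuming boto and a reachable region; the cidr and tag values are illustrative:

    import boto.vpc

    conn = boto.vpc.connect_to_region('us-west-2')
    cidr = '172.22.0.0/16'
    wanted_tags = {'Environment': 'Development'}

    matches = []
    for vpc in conn.get_all_vpcs(None, {'cidr': cidr, 'state': 'available'}):
        vpc_tags = dict((t.name, t.value)
                        for t in conn.get_all_tags(filters={'resource-id': vpc.id}))
        # a candidate matches when the wanted tags are a subset of its actual tags
        if set(wanted_tags.items()).issubset(set(vpc_tags.items())):
            matches.append(vpc)

    # exactly one match identifies the VPC; more than one is ambiguous and fatal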
diff --git a/cloud/elasticache b/cloud/elasticache
index 7cbd72d736d..8c82f2fcc20 100644
--- a/cloud/elasticache
+++ b/cloud/elasticache
@@ -58,6 +58,12 @@ options:
- The port number on which each of the cache nodes will accept connections
required: false
default: 11211
+ security_group_ids:
+ description:
+      - A list of vpc security group ids to associate with this cache cluster. Only use if inside a vpc.
+ required: false
+      default: []
+ version_added: "1.6"
cache_security_groups:
description:
- A list of cache security group names to associate with this cache cluster
@@ -152,7 +158,7 @@ class ElastiCacheManager(object):
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
- num_nodes, cache_port, cache_security_groups, zone, wait,
+ num_nodes, cache_port, cache_security_groups, security_group_ids, zone, wait,
hard_modify, aws_access_key, aws_secret_key, region):
self.module = module
self.name = name
@@ -162,6 +168,7 @@ class ElastiCacheManager(object):
self.num_nodes = num_nodes
self.cache_port = cache_port
self.cache_security_groups = cache_security_groups
+ self.security_group_ids = security_group_ids
self.zone = zone
self.wait = wait
self.hard_modify = hard_modify
@@ -217,6 +224,7 @@ class ElastiCacheManager(object):
engine=self.engine,
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
+ security_group_ids=self.security_group_ids,
preferred_availability_zone=self.zone,
port=self.cache_port)
except boto.exception.BotoServerError, e:
@@ -291,6 +299,7 @@ class ElastiCacheManager(object):
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
+ security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
except boto.exception.BotoServerError, e:
@@ -377,12 +386,21 @@ class ElastiCacheManager(object):
if self.data[key] != value:
return True
- # Check security groups
+ # Check cache security groups
cache_security_groups = []
for sg in self.data['CacheSecurityGroups']:
cache_security_groups.append(sg['CacheSecurityGroupName'])
if set(cache_security_groups) - set(self.cache_security_groups):
return True
+
+        # Check vpc security groups
+ vpc_security_groups = []
+ security_groups = self.data['SecurityGroups'] or []
+ for sg in security_groups:
+ vpc_security_groups.append(sg['SecurityGroupId'])
+ if set(vpc_security_groups) - set(self.security_group_ids):
+ return True
+
return False
def _requires_destroy_and_create(self):
@@ -469,9 +487,11 @@ def main():
cache_port={'required': False, 'default': 11211, 'type': 'int'},
cache_security_groups={'required': False, 'default': ['default'],
'type': 'list'},
+ security_group_ids={'required': False, 'default': [],
+ 'type': 'list'},
zone={'required': False, 'default': None},
- wait={'required': False, 'choices': BOOLEANS, 'default': True},
- hard_modify={'required': False, 'choices': BOOLEANS, 'default': False}
+ wait={'required': False, 'type' : 'bool', 'default': True},
+ hard_modify={'required': False, 'type': 'bool', 'default': False}
)
)
@@ -489,6 +509,7 @@ def main():
num_nodes = module.params['num_nodes']
cache_port = module.params['cache_port']
cache_security_groups = module.params['cache_security_groups']
+ security_group_ids = module.params['security_group_ids']
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
@@ -502,7 +523,8 @@ def main():
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
- cache_security_groups, zone, wait,
+ cache_security_groups,
+ security_group_ids, zone, wait,
hard_modify, aws_access_key,
aws_secret_key, region)
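
The new vpc security group comparison reduces to a set difference; a minimal sketch
with made-up group ids. Note that, like the cache_security_groups check above it, it
flags groups present on the cluster but not requested, not the reverse:

    current_ids = ['sg-1111', 'sg-2222']  # SecurityGroupId values from self.data
    desired_ids = ['sg-1111']             # self.security_group_ids

    if set(current_ids) - set(desired_ids):
        print "cluster requires modification"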
diff --git a/cloud/gc_storage b/cloud/gc_storage
index cbf72aa8e92..8696f8e965d 100644
--- a/cloud/gc_storage
+++ b/cloud/gc_storage
@@ -152,11 +152,12 @@ def key_check(module, gs, bucket, obj):
def keysum(module, gs, bucket, obj):
bucket = gs.lookup(bucket)
key_check = bucket.get_key(obj)
- if key_check:
- md5_remote = key_check.etag[1:-1]
- etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5
- if etag_multipart is True:
- module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+ module.fail_json(msg="Files uploaded with multipart of gs are not supported with checksum, unable to compute checksum.")
return md5_remote
def bucket_check(module, gs, bucket):
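
The reworked keysum() guard in isolation: the etag of a multipart upload is not an
MD5 and can be recognized by the '-' it contains. The etag value is illustrative:

    etag = '"9b2cf535f27731c974343645a3985328-4"'
    md5_remote = etag[1:-1]    # strip the surrounding quotes
    if '-' in md5_remote:      # multipart marker: no usable MD5
        print "multipart upload, checksum comparison not supported"
    else:
        print "remote md5: %s" % md5_remote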
diff --git a/cloud/gce b/cloud/gce
index b14ce8996da..2d95c8143bc 100755
--- a/cloud/gce
+++ b/cloud/gce
@@ -351,7 +351,7 @@ def main():
metadata = dict(),
name = dict(),
network = dict(default='default'),
- persistent_boot_disk = dict(type='bool', choices=BOOLEANS, default=False),
+ persistent_boot_disk = dict(type='bool', default=False),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
diff --git a/cloud/gce_lb b/cloud/gce_lb
index 3e22c216998..4d7190d8752 100644
--- a/cloud/gce_lb
+++ b/cloud/gce_lb
@@ -111,21 +111,21 @@ options:
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- your GCE project ID
required: false
diff --git a/cloud/gce_net b/cloud/gce_net
index 4e731f196d3..c2c0b30452d 100644
--- a/cloud/gce_net
+++ b/cloud/gce_net
@@ -74,21 +74,21 @@ options:
choices: ["active", "present", "absent", "deleted"]
aliases: []
service_account_email:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- your GCE project ID
required: false
diff --git a/cloud/gce_pd b/cloud/gce_pd
index a8e631a5522..e5ea6cc4ad8 100644
--- a/cloud/gce_pd
+++ b/cloud/gce_pd
@@ -76,21 +76,21 @@ options:
default: "us-central1-b"
aliases: []
service_account_email:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
- version_added: 1.5.1
+ version_added: "1.6"
description:
- your GCE project ID
required: false
@@ -127,10 +127,9 @@ except ImportError:
def main():
module = AnsibleModule(
argument_spec = dict(
- detach_only = dict(choice=BOOLEANS),
+ detach_only = dict(type='bool'),
instance_name = dict(),
- mode = dict(default='READ_ONLY',
- choices=['READ_WRITE', 'READ_ONLY']),
+ mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
state = dict(default='present'),
diff --git a/cloud/keystone_user b/cloud/keystone_user
index 206fd68b070..d6529b537ed 100644
--- a/cloud/keystone_user
+++ b/cloud/keystone_user
@@ -26,6 +26,7 @@ options:
- The tenant login_user belongs to
required: false
default: None
+ version_added: "1.3"
token:
description:
- The token to be uses in case the password is not specified
diff --git a/cloud/nova_compute b/cloud/nova_compute
index d0bc79b1a2a..049c8116bbc 100644
--- a/cloud/nova_compute
+++ b/cloud/nova_compute
@@ -107,6 +107,12 @@ options:
- The amount of time the module should wait for the VM to get into active state
required: false
default: 180
+ user_data:
+ description:
+ - Opaque blob of data which is made available to the instance
+ required: false
+ default: None
+ version_added: "1.6"
requirements: ["novaclient"]
'''
@@ -157,6 +163,8 @@ def _create_server(module, nova):
'meta' : module.params['meta'],
'key_name': module.params['key_name'],
'security_groups': module.params['security_groups'].split(','),
+        # userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
+ 'userdata': module.params['user_data'],
}
if not module.params['key_name']:
del bootkwargs['key_name']
@@ -193,7 +201,12 @@ def _get_server_state(module, nova):
try:
servers = nova.servers.list(True, {'name': module.params['name']})
if servers:
- server = [x for x in servers if x.name == module.params['name']][0]
+ # the {'name': module.params['name']} will also return servers
+ # with names that partially match the server name, so we have to
+ # strictly filter here
+ servers = [x for x in servers if x.name == module.params['name']]
+ if servers:
+ server = servers[0]
except Exception, e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
if server and module.params['state'] == 'present':
@@ -227,7 +240,8 @@ def main():
meta = dict(default=None),
wait = dict(default='yes', choices=['yes', 'no']),
wait_for = dict(default=180),
- state = dict(default='present', choices=['absent', 'present'])
+ state = dict(default='present', choices=['absent', 'present']),
+ user_data = dict(default=None)
),
)
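
Why the strict post-filter added to _get_server_state() is needed: the
{'name': ...} search matches partial names, so a query for 'web' can also return
'web-db'. A sketch with stand-in objects:

    class Server(object):
        def __init__(self, name):
            self.name = name

    returned = [Server('web'), Server('web-db')]  # what the search might return
    wanted = 'web'

    exact = [s for s in returned if s.name == wanted]
    server = exact[0] if exact else None          # only the exact match survives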
diff --git a/cloud/nova_keypair b/cloud/nova_keypair
index 19d3fa49b95..18674a1220a 100644
--- a/cloud/nova_keypair
+++ b/cloud/nova_keypair
@@ -18,7 +18,7 @@
# along with this software. If not, see .
try:
- from novaclient.v1_1 import client
+ from novaclient.v1_1 import client as nova_client
from novaclient import exceptions
import time
except ImportError:
diff --git a/cloud/quantum_floating_ip b/cloud/quantum_floating_ip
index c69f2b16587..2ad761ec3b7 100644
--- a/cloud/quantum_floating_ip
+++ b/cloud/quantum_floating_ip
@@ -80,6 +80,7 @@ options:
       - The name of the network of the port to associate with the floating ip. Necessary when the VM has multiple networks.
required: false
default: None
+ version_added: "1.5"
requirements: ["novaclient", "quantumclient", "neutronclient", "keystoneclient"]
'''
diff --git a/cloud/quantum_subnet b/cloud/quantum_subnet
index 489ebb3440c..17f7a6a0056 100644
--- a/cloud/quantum_subnet
+++ b/cloud/quantum_subnet
@@ -98,6 +98,7 @@ options:
- DNS nameservers for this subnet, comma-separated
required: false
default: None
+ version_added: "1.4"
allocation_pool_start:
description:
- From the subnet pool the starting address from which the IP should be allocated
@@ -259,7 +260,7 @@ def main():
tenant_name = dict(default=None),
state = dict(default='present', choices=['absent', 'present']),
ip_version = dict(default='4', choices=['4', '6']),
- enable_dhcp = dict(default='true', choices=BOOLEANS),
+ enable_dhcp = dict(default='true', type='bool'),
gateway_ip = dict(default=None),
dns_nameservers = dict(default=None),
allocation_pool_start = dict(default=None),
diff --git a/cloud/rax b/cloud/rax
index 230f80df5e2..af533bca126 100644
--- a/cloud/rax
+++ b/cloud/rax
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax
@@ -23,52 +25,6 @@ description:
waits for it to be 'running'.
version_added: "1.2"
options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- aliases:
- - password
- auth_endpoint:
- description:
- - The URI of the authentication service
- default: https://identity.api.rackspacecloud.com/v2.0/
- version_added: 1.5
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases:
- - creds_file
- env:
- description:
- - Environment as configured in ~/.pyrax.cfg,
- see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
- version_added: 1.5
- identity_type:
- description:
- - Authentication machanism to use, such as rackspace or keystone
- default: rackspace
- version_added: 1.5
- region:
- description:
- - Region to create an instance in
- default: DFW
- tenant_id:
- description:
- - The tenant ID used for authentication
- version_added: 1.5
- tenant_name:
- description:
- - The tenant name used for authentication
- version_added: 1.5
- username:
- description:
- - Rackspace username (overrides I(credentials))
- verify_ssl:
- description:
- - Whether or not to require SSL validation of API endpoints
- version_added: 1.5
auto_increment:
description:
- Whether or not to increment a single number with the name of the
@@ -89,7 +45,9 @@ options:
disk_config:
description:
- Disk partitioning strategy
- choices: ['auto', 'manual']
+ choices:
+ - auto
+ - manual
version_added: '1.4'
default: auto
exact_count:
@@ -98,6 +56,17 @@ options:
state=active/present
default: no
version_added: 1.4
+ extra_client_args:
+ description:
+      - A hash of key/value pairs to be used when creating the cloudservers
+        client. This is considered an advanced option; use it wisely and
+        with caution.
+    version_added: 1.6
+ extra_create_args:
+ description:
+      - A hash of key/value pairs to be used when creating a new server.
+        This is considered an advanced option; use it wisely and with caution.
+    version_added: 1.6
files:
description:
- Files to insert into the instance. remotefilename:localcontent
@@ -124,7 +93,8 @@ options:
description:
- key pair to use on the instance
default: null
- aliases: ['keypair']
+ aliases:
+ - keypair
meta:
description:
- A hash of metadata to associate with the instance
@@ -138,31 +108,30 @@ options:
- The network to attach to the instances. If specified, you must include
ALL networks including the public and private interfaces. Can be C(id)
or C(label).
- default: ['public', 'private']
+ default:
+ - public
+ - private
version_added: 1.4
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
wait:
description:
- wait for the instance to be in state 'running' before returning
default: "no"
- choices: [ "yes", "no" ]
+ choices:
+ - "yes"
+ - "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
-requirements: [ "pyrax" ]
author: Jesse Keating, Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
@@ -206,18 +175,18 @@ EXAMPLES = '''
register: rax
'''
-import sys
-import time
import os
import re
+import time
+
from uuid import UUID
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax is required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
ACTIVE_STATUSES = ('ACTIVE', 'BUILD', 'HARD_REBOOT', 'MIGRATING', 'PASSWORD',
'REBOOT', 'REBUILD', 'RESCUE', 'RESIZE', 'REVERT_RESIZE')
@@ -246,7 +215,8 @@ def pyrax_object_to_dict(obj):
def create(module, names, flavor, image, meta, key_name, files,
- wait, wait_timeout, disk_config, group, nics):
+ wait, wait_timeout, disk_config, group, nics,
+ extra_create_args):
cs = pyrax.cloudservers
changed = False
@@ -266,7 +236,8 @@ def create(module, names, flavor, image, meta, key_name, files,
flavor=flavor, meta=meta,
key_name=key_name,
files=files, nics=nics,
- disk_config=disk_config))
+ disk_config=disk_config,
+ **extra_create_args))
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
@@ -405,11 +376,19 @@ def delete(module, instance_ids, wait, wait_timeout):
def cloudservers(module, state, name, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config, count, group,
instance_ids, exact_count, networks, count_offset,
- auto_increment):
+ auto_increment, extra_create_args):
cs = pyrax.cloudservers
cnw = pyrax.cloud_networks
+ if not cnw:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
servers = []
+ for key, value in meta.items():
+ meta[key] = repr(value)
+
# Add the group meta key
if group and 'group' not in meta:
meta['group'] = group
@@ -602,7 +581,7 @@ def cloudservers(module, state, name, flavor, image, meta, key_name, files,
names = [name] * (count - len(servers))
create(module, names, flavor, image, meta, key_name, files,
- wait, wait_timeout, disk_config, group, nics)
+ wait, wait_timeout, disk_config, group, nics, extra_create_args)
elif state == 'absent':
if instance_ids is None:
@@ -642,11 +621,13 @@ def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
- auto_increment=dict(choices=BOOLEANS, default=True, type='bool'),
+ auto_increment=dict(default=True, type='bool'),
count=dict(default=1, type='int'),
count_offset=dict(default=1, type='int'),
disk_config=dict(choices=['auto', 'manual']),
- exact_count=dict(choices=BOOLEANS, default=False, type='bool'),
+ exact_count=dict(default=False, type='bool'),
+ extra_client_args=dict(type='dict', default={}),
+ extra_create_args=dict(type='dict', default={}),
files=dict(type='dict', default={}),
flavor=dict(),
group=dict(),
@@ -658,7 +639,7 @@ def main():
networks=dict(type='list', default=['public', 'private']),
service=dict(),
state=dict(default='present', choices=['present', 'absent']),
- wait=dict(choices=BOOLEANS, default=False, type='bool'),
+ wait=dict(default=False, type='bool'),
wait_timeout=dict(default=300),
)
)
@@ -668,6 +649,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
service = module.params.get('service')
if service is not None:
@@ -682,6 +666,8 @@ def main():
if disk_config:
disk_config = disk_config.upper()
exact_count = module.params.get('exact_count', False)
+ extra_client_args = module.params.get('extra_client_args')
+ extra_create_args = module.params.get('extra_create_args')
files = module.params.get('files')
flavor = module.params.get('flavor')
group = module.params.get('group')
@@ -697,10 +683,23 @@ def main():
setup_rax_module(module, pyrax)
+ if extra_client_args:
+ pyrax.cloudservers = pyrax.connect_to_cloudservers(
+ region=pyrax.cloudservers.client.region_name,
+ **extra_client_args)
+ client = pyrax.cloudservers.client
+ if 'bypass_url' in extra_client_args:
+ client.management_url = extra_client_args['bypass_url']
+
+ if pyrax.cloudservers is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
cloudservers(module, state, name, flavor, image, meta, key_name, files,
wait, wait_timeout, disk_config, count, group,
instance_ids, exact_count, networks, count_offset,
- auto_increment)
+ auto_increment, extra_create_args)
# import module snippets
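
One detail worth noting in the meta handling added above: repr() turns every value
into a string (the API requires string metadata values), but for values that are
already strings it adds an extra layer of quotes. Illustrative sketch:

    meta = {'group': 'web', 'count': 3}
    coerced = dict((k, repr(v)) for k, v in meta.items())
    print coerced   # {'count': '3', 'group': "'web'"} -- note the nested quotes

If the nested quoting is unintended, str(value) would avoid it.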
diff --git a/cloud/rax_cbs b/cloud/rax_cbs
new file mode 100644
index 00000000000..443c833e7d0
--- /dev/null
+++ b/cloud/rax_cbs
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_cbs
+short_description: Manipulate Rackspace Cloud Block Storage Volumes
+description:
+ - Manipulate Rackspace Cloud Block Storage Volumes
+version_added: 1.6
+options:
+ description:
+ description:
+ - Description to give the volume being created
+ default: null
+ meta:
+ description:
+ - A hash of metadata to associate with the volume
+ default: null
+ name:
+ description:
+ - Name to give the volume being created
+ default: null
+ required: true
+ size:
+ description:
+ - Size of the volume to create in Gigabytes
+ default: 100
+ required: true
+ snapshot_id:
+ description:
+ - The id of the snapshot to create the volume from
+ default: null
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ required: true
+ volume_type:
+ description:
+ - Type of the volume being created
+ choices:
+ - SATA
+ - SSD
+ default: SATA
+ required: true
+ wait:
+ description:
+ - wait for the volume to be in state 'available' before returning
+ default: "no"
+ choices:
+ - "yes"
+ - "no"
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: Christopher H. Laco, Matt Martz
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Build a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume create request
+ local_action:
+ module: rax_cbs
+ credentials: ~/.raxpub
+ name: my-volume
+ description: My Volume
+ volume_type: SSD
+ size: 150
+ region: DFW
+ wait: yes
+ state: present
+ meta:
+ app: my-cool-app
+ register: my_volume
+'''
+
+import sys
+
+from uuid import UUID
+from types import NoneType
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
+VOLUME_STATUS = ('available', 'attaching', 'creating', 'deleting', 'in-use',
+ 'error', 'error_deleting')
+
+
+def cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout):
+    for arg, value in (('state', state), ('name', name), ('size', size),
+                       ('volume_type', volume_type)):
+        if not value:
+            module.fail_json(msg='%s is required for rax_cbs' % arg)
+
+ if size < 100:
+ module.fail_json(msg='"size" must be greater than or equal to 100')
+
+ changed = False
+ volume = None
+ instance = {}
+
+ cbs = pyrax.cloud_blockstorage
+
+ if cbs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(name)
+ volume = cbs.get(name)
+ except ValueError:
+ try:
+ volume = cbs.find(name=name)
+ except Exception, e:
+ module.fail_json(msg='%s' % e)
+
+ if state == 'present':
+ if not volume:
+ try:
+ volume = cbs.create(name, size=size, volume_type=volume_type,
+ description=description,
+ metadata=meta,
+ snapshot_id=snapshot_id)
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ if wait:
+ attempts = wait_timeout / 5
+ pyrax.utils.wait_for_build(volume, interval=5,
+ attempts=attempts)
+
+ volume.get()
+ for key, value in vars(volume).iteritems():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait and volume.status not in VOLUME_STATUS:
+ result['msg'] = 'Timeout waiting on %s' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ if volume:
+ try:
+ volume.delete()
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ description=dict(),
+ meta=dict(type='dict', default={}),
+ name=dict(required=True),
+ size=dict(type='int', default=100),
+ snapshot_id=dict(),
+ state=dict(default='present', choices=['present', 'absent']),
+ volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ description = module.params.get('description')
+ meta = module.params.get('meta')
+ name = module.params.get('name')
+ size = module.params.get('size')
+ snapshot_id = module.params.get('snapshot_id')
+ state = module.params.get('state')
+ volume_type = module.params.get('volume_type')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage(module, state, name, description, meta, size,
+ snapshot_id, volume_type, wait, wait_timeout)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+### invoke the module
+main()
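
The volume lookup above treats its argument as an id when it parses as a UUID and
falls back to a name search otherwise; the same dispatch in isolation, with
illustrative values:

    from uuid import UUID

    def looks_like_uuid(value):
        try:
            UUID(value)
            return True
        except ValueError:
            return False

    print looks_like_uuid('my-volume')                             # False: search by name
    print looks_like_uuid('5af9a1ec-1e43-4a49-9aa2-6b0eafcc2a1f')  # True: fetch by id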
diff --git a/cloud/rax_cbs_attachments b/cloud/rax_cbs_attachments
new file mode 100644
index 00000000000..bc7dba9eec2
--- /dev/null
+++ b/cloud/rax_cbs_attachments
@@ -0,0 +1,268 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see .
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_cbs_attachments
+short_description: Manipulate Rackspace Cloud Block Storage Volume Attachments
+description:
+ - Manipulate Rackspace Cloud Block Storage Volume Attachments
+version_added: 1.6
+options:
+ device:
+ description:
+ - The device path to attach the volume to, e.g. /dev/xvde
+ default: null
+ required: true
+ volume:
+ description:
+ - Name or id of the volume to attach/detach
+ default: null
+ required: true
+ server:
+ description:
+ - Name or id of the server to attach/detach
+ default: null
+ required: true
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+ required: true
+ wait:
+ description:
+ - wait for the volume to be in 'in-use'/'available' state before returning
+ default: "no"
+ choices:
+ - "yes"
+ - "no"
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
+author: Christopher H. Laco, Matt Martz
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Attach a Block Storage Volume
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Storage volume attach request
+ local_action:
+ module: rax_cbs_attachments
+ credentials: ~/.raxpub
+ volume: my-volume
+ server: my-server
+ device: /dev/xvdd
+ region: DFW
+ wait: yes
+ state: present
+ register: my_volume
+'''
+
+import sys
+
+from uuid import UUID
+from types import NoneType
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
+
+
+def cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout):
+    for arg, value in (('state', state), ('volume', volume),
+                       ('server', server), ('device', device)):
+        if not value:
+            module.fail_json(msg='%s is required for rax_cbs_attachments' %
+                             arg)
+
+ cbs = pyrax.cloud_blockstorage
+ cs = pyrax.cloudservers
+
+ if cbs is None or cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ changed = False
+ instance = {}
+
+ try:
+ UUID(volume)
+ volume = cbs.get(volume)
+ except ValueError:
+ try:
+ volume = cbs.find(name=volume)
+ except Exception, e:
+ module.fail_json(msg='%s' % e)
+
+ if not volume:
+ module.fail_json(msg='No matching storage volumes were found')
+
+ if state == 'present':
+ try:
+ UUID(server)
+ server = cs.servers.get(server)
+ except ValueError:
+ servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
+ if not servers:
+ module.fail_json(msg='No Server was matched by name, '
+ 'try using the Server ID instead')
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers matched by name, '
+ 'try using the Server ID instead')
+
+ # We made it this far, grab the first and hopefully only server
+ # in the list
+ server = servers[0]
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ changed = False
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+ else:
+ try:
+ volume.attach_to_instance(server, mountpoint=device)
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ volume.get()
+
+ for key, value in vars(volume).iteritems():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+ elif wait:
+ attempts = wait_timeout / 5
+ pyrax.utils.wait_until(volume, 'status', 'in-use',
+ interval=5, attempts=attempts)
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ elif state == 'absent':
+ try:
+ UUID(server)
+ server = cs.servers.get(server)
+ except ValueError:
+ servers = cs.servers.list(search_opts=dict(name='^%s$' % server))
+ if not servers:
+ module.fail_json(msg='No Server was matched by name, '
+ 'try using the Server ID instead')
+ if len(servers) > 1:
+ module.fail_json(msg='Multiple servers matched by name, '
+ 'try using the Server ID instead')
+
+ # We made it this far, grab the first and hopefully only server
+ # in the list
+ server = servers[0]
+
+ if (volume.attachments and
+ volume.attachments[0]['server_id'] == server.id):
+ try:
+ volume.detach()
+ if wait:
+ pyrax.utils.wait_until(volume, 'status', 'available',
+ interval=3, attempts=0,
+ verbose=False)
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+        volume.get()
+ elif volume.attachments:
+ module.fail_json(msg='Volume is attached to another server')
+
+ for key, value in vars(volume).iteritems():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ result = dict(changed=changed, volume=instance)
+
+ if volume.status == 'error':
+ result['msg'] = '%s failed to build' % volume.id
+
+ if 'msg' in result:
+ module.fail_json(**result)
+ else:
+ module.exit_json(**result)
+
+ module.exit_json(changed=changed, volume=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ device=dict(required=True),
+ volume=dict(required=True),
+ server=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ wait=dict(type='bool', default=False),
+ wait_timeout=dict(type='int', default=300)
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ device = module.params.get('device')
+ volume = module.params.get('volume')
+ server = module.params.get('server')
+ state = module.params.get('state')
+ wait = module.params.get('wait')
+ wait_timeout = module.params.get('wait_timeout')
+
+ setup_rax_module(module, pyrax)
+
+ cloud_block_storage_attachments(module, state, volume, server, device,
+ wait, wait_timeout)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+### invoke the module
+main()
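
Both rax_cbs modules turn wait_timeout into a pyrax attempts count by polling on a
fixed interval, so attempts * interval roughly equals wait_timeout. A generic,
library-free sketch of that pattern:

    import time

    def wait_for(predicate, interval=5, timeout=300):
        # poll every `interval` seconds until predicate() holds or timeout elapses
        attempts = timeout / interval
        for _ in range(attempts):
            if predicate():
                return True
            time.sleep(interval)
        return False

    # e.g. wait_for(lambda: volume.status == 'in-use', interval=5, timeout=300)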
diff --git a/cloud/rax_clb b/cloud/rax_clb
index bd653eff8e8..85700895c7c 100644
--- a/cloud/rax_clb
+++ b/cloud/rax_clb
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb
@@ -25,17 +27,13 @@ options:
algorithm:
description:
- algorithm for the balancer being created
- choices: ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN', 'WEIGHTED_LEAST_CONNECTIONS', 'WEIGHTED_ROUND_ROBIN']
+ choices:
+ - RANDOM
+ - LEAST_CONNECTIONS
+ - ROUND_ROBIN
+ - WEIGHTED_LEAST_CONNECTIONS
+ - WEIGHTED_ROUND_ROBIN
default: LEAST_CONNECTIONS
- api_key:
- description:
- - Rackspace API key (overrides C(credentials))
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
- default: null
- aliases: ['creds_file']
meta:
description:
- A hash of metadata to associate with the instance
@@ -51,16 +49,32 @@ options:
protocol:
description:
- Protocol for the balancer being created
- choices: ['DNS_TCP', 'DNS_UDP' ,'FTP', 'HTTP', 'HTTPS', 'IMAPS', 'IMAPv4', 'LDAP', 'LDAPS', 'MYSQL', 'POP3', 'POP3S', 'SMTP', 'TCP', 'TCP_CLIENT_FIRST', 'UDP', 'UDP_STREAM', 'SFTP']
+ choices:
+ - DNS_TCP
+ - DNS_UDP
+ - FTP
+ - HTTP
+ - HTTPS
+ - IMAPS
+ - IMAPv4
+ - LDAP
+ - LDAPS
+ - MYSQL
+ - POP3
+ - POP3S
+ - SMTP
+ - TCP
+ - TCP_CLIENT_FIRST
+ - UDP
+ - UDP_STREAM
+ - SFTP
default: HTTP
- region:
- description:
- - Region to create the load balancer in
- default: DFW
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
timeout:
description:
@@ -69,11 +83,10 @@ options:
type:
description:
- type of interface for the balancer being created
- choices: ['PUBLIC', 'SERVICENET']
+ choices:
+ - PUBLIC
+ - SERVICENET
default: PUBLIC
- username:
- description:
- - Rackspace username (overrides C(credentials))
vip_id:
description:
- Virtual IP ID to use when creating the load balancer for purposes of
@@ -83,20 +96,15 @@ options:
description:
- wait for the balancer to be in state 'running' before returning
default: "no"
- choices: [ "yes", "no" ]
+ choices:
+ - "yes"
+ - "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
-requirements: [ "pyrax" ]
author: Christopher H. Laco, Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -122,15 +130,13 @@ EXAMPLES = '''
register: my_lb
'''
-import sys
-
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
ALGORITHMS = ['RANDOM', 'LEAST_CONNECTIONS', 'ROUND_ROBIN',
@@ -182,6 +188,10 @@ def cloud_load_balancer(module, state, name, meta, algorithm, port, protocol,
balancers = []
clb = pyrax.cloud_loadbalancers
+ if not clb:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
for balancer in clb.list():
if name != balancer.name and name != balancer.id:
@@ -300,6 +310,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
algorithm = module.params.get('algorithm')
meta = module.params.get('meta')
name = module.params.get('name')
diff --git a/cloud/rax_clb_nodes b/cloud/rax_clb_nodes
index f34fe6dde83..dc0950dca58 100644
--- a/cloud/rax_clb_nodes
+++ b/cloud/rax_clb_nodes
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_clb_nodes
@@ -26,21 +28,15 @@ options:
required: false
description:
- IP address or domain name of the node
- api_key:
- required: false
- description:
- - Rackspace API key (overrides C(credentials))
condition:
required: false
- choices: [ "enabled", "disabled", "draining" ]
+ choices:
+ - enabled
+ - disabled
+ - draining
description:
- Condition for the node, which determines its role within the load
balancer
- credentials:
- required: false
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
load_balancer_id:
required: true
type: integer
@@ -56,35 +52,27 @@ options:
type: integer
description:
- Port number of the load balanced service on the node
- region:
- required: false
- description:
- - Region to authenticate in
state:
required: false
default: "present"
- choices: [ "present", "absent" ]
+ choices:
+ - present
+ - absent
description:
- Indicate desired state of the node
type:
required: false
- choices: [ "primary", "secondary" ]
+ choices:
+ - primary
+ - secondary
description:
- Type of node
- username:
- required: false
- description:
- - Rackspace username (overrides C(credentials))
- virtualenv:
- required: false
- description:
- - Path to a virtualenv that should be activated before doing anything.
- The virtualenv has to already exist. Useful if installing pyrax
- globally is not an option.
wait:
required: false
default: "no"
- choices: [ "yes", "no" ]
+ choices:
+ - "yes"
+ - "no"
description:
- Wait for the load balancer to become active before returning
wait_timeout:
@@ -97,11 +85,8 @@ options:
required: false
description:
- Weight of node
-requirements: [ "pyrax" ]
author: Lukasz Kawczynski
-notes:
- - "The following environment variables can be used: C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDENTIALS) and C(RAX_REGION)."
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -136,13 +121,12 @@ EXAMPLES = '''
'''
import os
-import sys
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax is required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
def _activate_virtualenv(path):
@@ -151,11 +135,20 @@ def _activate_virtualenv(path):
execfile(activate_this, dict(__file__=activate_this))
-def _get_node(lb, node_id):
- """Return a node with the given `node_id`"""
- for node in lb.nodes:
- if node.id == node_id:
+def _get_node(lb, node_id=None, address=None, port=None):
+ """Return a matching node"""
+ for node in getattr(lb, 'nodes', []):
+ match_list = []
+ if node_id is not None:
+ match_list.append(getattr(node, 'id', None) == node_id)
+ if address is not None:
+ match_list.append(getattr(node, 'address', None) == address)
+ if port is not None:
+ match_list.append(getattr(node, 'port', None) == port)
+
+ if match_list and all(match_list):
return node
+
return None
@@ -211,6 +204,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
address = module.params['address']
condition = (module.params['condition'] and
module.params['condition'].upper())
@@ -234,18 +230,16 @@ def main():
setup_rax_module(module, pyrax)
if not pyrax.cloud_loadbalancers:
- module.fail_json(msg='Failed to instantiate load balancer client '
- '(possibly incorrect region)')
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
try:
lb = pyrax.cloud_loadbalancers.get(load_balancer_id)
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
- if node_id:
- node = _get_node(lb, node_id)
- else:
- node = None
+ node = _get_node(lb, node_id, address, port)
result = _node_to_dict(node)
@@ -284,22 +278,12 @@ def main():
except pyrax.exc.PyraxException, e:
module.fail_json(msg='%s' % e.message)
else: # Updating an existing node
- immutable = {
- 'address': address,
- 'port': port,
- }
-
mutable = {
'condition': condition,
'type': typ,
'weight': weight,
}
- for name, value in immutable.items():
- if value:
- module.fail_json(
- msg='Attribute %s cannot be modified' % name)
-
for name, value in mutable.items():
if value is None or value == getattr(node, name):
mutable.pop(name)
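
The reworked _get_node above matches on any combination of node id, address, and port: each supplied criterion is appended to match_list, and the node is returned only when the list is non-empty and every entry holds. A self-contained sketch of that logic, with a hypothetical FakeNode standing in for pyrax node objects:

    class FakeNode(object):
        def __init__(self, id, address, port):
            self.id, self.address, self.port = id, address, port

    def find_node(nodes, node_id=None, address=None, port=None):
        for node in nodes:
            checks = []
            if node_id is not None:
                checks.append(getattr(node, 'id', None) == node_id)
            if address is not None:
                checks.append(getattr(node, 'address', None) == address)
            if port is not None:
                checks.append(getattr(node, 'port', None) == port)
            # all([]) is True, so an empty criteria set must not match.
            if checks and all(checks):
                return node
        return None

    nodes = [FakeNode(1, '10.0.0.1', 80), FakeNode(2, '10.0.0.2', 443)]
    assert find_node(nodes, address='10.0.0.2', port=443).id == 2
    assert find_node(nodes) is None
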
diff --git a/cloud/rax_dns b/cloud/rax_dns
index 4c47d55fbbf..c12d09fb1ad 100644
--- a/cloud/rax_dns
+++ b/cloud/rax_dns
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns
@@ -22,18 +24,9 @@ description:
- Manage domains on Rackspace Cloud DNS
version_added: 1.5
options:
- api_key:
- description:
- - Rackspace API key (overrides C(credentials))
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
- default: null
- aliases: ['creds_file']
email:
description:
- Email address of the domain administrator
@@ -43,24 +36,16 @@ options:
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
ttl:
description:
- Time to live of domain in seconds
default: 3600
- username:
- description:
- - Rackspace username (overrides C(credentials))
-requirements: [ "pyrax" ]
author: Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -77,16 +62,13 @@ EXAMPLES = '''
register: rax_dns
'''
-import sys
-import os
-
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
@@ -104,6 +86,10 @@ def rax_dns(module, comment, email, name, state, ttl):
changed = False
dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
if state == 'present':
if not email:
@@ -174,6 +160,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
comment = module.params.get('comment')
email = module.params.get('email')
name = module.params.get('name')
diff --git a/cloud/rax_dns_record b/cloud/rax_dns_record
index 3e7f37f0def..d1e79983604 100644
--- a/cloud/rax_dns_record
+++ b/cloud/rax_dns_record
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
@@ -22,18 +24,9 @@ description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
- api_key:
- description:
- - Rackspace API key (overrides C(credentials))
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
- default: null
- aliases: ['creds_file']
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
@@ -54,7 +47,9 @@ options:
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
ttl:
description:
@@ -63,20 +58,17 @@ options:
type:
description:
- DNS record type
- choices: ['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SRV', 'TXT']
+ choices:
+ - A
+ - AAAA
+ - CNAME
+ - MX
+ - NS
+ - SRV
+ - TXT
default: A
- username:
- description:
- - Rackspace username (overrides C(credentials))
-requirements: [ "pyrax" ]
author: Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -95,16 +87,13 @@ EXAMPLES = '''
register: rax_dns_record
'''
-import sys
-import os
-
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
@@ -123,6 +112,10 @@ def rax_dns_record(module, comment, data, domain, name, priority, record_type,
changed = False
dns = pyrax.cloud_dns
+ if not dns:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
@@ -219,6 +212,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
comment = module.params.get('comment')
data = module.params.get('data')
domain = module.params.get('domain')
diff --git a/cloud/rax_facts b/cloud/rax_facts
index ca117a665a1..64711f41519 100644
--- a/cloud/rax_facts
+++ b/cloud/rax_facts
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_facts
@@ -22,52 +24,6 @@ description:
- Gather facts for Rackspace Cloud Servers.
version_added: "1.4"
options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- aliases:
- - password
- auth_endpoint:
- description:
- - The URI of the authentication service
- default: https://identity.api.rackspacecloud.com/v2.0/
- version_added: 1.5
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases:
- - creds_file
- env:
- description:
- - Environment as configured in ~/.pyrax.cfg,
- see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
- version_added: 1.5
- identity_type:
- description:
- - Authentication machanism to use, such as rackspace or keystone
- default: rackspace
- version_added: 1.5
- region:
- description:
- - Region to create an instance in
- default: DFW
- tenant_id:
- description:
- - The tenant ID used for authentication
- version_added: 1.5
- tenant_name:
- description:
- - The tenant name used for authentication
- version_added: 1.5
- username:
- description:
- - Rackspace username (overrides I(credentials))
- verify_ssl:
- description:
- - Whether or not to require SSL validation of API endpoints
- version_added: 1.5
address:
description:
- Server IP address to retrieve facts for, will match any IP assigned to
@@ -79,15 +35,8 @@ options:
description:
- Server name to retrieve facts for
default: null
-requirements: [ "pyrax" ]
author: Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
@@ -106,16 +55,13 @@ EXAMPLES = '''
ansible_ssh_host: "{{ rax_accessipv4 }}"
'''
-import sys
-import os
-
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
@@ -138,6 +84,12 @@ def rax_facts(module, address, name, server_id):
changed = False
cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
ansible_facts = {}
search_opts = {}
@@ -190,6 +142,9 @@ def main():
required_one_of=[['address', 'id', 'name']],
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
address = module.params.get('address')
server_id = module.params.get('id')
name = module.params.get('name')
diff --git a/cloud/rax_files b/cloud/rax_files
index 564cdb578d6..68e28a07f74 100644
--- a/cloud/rax_files
+++ b/cloud/rax_files
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# (c) 2013, Paul Durivage
#
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files
@@ -25,25 +27,18 @@ description:
- Manipulate Rackspace Cloud Files Containers
version_added: "1.5"
options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing containers.
Selecting this option is only appropriate when setting type=meta
- choices: ["yes", "no"]
+ choices:
+ - "yes"
+ - "no"
default: "no"
container:
description:
- The container to use for container or metadata operations.
required: true
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases: ['creds_file']
meta:
description:
- A hash of items to set as metadata values on a container
@@ -59,6 +54,11 @@ options:
description:
- Region to create an instance in
default: DFW
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
ttl:
description:
- In seconds, set a container-wide TTL for all objects cached on CDN edge nodes.
@@ -66,26 +66,18 @@ options:
type:
description:
- Type of object to do work on, i.e. metadata object or a container object
- choices: ["file", "meta"]
- default: "file"
- username:
- description:
- - Rackspace username (overrides I(credentials))
+ choices:
+ - file
+ - meta
+ default: file
web_error:
description:
- Sets an object to be presented as the HTTP error page when accessed by the CDN URL
web_index:
description:
- Sets an object to be presented as the HTTP index page when accessed by the CDN URL
-requirements: [ "pyrax" ]
author: Paul Durivage
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -151,9 +143,9 @@ from ansible import __version__
try:
import pyrax
+ HAS_PYRAX = True
except ImportError, e:
- print("failed=True msg='pyrax is required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
EXIT_DICT = dict(success=True)
META_PREFIX = 'x-container-meta-'
@@ -208,7 +200,8 @@ def meta(cf, module, container_, state, meta_, clear_meta):
module.exit_json(**EXIT_DICT)
-def container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error):
+def container(cf, module, container_, state, meta_, clear_meta, ttl, public,
+ private, web_index, web_error):
if public and private:
module.fail_json(msg='container cannot be simultaneously '
'set to public and private')
@@ -232,6 +225,7 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
except Exception, e:
module.fail_json(msg=e.message)
else:
+ EXIT_DICT['changed'] = True
EXIT_DICT['created'] = True
else:
module.fail_json(msg=e.message)
@@ -304,11 +298,9 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
EXIT_DICT['container'] = c.name
EXIT_DICT['objs_in_container'] = c.object_count
EXIT_DICT['total_bytes'] = c.total_bytes
-
+
_locals = locals().keys()
-
- if ('cont_created' in _locals
- or 'cont_deleted' in _locals
+ if ('cont_deleted' in _locals
or 'meta_set' in _locals
or 'cont_public' in _locals
or 'cont_private' in _locals
@@ -319,15 +311,23 @@ def container(cf, module, container_, state, meta_, clear_meta, ttl, public, pri
module.exit_json(**EXIT_DICT)
-def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
- cf.user_agent = USER_AGENT
+def cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error):
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
- if typ == "container":
- container(cf, module, container_, state, meta_, clear_meta, ttl, public, private, web_index, web_error)
- else:
- meta(cf, module, container_, state, meta_, clear_meta)
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cf.user_agent = USER_AGENT
+
+ if typ == "container":
+ container(cf, module, container_, state, meta_, clear_meta, ttl,
+ public, private, web_index, web_error)
+ else:
+ meta(cf, module, container_, state, meta_, clear_meta)
def main():
@@ -335,13 +335,14 @@ def main():
argument_spec.update(
dict(
container=dict(),
- state=dict(choices=['present', 'absent', 'list'], default='present'),
+ state=dict(choices=['present', 'absent', 'list'],
+ default='present'),
meta=dict(type='dict', default=dict()),
- clear_meta=dict(choices=BOOLEANS, default=False, type='bool'),
+ clear_meta=dict(default=False, type='bool'),
type=dict(choices=['container', 'meta'], default='container'),
ttl=dict(type='int'),
- public=dict(choices=BOOLEANS, default=False, type='bool'),
- private=dict(choices=BOOLEANS, default=False, type='bool'),
+ public=dict(default=False, type='bool'),
+ private=dict(default=False, type='bool'),
web_index=dict(),
web_error=dict()
)
@@ -352,6 +353,9 @@ def main():
required_together=rax_required_together()
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
container_ = module.params.get('container')
state = module.params.get('state')
meta_ = module.params.get('meta')
@@ -366,10 +370,12 @@ def main():
if state in ['present', 'absent'] and not container_:
module.fail_json(msg='please specify a container name')
if clear_meta and not typ == 'meta':
- module.fail_json(msg='clear_meta can only be used when setting metadata')
+ module.fail_json(msg='clear_meta can only be used when setting '
+ 'metadata')
setup_rax_module(module, pyrax)
- cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public, private, web_index, web_error)
+ cloudfiles(module, container_, state, meta_, clear_meta, typ, ttl, public,
+ private, web_index, web_error)
from ansible.module_utils.basic import *
diff --git a/cloud/rax_files_objects b/cloud/rax_files_objects
index b628ff14027..d7f11900ab9 100644
--- a/cloud/rax_files_objects
+++ b/cloud/rax_files_objects
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# (c) 2013, Paul Durivage
#
@@ -17,6 +17,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_files_objects
@@ -25,26 +27,19 @@ description:
- Upload, download, and delete objects in Rackspace Cloud Files
version_added: "1.5"
options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- default: null
clear_meta:
description:
- Optionally clear existing metadata when applying metadata to existing objects.
Selecting this option is only appropriate when setting type=meta
- choices: ["yes", "no"]
+ choices:
+ - "yes"
+ - "no"
default: "no"
container:
description:
- The container to use for file object operations.
required: true
default: null
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and I(username) are provided)
- default: null
- aliases: ['creds_file']
dest:
description:
- The destination of a "get" operation; i.e. a local directory, "/home/user/myfolder".
@@ -64,12 +59,11 @@ options:
- The method of operation to be performed. For example, put to upload files
to Cloud Files, get to download files from Cloud Files or delete to delete
remote objects in Cloud Files
- choices: ["get", "put", "delete"]
- default: "get"
- region:
- description:
- - Region in which to work. Maps to a Rackspace Cloud region, i.e. DFW, ORD, IAD, SYD, LON
- default: DFW
+ choices:
+ - get
+ - put
+ - delete
+ default: get
src:
description:
- Source from which to upload files. Used to specify a remote object as a source for
@@ -81,27 +75,25 @@ options:
- Used to specify whether to maintain nested directory structure when downloading objects
from Cloud Files. Setting to false downloads the contents of a container to a single,
flat directory
- choices: ["yes", "no"]
+ choices:
+        - "yes"
+ - "no"
default: "yes"
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
type:
description:
- Type of object to do work on
- Metadata object or a file object
- choices: ["file", "meta"]
- default: "file"
- username:
- description:
- - Rackspace username (overrides I(credentials))
- default: null
-requirements: [ "pyrax" ]
+ choices:
+ - file
+ - meta
+ default: file
author: Paul Durivage
-notes:
- - The following environment variables can be used, C(RAX_USERNAME), C(RAX_API_KEY),
- C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file appropriate
- for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -195,9 +187,9 @@ import os
try:
import pyrax
-except ImportError, e:
- print("failed=True msg='pyrax is required for this module'")
- sys.exit(1)
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
EXIT_DICT = dict(success=False)
META_PREFIX = 'x-object-meta-'
@@ -441,7 +433,6 @@ def get_meta(module, cf, container, src, dest):
meta_key = k.split(META_PREFIX)[-1]
results[obj][meta_key] = v
-
EXIT_DICT['container'] = c.name
if results:
EXIT_DICT['meta_results'] = results
@@ -538,28 +529,33 @@ def delete_meta(module, cf, container, src, dest, meta):
def cloudfiles(module, container, src, dest, method, typ, meta, clear_meta,
structure, expires):
- """ Dispatch from here to work with metadata or file objects """
- cf = pyrax.cloudfiles
+ """ Dispatch from here to work with metadata or file objects """
+ cf = pyrax.cloudfiles
- if typ == "file":
- if method == 'put':
- upload(module, cf, container, src, dest, meta, expires)
+ if cf is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
- elif method == 'get':
- download(module, cf, container, src, dest, structure)
+ if typ == "file":
+ if method == 'put':
+ upload(module, cf, container, src, dest, meta, expires)
- elif method == 'delete':
- delete(module, cf, container, src, dest)
+ elif method == 'get':
+ download(module, cf, container, src, dest, structure)
- else:
- if method == 'get':
- get_meta(module, cf, container, src, dest)
+ elif method == 'delete':
+ delete(module, cf, container, src, dest)
- if method == 'put':
- put_meta(module, cf, container, src, dest, meta, clear_meta)
+ else:
+ if method == 'get':
+ get_meta(module, cf, container, src, dest)
+
+ if method == 'put':
+ put_meta(module, cf, container, src, dest, meta, clear_meta)
- if method == 'delete':
- delete_meta(module, cf, container, src, dest, meta)
+ if method == 'delete':
+ delete_meta(module, cf, container, src, dest, meta)
def main():
@@ -572,8 +568,8 @@ def main():
method=dict(default='get', choices=['put', 'get', 'delete']),
type=dict(default='file', choices=['file', 'meta']),
meta=dict(type='dict', default=dict()),
- clear_meta=dict(choices=BOOLEANS, default=False, type='bool'),
- structure=dict(choices=BOOLEANS, default=True, type='bool'),
+ clear_meta=dict(default=False, type='bool'),
+ structure=dict(default=True, type='bool'),
expires=dict(type='int'),
)
)
@@ -583,6 +579,9 @@ def main():
required_together=rax_required_together()
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
container = module.params.get('container')
src = module.params.get('src')
dest = module.params.get('dest')
@@ -603,4 +602,4 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
-main()
\ No newline at end of file
+main()
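
The restructured cloudfiles() above dispatches on the (type, method) pair with nested if/elif branches. A hypothetical dict-based equivalent is sketched below; the module's real helpers take differing extra arguments (expires, structure, clear_meta), which is presumably why explicit branches were kept.

    # Toy handlers; the real module passes each one a different signature.
    def upload(container, src):
        return 'put %s -> %s' % (src, container)

    def download(container, src):
        return 'get %s <- %s' % (src, container)

    DISPATCH = {
        ('file', 'put'): upload,
        ('file', 'get'): download,
    }

    handler = DISPATCH[('file', 'put')]
    print(handler('web', 'index.html'))   # put index.html -> web
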
diff --git a/cloud/rax_identity b/cloud/rax_identity
new file mode 100644
index 00000000000..591cd018e70
--- /dev/null
+++ b/cloud/rax_identity
@@ -0,0 +1,117 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_identity
+short_description: Load Rackspace Cloud Identity
+description:
+ - Verifies Rackspace Cloud credentials and returns identity information
+version_added: "1.5"
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+author: Christopher H. Laco, Matt Martz
+extends_documentation_fragment: rackspace.openstack
+'''
+
+EXAMPLES = '''
+- name: Load Rackspace Cloud Identity
+ gather_facts: False
+ hosts: local
+ connection: local
+ tasks:
+ - name: Load Identity
+ local_action:
+ module: rax_identity
+ credentials: ~/.raxpub
+ region: DFW
+ register: rackspace_identity
+'''
+
+from types import NoneType
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+
+NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
+
+
+def cloud_identity(module, state, identity):
+    for name, arg in (('state', state), ('identity', identity)):
+        if not arg:
+            module.fail_json(msg='%s is required for rax_identity' % name)
+
+ instance = dict(
+ authenticated=identity.authenticated,
+ credentials=identity._creds_file
+ )
+ changed = False
+
+ for key, value in vars(identity).iteritems():
+ if (isinstance(value, NON_CALLABLES) and
+ not key.startswith('_')):
+ instance[key] = value
+
+ if state == 'present':
+ if not identity.authenticated:
+ module.fail_json(msg='Credentials could not be verified!')
+
+ module.exit_json(changed=changed, identity=instance)
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together()
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ state = module.params.get('state')
+
+ setup_rax_module(module, pyrax)
+
+ if pyrax.identity is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ cloud_identity(module, state, pyrax.identity)
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+### invoke the module
+main()
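
cloud_identity() above flattens the pyrax identity object into a plain dict by keeping only public, non-callable attributes, using the same NON_CALLABLES filter the other rax_* modules share. A runnable Python 2 sketch, with a hypothetical FakeIdentity in place of pyrax.identity:

    from types import NoneType  # Python 2, as in the modules above

    NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)

    class FakeIdentity(object):
        def __init__(self):
            self.authenticated = True
            self.region = 'DFW'
            self._creds_file = '~/.raxpub'   # private, filtered out

    def to_instance_dict(obj):
        result = {}
        for key, value in vars(obj).iteritems():
            if isinstance(value, NON_CALLABLES) and not key.startswith('_'):
                result[key] = value
        return result

    print(to_instance_dict(FakeIdentity()))
    # {'region': 'DFW', 'authenticated': True}
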
diff --git a/cloud/rax_keypair b/cloud/rax_keypair
index bd5270b9e3d..458ec5713c4 100644
--- a/cloud/rax_keypair
+++ b/cloud/rax_keypair
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_keypair
@@ -22,52 +24,6 @@ description:
- Create a keypair for use with Rackspace Cloud Servers
version_added: 1.5
options:
- api_key:
- description:
- - Rackspace API key (overrides I(credentials))
- aliases:
- - password
- auth_endpoint:
- description:
- - The URI of the authentication service
- default: https://identity.api.rackspacecloud.com/v2.0/
- version_added: 1.5
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if I(api_key) and
- I(username) are provided)
- default: null
- aliases:
- - creds_file
- env:
- description:
- - Environment as configured in ~/.pyrax.cfg,
- see https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration
- version_added: 1.5
- identity_type:
- description:
- - Authentication machanism to use, such as rackspace or keystone
- default: rackspace
- version_added: 1.5
- region:
- description:
- - Region to create an instance in
- default: DFW
- tenant_id:
- description:
- - The tenant ID used for authentication
- version_added: 1.5
- tenant_name:
- description:
- - The tenant name used for authentication
- version_added: 1.5
- username:
- description:
- - Rackspace username (overrides I(credentials))
- verify_ssl:
- description:
- - Whether or not to require SSL validation of API endpoints
- version_added: 1.5
name:
description:
- Name of keypair
@@ -79,24 +35,20 @@ options:
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
-requirements: [ "pyrax" ]
author: Matt Martz
notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
- Keypairs cannot be manipulated, only created and deleted. To "update" a
keypair you must first delete and then recreate.
+extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Create a keypair
- hosts: local
+ hosts: localhost
gather_facts: False
tasks:
- name: keypair request
@@ -116,17 +68,28 @@ EXAMPLES = '''
module: copy
content: "{{ keypair.keypair.private_key }}"
dest: "{{ inventory_dir }}/{{ keypair.keypair.name }}"
-'''
-import sys
+- name: Create a keypair
+ hosts: localhost
+ gather_facts: False
+ tasks:
+ - name: keypair request
+ local_action:
+ module: rax_keypair
+ credentials: ~/.raxpub
+ name: my_keypair
+ public_key: "{{ lookup('file', 'authorized_keys/id_rsa.pub') }}"
+ region: DFW
+ register: keypair
+'''
from types import NoneType
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
NON_CALLABLES = (basestring, bool, dict, int, list, NoneType)
@@ -144,6 +107,12 @@ def rax_keypair(module, name, public_key, state):
changed = False
cs = pyrax.cloudservers
+
+ if cs is None:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
keypair = {}
if state == 'present':
@@ -189,6 +158,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
name = module.params.get('name')
public_key = module.params.get('public_key')
state = module.params.get('state')
diff --git a/cloud/rax_network b/cloud/rax_network
index 05f3f554e36..bc4745a7a84 100644
--- a/cloud/rax_network
+++ b/cloud/rax_network
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_network
@@ -25,20 +27,10 @@ options:
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
- default: null
- aliases: ['creds_file']
- api_key:
- description:
- - Rackspace API key (overrides C(credentials))
- username:
- description:
- - Rackspace username (overrides C(credentials))
label:
description:
- Label (name) to give the network
@@ -47,19 +39,8 @@ options:
description:
- cidr of the network being created
default: null
- region:
- description:
- - Region to create the network in
- default: DFW
-requirements: [ "pyrax" ]
author: Christopher H. Laco, Jesse Keating
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS) points to a credentials file
- appropriate for pyrax
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
@@ -76,16 +57,11 @@ EXAMPLES = '''
state: present
'''
-import sys
-import os
-
try:
import pyrax
- import pyrax.utils
- from pyrax import exc
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
def cloud_network(module, state, label, cidr):
@@ -97,10 +73,15 @@ def cloud_network(module, state, label, cidr):
network = None
networks = []
+ if not pyrax.cloud_networks:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
if state == 'present':
try:
network = pyrax.cloud_networks.find_network_by_label(label)
- except exc.NetworkNotFound:
+ except pyrax.exceptions.NetworkNotFound:
try:
network = pyrax.cloud_networks.create(label, cidr=cidr)
changed = True
@@ -114,7 +95,7 @@ def cloud_network(module, state, label, cidr):
network = pyrax.cloud_networks.find_network_by_label(label)
network.delete()
changed = True
- except exc.NetworkNotFound:
+ except pyrax.exceptions.NetworkNotFound:
pass
except Exception, e:
module.fail_json(msg='%s' % e.message)
@@ -144,6 +125,9 @@ def main():
required_together=rax_required_together(),
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
state = module.params.get('state')
label = module.params.get('label')
cidr = module.params.get('cidr')
diff --git a/cloud/rax_queue b/cloud/rax_queue
index ee873739a34..d3e5ac3f81e 100644
--- a/cloud/rax_queue
+++ b/cloud/rax_queue
@@ -1,4 +1,4 @@
-#!/usr/bin/python -tt
+#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+# This is a DOCUMENTATION stub specific to this module; it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_queue
@@ -22,40 +24,19 @@ description:
- creates / deletes a Rackspace Public Cloud queue.
version_added: "1.5"
options:
- api_key:
- description:
- - Rackspace API key (overrides C(credentials))
- credentials:
- description:
- - File to find the Rackspace credentials in (ignored if C(api_key) and
- C(username) are provided)
- default: null
- aliases: ['creds_file']
name:
description:
- Name to give the queue
default: null
- region:
- description:
- - Region to create the load balancer in
- default: DFW
state:
description:
- Indicate desired state of the resource
- choices: ['present', 'absent']
+ choices:
+ - present
+ - absent
default: present
- username:
- description:
- - Rackspace username (overrides C(credentials))
-requirements: [ "pyrax" ]
author: Christopher H. Laco, Matt Martz
-notes:
- - The following environment variables can be used, C(RAX_USERNAME),
- C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
- - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) points to a credentials file
- appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
- - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
- - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
+extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
@@ -68,22 +49,17 @@ EXAMPLES = '''
local_action:
module: rax_queue
credentials: ~/.raxpub
- client_id: unique-client-name
name: my-queue
region: DFW
state: present
register: my_queue
'''
-import sys
-import os
-
-
try:
import pyrax
+ HAS_PYRAX = True
except ImportError:
- print("failed=True msg='pyrax is required for this module'")
- sys.exit(1)
+ HAS_PYRAX = False
def cloud_queue(module, state, name):
@@ -96,6 +72,10 @@ def cloud_queue(module, state, name):
instance = {}
cq = pyrax.queues
+ if not cq:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
for queue in cq.list():
if name != queue.name:
@@ -146,6 +126,9 @@ def main():
required_together=rax_required_together()
)
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
name = module.params.get('name')
state = module.params.get('state')
diff --git a/cloud/rds b/cloud/rds
index d0eeaf35ba5..cde7c5bcf20 100644
--- a/cloud/rds
+++ b/cloud/rds
@@ -60,7 +60,7 @@ options:
required: false
default: null
aliases: []
- choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge' ]
+ choices: [ 'db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge' ]
username:
description:
- Master database username. Used only when command=create.
@@ -131,7 +131,7 @@ options:
aliases: []
port:
description:
- - Port number that the DB instance uses for connections. Defaults to 3306 for mysql, 1521 for Oracle, 1443 for SQL Server. Used only when command=create or command=replicate.
+ - Port number that the DB instance uses for connections. Defaults to 3306 for mysql. Must be changed to 1521 for Oracle, 1443 for SQL Server, 5432 for PostgreSQL. Used only when command=create or command=replicate.
required: false
default: null
aliases: []
@@ -290,7 +290,7 @@ def main():
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
- instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge'], required=False),
+ instance_type = dict(aliases=['type'], choices=['db.t1.micro', 'db.m1.small', 'db.m1.medium', 'db.m1.large', 'db.m1.xlarge', 'db.m2.xlarge', 'db.m2.2xlarge', 'db.m2.4xlarge', 'db.m3.medium', 'db.m3.large', 'db.m3.xlarge', 'db.m3.2xlarge', 'db.cr1.8xlarge'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
@@ -343,7 +343,7 @@ def main():
maint_window = module.params.get('maint_window')
subnet = module.params.get('subnet')
backup_window = module.params.get('backup_window')
- backup_retention = module.params.get('module_retention')
+ backup_retention = module.params.get('backup_retention')
region = module.params.get('region')
zone = module.params.get('zone')
aws_secret_key = module.params.get('aws_secret_key')
diff --git a/cloud/route53 b/cloud/route53
index 2ff22ded9dc..49344ee2061 100644
--- a/cloud/route53
+++ b/cloud/route53
@@ -157,7 +157,7 @@ def commit(changes):
time.sleep(500)
def main():
- argument_spec = ec2_argument_keys_spec()
+ argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['get', 'create', 'delete'], required=True),
zone = dict(required=True),
@@ -220,11 +220,16 @@ def main():
found_record = False
sets = conn.get_all_rrsets(zones[zone_in])
for rset in sets:
- if rset.type == type_in and rset.name == record_in:
+ # Due to a bug in either AWS or Boto, "special" characters are returned as octals, preventing round
+ # tripping of things like * and @.
+ decoded_name = rset.name.replace(r'\052', '*')
+            decoded_name = decoded_name.replace(r'\100', '@')
+
+ if rset.type == type_in and decoded_name == record_in:
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
- record['record'] = rset.name
+ record['record'] = decoded_name
record['ttl'] = rset.ttl
record['value'] = ','.join(sorted(rset.resource_records))
record['values'] = sorted(rset.resource_records)
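
The Route 53 change above decodes the octal escapes boto hands back for special characters in record set names. The second replace() must chain off the result of the first, or whichever substitution runs last silently discards the other; a short sketch:

    def decode_rrset_name(name):
        decoded = name.replace(r'\052', '*')     # escaped asterisk
        decoded = decoded.replace(r'\100', '@')  # escaped at sign
        return decoded

    assert decode_rrset_name(r'\052.example.com.') == '*.example.com.'
    assert decode_rrset_name(r'\100.example.com.') == '@.example.com.'
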
diff --git a/cloud/s3 b/cloud/s3
index 6e566e4b8dc..715c0e00ab9 100644
--- a/cloud/s3
+++ b/cloud/s3
@@ -68,7 +68,7 @@ options:
aliases: []
s3_url:
description:
- - S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined.
+      - "S3 URL endpoint. If not specified then the S3_URL environment variable is used, if that variable is defined. Ansible tries to guess whether fakes3 (https://github.com/jubos/fake-s3) or Eucalyptus Walrus (https://github.com/eucalyptus/eucalyptus/wiki/Walrus) is in use and configures the connection accordingly. The current heuristic is: anything with the scheme fakes3:// is fakes3; anything else not ending in amazonaws.com is Walrus."
default: null
aliases: [ S3_URL ]
aws_secret_key:
@@ -83,6 +83,13 @@ options:
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
+ metadata:
+ description:
+ - Metadata for PUT operation, as a dictionary of 'key=value' and 'key=value,key=value'.
+ required: false
+ default: null
+ version_added: "1.6"
+
requirements: [ "boto" ]
author: Lester Wade, Ralph Tice
'''
@@ -97,7 +104,11 @@ EXAMPLES = '''
# GET/download and do not overwrite local file (trust remote)
- s3: bucket=mybucket object=/my/desired/key.txt dest=/usr/local/myfile.txt mode=get force=false
# PUT/upload and overwrite remote file (trust local)
-- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put
+# PUT/upload with metadata
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip'
+# PUT/upload with multiple metadata
+- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put metadata='Content-Encoding=gzip,Cache-Control=no-cache'
# PUT/upload and do not overwrite remote file (trust local)
- s3: bucket=mybucket object=/my/desired/key.txt src=/usr/local/myfile.txt mode=put force=false
# Download an object as a string to use else where in your playbook
@@ -134,11 +145,12 @@ def key_check(module, s3, bucket, obj):
def keysum(module, s3, bucket, obj):
bucket = s3.lookup(bucket)
key_check = bucket.get_key(obj)
- if key_check:
- md5_remote = key_check.etag[1:-1]
- etag_multipart = md5_remote.find('-')!=-1 #Check for multipart, etag is not md5
- if etag_multipart is True:
- module.fail_json(msg="Files uploaded with multipart of s3 are not supported with checksum, unable to compute checksum.")
+ if not key_check:
+ return None
+ md5_remote = key_check.etag[1:-1]
+ etag_multipart = '-' in md5_remote # Check for multipart, etag is not md5
+ if etag_multipart is True:
+        module.fail_json(msg="Files uploaded with S3 multipart are not supported with checksum; unable to compute checksum.")
return md5_remote
def bucket_check(module, s3, bucket):
@@ -201,10 +213,14 @@ def path_check(path):
else:
return False
-def upload_s3file(module, s3, bucket, obj, src, expiry):
+def upload_s3file(module, s3, bucket, obj, src, expiry, metadata):
try:
bucket = s3.lookup(bucket)
- key = bucket.new_key(obj)
+ key = bucket.new_key(obj)
+ if metadata:
+ for meta_key in metadata.keys():
+ key.set_metadata(meta_key, metadata[meta_key])
+
key.set_contents_from_filename(src)
url = key.generate_url(expiry)
module.exit_json(msg="PUT operation complete", url=url, changed=True)
@@ -238,6 +254,13 @@ def get_download_url(module, s3, bucket, obj, expiry, changed=True):
except s3.provider.storage_response_error, e:
module.fail_json(msg= str(e))
+def is_fakes3(s3_url):
+ """ Return True if s3_url has scheme fakes3:// """
+ if s3_url is not None:
+ return urlparse.urlparse(s3_url).scheme == 'fakes3'
+ else:
+ return False
+
def is_walrus(s3_url):
""" Return True if it's Walrus endpoint, not S3
@@ -249,7 +272,7 @@ def is_walrus(s3_url):
return False
def main():
- argument_spec = ec2_argument_keys_spec()
+ argument_spec = ec2_argument_spec()
argument_spec.update(dict(
bucket = dict(required=True),
object = dict(),
@@ -259,7 +282,8 @@ def main():
expiry = dict(default=600, aliases=['expiration']),
s3_url = dict(aliases=['S3_URL']),
overwrite = dict(aliases=['force'], default=True, type='bool'),
- )
+ metadata = dict(type='dict'),
+ ),
)
module = AnsibleModule(argument_spec=argument_spec)
@@ -272,6 +296,7 @@ def main():
expiry = int(module.params['expiry'])
s3_url = module.params.get('s3_url')
overwrite = module.params.get('overwrite')
+ metadata = module.params.get('metadata')
ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
@@ -282,8 +307,22 @@ def main():
if not s3_url and 'S3_URL' in os.environ:
s3_url = os.environ['S3_URL']
- # If we have an S3_URL env var set, this is likely to be Walrus, so change connection method
- if is_walrus(s3_url):
+ # Look at s3_url and tweak connection settings
+ # if connecting to Walrus or fakes3
+ if is_fakes3(s3_url):
+ try:
+ fakes3 = urlparse.urlparse(s3_url)
+ from boto.s3.connection import OrdinaryCallingFormat
+ s3 = boto.connect_s3(
+ aws_access_key,
+ aws_secret_key,
+ is_secure=False,
+ host=fakes3.hostname,
+ port=fakes3.port,
+ calling_format=OrdinaryCallingFormat())
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg = str(e))
+ elif is_walrus(s3_url):
try:
walrus = urlparse.urlparse(s3_url).hostname
s3 = boto.connect_walrus(walrus, aws_access_key, aws_secret_key)
@@ -364,24 +403,24 @@ def main():
if md5_local == md5_remote:
sum_matches = True
if overwrite is True:
- upload_s3file(module, s3, bucket, obj, src, expiry)
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
else:
get_download_url(module, s3, bucket, obj, expiry, changed=False)
else:
sum_matches = False
if overwrite is True:
- upload_s3file(module, s3, bucket, obj, src, expiry)
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
else:
module.exit_json(msg="WARNING: Checksums do not match. Use overwrite parameter to force upload.", failed=True)
# If neither exist (based on bucket existence), we can create both.
if bucketrtn is False and pathrtn is True:
create_bucket(module, s3, bucket)
- upload_s3file(module, s3, bucket, obj, src, expiry)
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
# If bucket exists but key doesn't, just upload.
if bucketrtn is True and pathrtn is True and keyrtn is False:
- upload_s3file(module, s3, bucket, obj, src, expiry)
+ upload_s3file(module, s3, bucket, obj, src, expiry, metadata)
# Support for deleting an object if we have both params.
if mode == 'delete':
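
The endpoint heuristic added above works from the URL alone: a fakes3:// scheme selects the fakes3 connection path, and anything else that is not an amazonaws.com host falls through to the Walrus check. A standalone sketch of the scheme test (urlparse is the Python 2 module the s3 code calls):

    import urlparse  # urllib.parse on Python 3

    def is_fakes3(s3_url):
        # fakes3 endpoints are recognized purely by their URL scheme.
        if s3_url is None:
            return False
        return urlparse.urlparse(s3_url).scheme == 'fakes3'

    print(is_fakes3('fakes3://localhost:4567'))    # True
    print(is_fakes3('https://s3.amazonaws.com'))   # False
    print(is_fakes3(None))                         # False
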
diff --git a/cloud/virt b/cloud/virt
index 42e99209b14..f1d36fc1964 100644
--- a/cloud/virt
+++ b/cloud/virt
@@ -36,7 +36,7 @@ options:
since these refer only to VM states. After starting a guest, it may not
be immediately accessible.
required: false
- choices: [ "running", "shutdown" ]
+ choices: [ "running", "shutdown", "destroyed", "paused" ]
default: "no"
command:
description:
@@ -108,18 +108,19 @@ VIRT_STATE_NAME_MAP = {
6 : "crashed"
}
-class VMNotFound(Exception):
+class VMNotFound(Exception):
pass
class LibvirtConnection(object):
- def __init__(self, uri):
+ def __init__(self, uri, module):
- cmd = subprocess.Popen("uname -r", shell=True, stdout=subprocess.PIPE,
- close_fds=True)
- output = cmd.communicate()[0]
+ self.module = module
- if output.find("xen") != -1:
+ cmd = "uname -r"
+ rc, stdout, stderr = self.module.run_command(cmd)
+
+ if "xen" in stdout:
conn = libvirt.open(None)
else:
conn = libvirt.open(uri)
@@ -196,6 +197,10 @@ class LibvirtConnection(object):
def get_type(self):
return self.conn.getType()
+ def get_xml(self, vmid):
+ vm = self.conn.lookupByName(vmid)
+ return vm.XMLDesc(0)
+
def get_maxVcpus(self, vmid):
vm = self.conn.lookupByName(vmid)
return vm.maxVcpus()
@@ -221,11 +226,12 @@ class LibvirtConnection(object):
class Virt(object):
- def __init__(self, uri):
+ def __init__(self, uri, module):
+ self.module = module
self.uri = uri
def __get_conn(self):
- self.conn = LibvirtConnection(self.uri)
+ self.conn = LibvirtConnection(self.uri, self.module)
return self.conn
def get_vm(self, vmid):
@@ -359,14 +365,8 @@ class Virt(object):
Return an xml describing vm config returned by a libvirt call
"""
- conn = libvirt.openReadOnly(None)
- if not conn:
- return (-1,'Failed to open connection to the hypervisor')
- try:
- domV = conn.lookupByName(vmid)
- except:
- return (-1,'Failed to find the main domain')
- return domV.XMLDesc(0)
+ self.__get_conn()
+ return self.conn.get_xml(vmid)
def get_maxVcpus(self, vmid):
"""
@@ -399,7 +399,7 @@ def core(module):
uri = module.params.get('uri', None)
xml = module.params.get('xml', None)
- v = Virt(uri)
+ v = Virt(uri, module)
res = {}
if state and command=='list_vms':
@@ -414,13 +414,24 @@ def core(module):
res['changed'] = False
if state == 'running':
- if v.status(guest) is not 'running':
+        if v.status(guest) == 'paused':
+            res['changed'] = True
+            res['msg'] = v.unpause(guest)
+        elif v.status(guest) != 'running':
res['changed'] = True
res['msg'] = v.start(guest)
elif state == 'shutdown':
if v.status(guest) is not 'shutdown':
res['changed'] = True
res['msg'] = v.shutdown(guest)
+ elif state == 'destroyed':
+            if v.status(guest) != 'shutdown':
+                res['changed'] = True
+                res['msg'] = v.destroy(guest)
+        elif state == 'paused':
+            if v.status(guest) == 'running':
+ res['changed'] = True
+ res['msg'] = v.pause(guest)
else:
module.fail_json(msg="unexpected state")
@@ -459,7 +470,7 @@ def main():
module = AnsibleModule(argument_spec=dict(
name = dict(aliases=['guest']),
- state = dict(choices=['running', 'shutdown']),
+ state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
command = dict(choices=ALL_COMMANDS),
uri = dict(default='qemu:///system'),
xml = dict(),
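
The new state checks above compare status strings with == and !=: identity tests with 'is' only appear to work because CPython interns short string constants, so they are unreliable for values built at runtime. A two-assertion demonstration:

    status = ''.join(['run', 'ning'])   # equal to 'running', distinct object
    assert status == 'running'          # correct state comparison
    assert not (status is 'running')    # identity check fails here
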
diff --git a/commands/command b/commands/command
index 76d2f828d0c..f1a48922122 100644
--- a/commands/command
+++ b/commands/command
@@ -39,7 +39,8 @@ description:
options:
free_form:
description:
- - the command module takes a free form command to run
+ - the command module takes a free form command to run. There is no parameter actually named 'free form'.
+ See the examples!
required: true
default: null
aliases: []
@@ -136,7 +137,7 @@ def main():
args = shlex.split(args)
startd = datetime.datetime.now()
- rc, out, err = module.run_command(args, executable=executable)
+ rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)
endd = datetime.datetime.now()
delta = endd - startd
@@ -180,7 +181,7 @@ class CommandModule(AnsibleModule):
params['removes'] = None
params['shell'] = False
params['executable'] = None
- if args.find("#USE_SHELL") != -1:
+ if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "")
params['shell'] = True
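For context, use_unsafe_shell decides whether run_command hands the string to a shell or tokenizes it into an argv list. A rough sketch of the equivalence (illustrative only, not the module_utils implementation):

# Rough equivalence of run_command's use_unsafe_shell flag (a sketch):
import shlex
import subprocess

def run(args, use_unsafe_shell=False):
    if use_unsafe_shell:
        # string goes to /bin/sh -c: pipes, redirects, expansion all work
        p = subprocess.Popen(args, shell=True,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        # tokenized argv, no shell interpretation of metacharacters
        p = subprocess.Popen(shlex.split(args), shell=False,
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    return p.returncode, out, err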
diff --git a/commands/shell b/commands/shell
index 03299b967cc..639d4a14b09 100644
--- a/commands/shell
+++ b/commands/shell
@@ -14,7 +14,8 @@ version_added: "0.2"
options:
free_form:
description:
- - The shell module takes a free form command to run
+      - The shell module takes a free form command to run, as a string. There is no actual
+        option named "free_form". See the examples!
required: true
default: null
creates:
diff --git a/database/mongodb_user b/database/mongodb_user
index 63bc6b5400d..5d7e0897b68 100644
--- a/database/mongodb_user
+++ b/database/mongodb_user
@@ -2,6 +2,7 @@
# (c) 2012, Elliott Foster
# Sponsored by Four Kitchens http://fourkitchens.com.
+# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
@@ -46,6 +47,12 @@ options:
- The port to connect to
required: false
default: 27017
+ replica_set:
+ version_added: "1.6"
+ description:
+ - Replica set to connect to (automatically connects to primary for writes)
+ required: false
+ default: null
database:
description:
- The name of the database to add/remove the user from
@@ -92,12 +99,17 @@ EXAMPLES = '''
- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present
- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present
- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present
+
+# add a user to a database in a replica set; the primary server is automatically discovered and written to
+- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present
'''
import ConfigParser
+from distutils.version import LooseVersion
try:
from pymongo.errors import ConnectionFailure
from pymongo.errors import OperationFailure
+ from pymongo import version as PyMongoVersion
from pymongo import MongoClient
except ImportError:
try: # for older PyMongo 2.2
@@ -114,34 +126,25 @@ else:
#
def user_add(module, client, db_name, user, password, roles):
- try:
- db = client[db_name]
- if roles is None:
- db.add_user(user, password, False)
- else:
- try:
- db.add_user(user, password, None, roles=roles)
- except:
- module.fail_json(msg='"problem adding user; you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param"')
- except OperationFailure:
- return False
-
- return True
+ db = client[db_name]
+ if roles is None:
+ db.add_user(user, password, False)
+ else:
+ try:
+ db.add_user(user, password, None, roles=roles)
+ except OperationFailure, e:
+ err_msg = str(e)
+ if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'):
+ err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)'
+ module.fail_json(msg=err_msg)
def user_remove(client, db_name, user):
- try:
- db = client[db_name]
- db.remove_user(user)
- except OperationFailure:
- return False
-
- return True
+ db = client[db_name]
+ db.remove_user(user)
def load_mongocnf():
config = ConfigParser.RawConfigParser()
mongocnf = os.path.expanduser('~/.mongodb.cnf')
- if not os.path.exists(mongocnf):
- return False
try:
config.readfp(open(mongocnf))
@@ -165,6 +168,7 @@ def main():
login_password=dict(default=None),
login_host=dict(default='localhost'),
login_port=dict(default='27017'),
+ replica_set=dict(default=None),
database=dict(required=True, aliases=['db']),
user=dict(required=True, aliases=['name']),
password=dict(aliases=['pass']),
@@ -180,6 +184,7 @@ def main():
login_password = module.params['login_password']
login_host = module.params['login_host']
login_port = module.params['login_port']
+ replica_set = module.params['replica_set']
db_name = module.params['database']
user = module.params['user']
password = module.params['password']
@@ -187,7 +192,20 @@ def main():
state = module.params['state']
try:
- client = MongoClient(login_host, int(login_port))
+ if replica_set:
+ client = MongoClient(login_host, int(login_port), replicaset=replica_set)
+ else:
+ client = MongoClient(login_host, int(login_port))
+
+        # try to authenticate as the target user to check if it already exists
+ try:
+ client[db_name].authenticate(user, password)
+ if state == 'present':
+ module.exit_json(changed=False, user=user)
+ except OperationFailure:
+ if state == 'absent':
+ module.exit_json(changed=False, user=user)
+
if login_user is None and login_password is None:
mongocnf_creds = load_mongocnf()
if mongocnf_creds is not False:
@@ -200,16 +218,22 @@ def main():
client.admin.authenticate(login_user, login_password)
except ConnectionFailure, e:
- module.fail_json(msg='unable to connect to database, check login_user and login_password are correct')
+ module.fail_json(msg='unable to connect to database: %s' % str(e))
if state == 'present':
if password is None:
module.fail_json(msg='password parameter required when adding a user')
- if user_add(module, client, db_name, user, password, roles) is not True:
- module.fail_json(msg='Unable to add or update user, check login_user and login_password are correct and that this user has access to the admin collection')
+
+ try:
+ user_add(module, client, db_name, user, password, roles)
+ except OperationFailure, e:
+ module.fail_json(msg='Unable to add or update user: %s' % str(e))
+
elif state == 'absent':
- if user_remove(client, db_name, user) is not True:
- module.fail_json(msg='Unable to remove user, check login_user and login_password are correct and that this user has access to the admin collection')
+ try:
+ user_remove(client, db_name, user)
+ except OperationFailure, e:
+ module.fail_json(msg='Unable to remove user: %s' % str(e))
module.exit_json(changed=True, user=user)
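A condensed sketch of the connection-and-probe flow introduced above, assuming pymongo is installed (names illustrative). Authenticating as the target user doubles as an existence check, which is what lets the module exit early without admin credentials:

# Sketch: connect, optionally via a replica set, then probe for the
# target user by authenticating as it.
from pymongo import MongoClient
from pymongo.errors import OperationFailure

def connect(host, port, replica_set=None):
    if replica_set:
        # pymongo discovers the primary and routes writes to it
        return MongoClient(host, int(port), replicaset=replica_set)
    return MongoClient(host, int(port))

def user_exists(client, db_name, user, password):
    try:
        client[db_name].authenticate(user, password)
        return True
    except OperationFailure:
        return False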
diff --git a/database/mysql_db b/database/mysql_db
index 622bf59a39f..8eec1005893 100644
--- a/database/mysql_db
+++ b/database/mysql_db
@@ -101,6 +101,7 @@ EXAMPLES = '''
import ConfigParser
import os
+import pipes
try:
import MySQLdb
except ImportError:
@@ -123,36 +124,36 @@ def db_delete(cursor, db):
def db_dump(module, host, user, password, db_name, target, port, socket=None):
cmd = module.get_bin_path('mysqldump', True)
- cmd += " --quick --user=%s --password='%s'" %(user, password)
+ cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
if socket is not None:
- cmd += " --socket=%s" % socket
+ cmd += " --socket=%s" % pipes.quote(socket)
else:
- cmd += " --host=%s --port=%s" % (host, port)
- cmd += " %s" % db_name
+ cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
+ cmd += " %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
- cmd = cmd + ' | gzip > ' + target
+ cmd = cmd + ' | gzip > ' + pipes.quote(target)
elif os.path.splitext(target)[-1] == '.bz2':
- cmd = cmd + ' | bzip2 > ' + target
+ cmd = cmd + ' | bzip2 > ' + pipes.quote(target)
else:
- cmd += " > %s" % target
- rc, stdout, stderr = module.run_command(cmd)
+ cmd += " > %s" % pipes.quote(target)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_import(module, host, user, password, db_name, target, port, socket=None):
cmd = module.get_bin_path('mysql', True)
- cmd += " --user=%s --password='%s'" %(user, password)
+ cmd += " --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
if socket is not None:
- cmd += " --socket=%s" % socket
+ cmd += " --socket=%s" % pipes.quote(socket)
else:
- cmd += " --host=%s --port=%s" % (host, port)
- cmd += " -D %s" % db_name
+ cmd += " --host=%s --port=%s" % (pipes.quote(host), pipes.quote(port))
+ cmd += " -D %s" % pipes.quote(db_name)
if os.path.splitext(target)[-1] == '.gz':
- cmd = 'gunzip < ' + target + ' | ' + cmd
+ cmd = 'gunzip < ' + pipes.quote(target) + ' | ' + cmd
elif os.path.splitext(target)[-1] == '.bz2':
- cmd = 'bunzip2 < ' + target + ' | ' + cmd
+ cmd = 'bunzip2 < ' + pipes.quote(target) + ' | ' + cmd
else:
- cmd += " < %s" % target
- rc, stdout, stderr = module.run_command(cmd)
+ cmd += " < %s" % pipes.quote(target)
+ rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
return rc, stdout, stderr
def db_create(cursor, db, encoding, collation):
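The pipes.quote() calls above matter because both commands are later executed with use_unsafe_shell=True; every interpolated value must reach the shell as a single literal word. A quick demonstration of what the quoting buys:

# pipes.quote() turns a hostile value into one shell-safe token
# (pipes.quote on py2; shlex.quote is the py3 spelling).
import pipes

password = "secret'; rm -rf / #"
print("--password=%s" % pipes.quote(password))
# prints: --password='secret'"'"'; rm -rf / #'
# i.e. mysqldump receives the whole string as a single argument even
# though the command line is handed to a shell.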
diff --git a/database/mysql_replication b/database/mysql_replication
index f18060e9556..fdbb379371a 100644
--- a/database/mysql_replication
+++ b/database/mysql_replication
@@ -325,7 +325,7 @@ def main():
if master_password:
chm.append("MASTER_PASSWORD='" + master_password + "'")
if master_port:
- chm.append("MASTER_PORT='" + master_port + "'")
+ chm.append("MASTER_PORT=" + master_port)
if master_connect_retry:
chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'")
if master_log_file:
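The MASTER_PORT change reflects MySQL's CHANGE MASTER TO grammar: the port is an integer literal, unlike MASTER_HOST and MASTER_PASSWORD, which are quoted strings. A hedged sketch of the statement assembly (function name illustrative):

# Sketch: build a CHANGE MASTER TO clause; note the unquoted port.
def change_master_sql(host, password, port):
    chm = []
    chm.append("MASTER_HOST='%s'" % host)
    chm.append("MASTER_PASSWORD='%s'" % password)
    chm.append("MASTER_PORT=%s" % int(port))   # integer literal, no quotes
    return "CHANGE MASTER TO " + ",".join(chm)

# change_master_sql('db1.example.com', 's3cret', '3306') ->
# CHANGE MASTER TO MASTER_HOST='db1.example.com',MASTER_PASSWORD='s3cret',MASTER_PORT=3306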
diff --git a/database/mysql_user b/database/mysql_user
index e7fad3d77c6..b7c84fd1c3e 100644
--- a/database/mysql_user
+++ b/database/mysql_user
@@ -259,7 +259,7 @@ def privileges_unpack(priv):
output = {}
for item in priv.split('/'):
pieces = item.split(':')
- if pieces[0].find('.') != -1:
+ if '.' in pieces[0]:
pieces[0] = pieces[0].split('.')
for idx, piece in enumerate(pieces):
if pieces[0][idx] != "*":
diff --git a/database/mysql_variables b/database/mysql_variables
index 720478cc005..595e0bbb55d 100644
--- a/database/mysql_variables
+++ b/database/mysql_variables
@@ -76,14 +76,48 @@ else:
mysqldb_found = True
+def typedvalue(value):
+ """
+ Convert value to number whenever possible, return same value
+ otherwise.
+
+ >>> typedvalue('3')
+ 3
+ >>> typedvalue('3.0')
+ 3.0
+ >>> typedvalue('foobar')
+ 'foobar'
+
+ """
+ try:
+ return int(value)
+ except ValueError:
+ pass
+
+ try:
+ return float(value)
+ except ValueError:
+ pass
+
+ return value
+
+
def getvariable(cursor, mysqlvar):
cursor.execute("SHOW VARIABLES LIKE '" + mysqlvar + "'")
mysqlvar_val = cursor.fetchall()
return mysqlvar_val
+
def setvariable(cursor, mysqlvar, value):
+ """ Set a global mysql variable to a given value
+
+ The DB driver will handle quoting of the given value based on its
+ type, thus numeric strings like '3.0' or '8' are illegal, they
+ should be passed as numeric literals.
+
+ """
try:
- cursor.execute("SET GLOBAL " + mysqlvar + "=" + value)
+ cursor.execute("SET GLOBAL " + mysqlvar + " = %s", (value,))
cursor.fetchall()
result = True
except Exception, e:
@@ -203,11 +237,14 @@ def main():
else:
if len(mysqlvar_val) < 1:
module.fail_json(msg="Variable not available", changed=False)
- if value == mysqlvar_val[0][1]:
+ # Type values before using them
+ value_wanted = typedvalue(value)
+ value_actual = typedvalue(mysqlvar_val[0][1])
+ if value_wanted == value_actual:
module.exit_json(msg="Variable already set to requested value", changed=False)
- result = setvariable(cursor, mysqlvar, value)
+ result = setvariable(cursor, mysqlvar, value_wanted)
if result is True:
- module.exit_json(msg="Variable change succeeded", changed=True)
+ module.exit_json(msg="Variable change succeeded prev_value=%s" % value_actual, changed=True)
else:
module.fail_json(msg=result, changed=False)
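The two hunks above cooperate: typedvalue() turns numeric strings into numbers so the idempotence comparison is type-consistent, and the parameterized execute() lets MySQLdb quote the value according to its Python type (which is why numeric strings like '3.0' must be passed as numeric literals). A standalone check:

# Standalone check of the typed comparison (typedvalue condensed from
# the hunk above):
def typedvalue(value):
    for cast in (int, float):
        try:
            return cast(value)
        except ValueError:
            pass
    return value

assert typedvalue('3306') == 3306     # wanted vs actual now compare equal
assert typedvalue('3.0') == 3.0
assert typedvalue('ON') == 'ON'       # non-numeric values stay strings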
diff --git a/database/postgresql_privs b/database/postgresql_privs
index 2f3db9a93f1..de5fa94fa48 100644
--- a/database/postgresql_privs
+++ b/database/postgresql_privs
@@ -597,7 +597,8 @@ def main():
except psycopg2.Error, e:
conn.rollback()
# psycopg2 errors come in connection encoding, reencode
- msg = e.message.decode(conn.encoding).encode(errors='replace')
+ msg = e.message.decode(conn.encoding).encode(sys.getdefaultencoding(),
+ 'replace')
module.fail_json(msg=msg)
if module.check_mode:
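On the reencode fix: psycopg2 error messages arrive as bytes in the connection encoding, and the old call passed errors= as a keyword to encode() without a target encoding; decoding with conn.encoding and re-encoding to the interpreter default with 'replace' degrades gracefully instead. In isolation (a py2-flavoured sketch, sample bytes assumed latin-1):

# Sketch of the re-encode step, assuming a latin-1 connection encoding:
import sys

raw = b'relation inexistante : sch\xe9ma'   # bytes as psycopg2 returns them
msg = raw.decode('latin-1').encode(sys.getdefaultencoding(), 'replace')
# under a py2 'ascii' default the e-acute degrades to '?', but fail_json
# gets a clean byte string instead of raising UnicodeEncodeError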
diff --git a/database/postgresql_user b/database/postgresql_user
index b6383006cb4..1dda1a6dc57 100644
--- a/database/postgresql_user
+++ b/database/postgresql_user
@@ -443,9 +443,9 @@ def main():
priv=dict(default=None),
db=dict(default=''),
port=dict(default='5432'),
- fail_on_user=dict(type='bool', choices=BOOLEANS, default='yes'),
+ fail_on_user=dict(type='bool', default='yes'),
role_attr_flags=dict(default=''),
- encrypted=dict(type='bool', choices=BOOLEANS, default='no'),
+ encrypted=dict(type='bool', default='no'),
expires=dict(default=None)
),
supports_check_mode = True
diff --git a/database/redis b/database/redis
index 4e3793daa09..59a1bde7277 100644
--- a/database/redis
+++ b/database/redis
@@ -22,8 +22,9 @@ module: redis
short_description: Various redis commands, slave and flush
description:
- Unified utility to interact with redis instances.
- 'slave' Sets a redis instance in slave or master mode.
- 'flush' Flushes all the instance or a specified db.
+ 'slave' sets a redis instance in slave or master mode.
+ 'flush' flushes all the instance or a specified db.
+     'config' (new in 1.6) ensures a configuration setting on an instance.
version_added: "1.3"
options:
command:
@@ -31,7 +32,7 @@ options:
- The selected redis command
required: true
default: null
- choices: [ "slave", "flush" ]
+ choices: [ "slave", "flush", "config" ]
login_password:
description:
- The password used to authenticate with (usually not used)
@@ -75,6 +76,18 @@ options:
required: false
default: all
choices: [ "all", "db" ]
+ name:
+ version_added: 1.6
+ description:
+ - A redis config key.
+ required: false
+ default: null
+ value:
+ version_added: 1.6
+ description:
+ - A redis config value.
+ required: false
+ default: null
notes:
@@ -100,6 +113,12 @@ EXAMPLES = '''
# Flush only one db in a redis instance
- redis: command=flush db=1 flush_mode=db
+
+# Configure local redis to have 10000 max clients
+- redis: command=config name=maxclients value=10000
+
+# Configure local redis to have lua time limit of 100 ms
+- redis: command=config name=lua-time-limit value=100
'''
try:
@@ -146,7 +165,7 @@ def flush(client, db=None):
def main():
module = AnsibleModule(
argument_spec = dict(
- command=dict(default=None, choices=['slave', 'flush']),
+ command=dict(default=None, choices=['slave', 'flush', 'config']),
login_password=dict(default=None),
login_host=dict(default='localhost'),
login_port=dict(default='6379'),
@@ -155,6 +174,8 @@ def main():
slave_mode=dict(default='slave', choices=['master', 'slave']),
db=dict(default=None),
flush_mode=dict(default='all', choices=['all', 'db']),
+ name=dict(default=None),
+ value=dict(default=None)
),
supports_check_mode = True
)
@@ -272,7 +293,34 @@ def main():
module.exit_json(changed=True, flushed=True, db=db)
else: # Flush never fails :)
module.fail_json(msg="Unable to flush '%d' database" % db)
+ elif command == 'config':
+ name = module.params['name']
+ value = module.params['value']
+ r = redis.StrictRedis(host=login_host,
+ port=login_port,
+ password=login_password)
+
+ try:
+ r.ping()
+ except Exception, e:
+ module.fail_json(msg="unable to connect to database: %s" % e)
+
+
+ try:
+ old_value = r.config_get(name)[name]
+ except Exception, e:
+ module.fail_json(msg="unable to read config: %s" % e)
+ changed = old_value != value
+
+ if module.check_mode or not changed:
+ module.exit_json(changed=changed, name=name, value=value)
+ else:
+ try:
+ r.config_set(name, value)
+ except Exception, e:
+ module.fail_json(msg="unable to write config: %s" % e)
+ module.exit_json(changed=changed, name=name, value=value)
else:
module.fail_json(msg='A valid command must be provided')
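The config branch follows a read-compare-write cycle; CONFIG GET returns strings, so comparing against the (string) module parameter is deliberate. A condensed sketch, assuming redis-py is installed:

# Sketch of the config read/compare/write cycle (redis-py assumed):
import redis

r = redis.StrictRedis(host='localhost', port=6379)
name, value = 'maxclients', '10000'

old_value = r.config_get(name)[name]   # CONFIG GET always returns strings
changed = old_value != value
if changed:
    r.config_set(name, value)          # CONFIG SET maxclients 10000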
diff --git a/database/riak b/database/riak
index 53faba6e983..b30e7dc485d 100644
--- a/database/riak
+++ b/database/riak
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, James Martin , Drew Kerrigan
@@ -73,6 +73,14 @@ options:
default: None
aliases: []
choices: ['kv']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
'''
EXAMPLES = '''
@@ -97,7 +105,7 @@ except ImportError:
def ring_check(module, riak_admin_bin):
- cmd = '%s ringready 2> /dev/null' % riak_admin_bin
+ cmd = '%s ringready' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if rc == 0 and 'TRUE All nodes agree on the ring' in out:
return True
@@ -116,8 +124,8 @@ def main():
wait_for_handoffs=dict(default=False, type='int'),
wait_for_ring=dict(default=False, type='int'),
wait_for_service=dict(
- required=False, default=None, choices=['kv'])
- )
+ required=False, default=None, choices=['kv']),
+ validate_certs = dict(default='yes', type='bool'))
)
@@ -128,6 +136,7 @@ def main():
wait_for_handoffs = module.params.get('wait_for_handoffs')
wait_for_ring = module.params.get('wait_for_ring')
wait_for_service = module.params.get('wait_for_service')
+ validate_certs = module.params.get('validate_certs')
#make sure riak commands are on the path
@@ -138,24 +147,13 @@ def main():
while True:
if time.time() > timeout:
module.fail_json(msg='Timeout, could not fetch Riak stats.')
- try:
- if sys.version_info<(2,6,0):
- stats_raw = urllib2.urlopen(
- 'http://%s/stats' % (http_conn), None).read()
- else:
- stats_raw = urllib2.urlopen(
- 'http://%s/stats' % (http_conn), None, 5).read()
+ (response, info) = fetch_url(module, 'http://%s/stats' % (http_conn), force=True, timeout=5)
+ if info['status'] == 200:
+ stats_raw = response.read()
break
- except urllib2.HTTPError, e:
- time.sleep(5)
- except urllib2.URLError, e:
- time.sleep(5)
- except socket.timeout:
- time.sleep(5)
- except Exception, e:
- module.fail_json(msg='Could not fetch Riak stats: %s' % e)
+ time.sleep(5)
-# here we attempt to load those stats,
+ # here we attempt to load those stats,
try:
stats = json.loads(stats_raw)
except:
@@ -223,7 +221,7 @@ def main():
if wait_for_handoffs:
timeout = time.time() + wait_for_handoffs
while True:
- cmd = '%s transfers 2> /dev/null' % riak_admin_bin
+ cmd = '%s transfers' % riak_admin_bin
rc, out, err = module.run_command(cmd)
if 'No transfers active' in out:
result['handoffs'] = 'No transfers active.'
@@ -233,7 +231,7 @@ def main():
module.fail_json(msg='Timeout waiting for handoffs.')
if wait_for_service:
- cmd = '%s wait_for_service riak_%s %s' % ( riak_admin_bin, wait_for_service, node_name)
+ cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ]
rc, out, err = module.run_command(cmd)
result['service'] = out
@@ -252,5 +250,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
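fetch_url() replaces the version-dependent urllib2 branches; it honours validate_certs and returns a (response, info) pair where info['status'] carries the HTTP status (or -1 on connection failure), so the retry loop no longer needs per-exception handlers. The polling pattern above, pulled out as a sketch (module is an AnsibleModule, fetch_url from ansible.module_utils.urls):

# Sketch of the stats-polling loop above:
import time

def fetch_stats(module, http_conn, deadline):
    while time.time() <= deadline:
        response, info = fetch_url(module, 'http://%s/stats' % http_conn,
                                   force=True, timeout=5)
        if info['status'] == 200:
            return response.read()
        time.sleep(5)                   # retry until the deadline passes
    module.fail_json(msg='Timeout, could not fetch Riak stats.')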
diff --git a/files/acl b/files/acl
index b8d2b85cb65..93431ecf472 100644
--- a/files/acl
+++ b/files/acl
@@ -95,7 +95,7 @@ EXAMPLES = '''
- acl: name=/etc/foo.d entity=joe etype=user permissions=rw default=yes state=present
# Same as previous but using entry shorthand
-- acl: name=/etc/foo.d entrty="default:user:joe:rw-" state=present
+- acl: name=/etc/foo.d entry="default:user:joe:rw-" state=present
# Obtain the acl for a specific file
- acl: name=/etc/foo.conf
@@ -115,6 +115,9 @@ def split_entry(entry):
print "wtf?? %s => %s" % (entry,a)
raise e
+ if d:
+ d = True
+
if t.startswith("u"):
t = "user"
elif t.startswith("g"):
@@ -215,10 +218,10 @@ def main():
if state in ['present','absent']:
if not entry and not etype:
- module.fail_json(msg="%s requries to have ither either etype and permissions or entry to be set" % state)
+ module.fail_json(msg="%s requires either etype and permissions or just entry be set" % state)
if entry:
- if etype or entity or permissions:
+ if etype or entity or permissions:
module.fail_json(msg="entry and another incompatible field (entity, etype or permissions) are also set")
if entry.count(":") not in [2,3]:
module.fail_json(msg="Invalid entry: '%s', it requires 3 or 4 sections divided by ':'" % entry)
@@ -248,7 +251,6 @@ def main():
if not old_permissions == permissions:
changed = True
break
- break
if not matched:
changed=True
diff --git a/files/assemble b/files/assemble
index a8c78256e23..7f0a9d1e0a1 100644
--- a/files/assemble
+++ b/files/assemble
@@ -59,7 +59,7 @@ options:
default: "no"
delimiter:
description:
- - A delimiter to seperate the file contents.
+ - A delimiter to separate the file contents.
version_added: "1.4"
required: false
default: null
@@ -102,19 +102,38 @@ def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None):
tmpfd, temp_path = tempfile.mkstemp()
tmp = os.fdopen(tmpfd,'w')
delimit_me = False
+ add_newline = False
+
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = "%s/%s" % (src_path, f)
- if delimit_me and delimiter:
- tmp.write(delimiter)
- # always make sure there's a newline after the
- # delimiter, so lines don't run together
- if delimiter[-1] != '\n':
- tmp.write('\n')
- if os.path.isfile(fragment):
- tmp.write(file(fragment).read())
+ if not os.path.isfile(fragment):
+ continue
+ fragment_content = file(fragment).read()
+
+ # always put a newline between fragments if the previous fragment didn't end with a newline.
+ if add_newline:
+ tmp.write('\n')
+
+ # delimiters should only appear between fragments
+ if delimit_me:
+ if delimiter:
+ # un-escape anything like newlines
+ delimiter = delimiter.decode('unicode-escape')
+ tmp.write(delimiter)
+ # always make sure there's a newline after the
+ # delimiter, so lines don't run together
+ if delimiter[-1] != '\n':
+ tmp.write('\n')
+
+ tmp.write(fragment_content)
delimit_me = True
+ if fragment_content.endswith('\n'):
+ add_newline = False
+ else:
+ add_newline = True
+
tmp.close()
return temp_path
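The reworked loop guarantees a newline between fragments that do not end in one, and only emits the delimiter between fragments, never after the last. The same joining rule as a compact standalone sketch (delimiter unescaping omitted):

# Standalone sketch of the joining rule implemented above:
def join_fragments(fragments, delimiter=None):
    out, first = [], True
    for content in fragments:
        if not first:
            if out and not out[-1].endswith('\n'):
                out.append('\n')          # newline between fragments
            if delimiter:
                out.append(delimiter)
                if not delimiter.endswith('\n'):
                    out.append('\n')      # newline after the delimiter
        out.append(content)
        first = False
    return ''.join(out)

# join_fragments(['a', 'b\n', 'c'], '#---') == 'a\n#---\nb\n#---\nc'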
diff --git a/files/copy b/files/copy
index dbf9c71b4f6..08aa1d71a40 100644
--- a/files/copy
+++ b/files/copy
@@ -73,6 +73,7 @@ options:
description:
- The validation command to run before copying into place. The path to the file to
validate is passed in via '%s' which must be present as in the visudo example below.
+ The command is passed securely so shell features like expansion and pipes won't work.
required: false
default: ""
version_added: "1.2"
@@ -82,10 +83,6 @@ options:
defaults.
required: false
version_added: "1.5"
- others:
- description:
- - all arguments accepted by the M(file) module also work here
- required: false
author: Michael DeHaan
notes:
- The "copy" module recursively copy facility does not scale to lots (>hundreds) of files.
diff --git a/files/file b/files/file
index 8e4e30a99b7..3b4aaa9e235 100644
--- a/files/file
+++ b/files/file
@@ -33,99 +33,11 @@ DOCUMENTATION = '''
module: file
version_added: "historical"
short_description: Sets attributes of files
+extends_documentation_fragment: files
description:
- Sets attributes of files, symlinks, and directories, or removes
files/symlinks/directories. Many other modules support the same options as
the M(file) module - including M(copy), M(template), and M(assemble).
-options:
- path:
- description:
- - 'path to the file being managed. Aliases: I(dest), I(name)'
- required: true
- default: []
- aliases: ['dest', 'name']
- state:
- description:
- - If C(directory), all immediate subdirectories will be created if they
- do not exist. If C(file), the file will NOT be created if it does not
- exist, see the M(copy) or M(template) module if you want that behavior.
- If C(link), the symbolic link will be created or changed. Use C(hard)
- for hardlinks. If C(absent), directories will be recursively deleted,
- and files or symlinks will be unlinked. If C(touch) (new in 1.4), an empty file will
- be created if the c(dest) does not exist, while an existing file or
- directory will receive updated file access and modification times (similar
- to the way `touch` works from the command line).
- required: false
- default: file
- choices: [ file, link, directory, hard, touch, absent ]
- mode:
- required: false
- default: null
- choices: []
- description:
- - mode the file or directory should be, such as 0644 as would be fed to I(chmod)
- owner:
- required: false
- default: null
- choices: []
- description:
- - name of the user that should own the file/directory, as would be fed to I(chown)
- group:
- required: false
- default: null
- choices: []
- description:
- - name of the group that should own the file/directory, as would be fed to I(chown)
- src:
- required: false
- default: null
- choices: []
- description:
- - path of the file to link to (applies only to C(state=link)). Will accept absolute,
- relative and nonexisting paths. Relative paths are not expanded.
- seuser:
- required: false
- default: null
- choices: []
- description:
- - user part of SELinux file context. Will default to system policy, if
- applicable. If set to C(_default), it will use the C(user) portion of the
- policy if available
- serole:
- required: false
- default: null
- choices: []
- description:
- - role part of SELinux file context, C(_default) feature works as for I(seuser).
- setype:
- required: false
- default: null
- choices: []
- description:
- - type part of SELinux file context, C(_default) feature works as for I(seuser).
- selevel:
- required: false
- default: "s0"
- choices: []
- description:
- - level part of the SELinux file context. This is the MLS/MCS attribute,
- sometimes known as the C(range). C(_default) feature works as for
- I(seuser).
- recurse:
- required: false
- default: "no"
- choices: [ "yes", "no" ]
- version_added: "1.1"
- description:
- - recursively set the specified file attributes (applies only to state=directory)
- force:
- required: false
- default: "no"
- choices: [ "yes", "no" ]
- description:
- - 'force the creation of the symlinks in two cases: the source file does
- not exist (but will appear later); the destination exists and is a file (so, we need to unlink the
- "path" file and create symlink to the "src" file in place of it).'
notes:
- See also M(copy), M(template), M(assemble)
requirements: [ ]
@@ -135,13 +47,14 @@ author: Michael DeHaan
EXAMPLES = '''
- file: path=/etc/foo.conf owner=foo group=foo mode=0644
- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
+- file: path=/tmp/{{ item.path }} dest={{ item.dest }} state=link
+ with_items:
+ - { path: 'x', dest: 'y' }
+ - { path: 'z', dest: 'k' }
'''
def main():
- # FIXME: pass this around, should not use global
- global module
-
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['file','directory','link','hard','touch','absent'], default=None),
@@ -151,6 +64,7 @@ def main():
force = dict(required=False,default=False,type='bool'),
diff_peek = dict(default=None),
validate = dict(required=False, default=None),
+ src = dict(required=False, default=None),
),
add_file_common_args=True,
supports_check_mode=True
@@ -159,23 +73,27 @@ def main():
params = module.params
state = params['state']
force = params['force']
+ diff_peek = params['diff_peek']
+ src = params['src']
+
+ # modify source as we later reload and pass, specially relevant when used by other modules.
params['path'] = path = os.path.expanduser(params['path'])
# short-circuit for diff_peek
- if params.get('diff_peek', None) is not None:
+ if diff_peek is not None:
appears_binary = False
try:
f = open(path)
b = f.read(8192)
f.close()
- if b.find("\x00") != -1:
+ if "\x00" in b:
appears_binary = True
except:
pass
module.exit_json(path=path, changed=False, appears_binary=appears_binary)
+ # Find out current state
prev_state = 'absent'
-
if os.path.lexists(path):
if os.path.islink(path):
prev_state = 'link'
@@ -187,76 +105,60 @@ def main():
# could be many other things, but defaulting to file
prev_state = 'file'
- if prev_state is not None and state is None:
- # set state to current type of file
- state = prev_state
- elif state is None:
- # set default state to file
- state = 'file'
+ # state should default to file, but since that creates many conflicts,
+ # default to 'current' when it exists.
+ if state is None:
+ if prev_state != 'absent':
+ state = prev_state
+ else:
+ state = 'file'
# source is both the source of a symlink or an informational passing of the src for a template module
# or copy module, even if this module never uses it, it is needed to key off some things
-
- src = params.get('src', None)
- if src:
+ if src is not None:
src = os.path.expanduser(src)
- if src is not None and os.path.isdir(path) and state not in ["link", "absent"]:
- if params['original_basename']:
- basename = params['original_basename']
- else:
- basename = os.path.basename(src)
- params['path'] = path = os.path.join(path, basename)
+ # original_basename is used by other modules that depend on file.
+ if os.path.isdir(path) and state not in ["link", "absent"]:
+ if params['original_basename']:
+ basename = params['original_basename']
+ else:
+ basename = os.path.basename(src)
+ params['path'] = path = os.path.join(path, basename)
+ else:
+ if state in ['link','hard']:
+ module.fail_json(msg='src and dest are required for creating links')
file_args = module.load_file_common_arguments(params)
-
- if state in ['link','hard'] and (src is None or path is None):
- module.fail_json(msg='src and dest are required for creating links')
- elif path is None:
- module.fail_json(msg='path is required')
-
changed = False
recurse = params['recurse']
+ if recurse and state != 'directory':
+ module.fail_json(path=path, msg="recurse option requires state to be 'directory'")
- if recurse and state == 'file' and prev_state == 'directory':
- state = 'directory'
-
- if prev_state != 'absent' and state == 'absent':
- try:
- if prev_state == 'directory':
- if os.path.islink(path):
- if module.check_mode:
- module.exit_json(changed=True)
- os.unlink(path)
- else:
+ if state == 'absent':
+ if state != prev_state:
+ if not module.check_mode:
+ if prev_state == 'directory':
try:
- if module.check_mode:
- module.exit_json(changed=True)
shutil.rmtree(path, ignore_errors=False)
except Exception, e:
module.fail_json(msg="rmtree failed: %s" % str(e))
- else:
- if module.check_mode:
- module.exit_json(changed=True)
- os.unlink(path)
- except Exception, e:
- module.fail_json(path=path, msg=str(e))
- module.exit_json(path=path, changed=True)
-
- if prev_state != 'absent' and prev_state != state:
- if not (force and (prev_state == 'file' or prev_state == 'hard' or prev_state == 'directory') and state == 'link') and state != 'touch':
- module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src))
-
- if prev_state == 'absent' and state == 'absent':
- module.exit_json(path=path, changed=False)
-
- if state == 'file':
+ else:
+ try:
+ os.unlink(path)
+ except Exception, e:
+ module.fail_json(path=path, msg="unlinking failed: %s " % str(e))
+ module.exit_json(path=path, changed=True)
+ else:
+ module.exit_json(path=path, changed=False)
- if prev_state != 'file':
- module.fail_json(path=path, msg='file (%s) does not exist, use copy or template module to create' % path)
+ elif state == 'file':
+ if state != prev_state:
+ # file is not absent and any other state is a conflict
+ module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))
- changed = module.set_file_attributes_if_different(file_args, changed)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
module.exit_json(path=path, changed=changed)
elif state == 'directory':
@@ -266,31 +168,33 @@ def main():
os.makedirs(path)
changed = True
- changed = module.set_directory_attributes_if_different(file_args, changed)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
+
if recurse:
for root,dirs,files in os.walk( file_args['path'] ):
- for dir in dirs:
- dirname=os.path.join(root,dir)
- tmp_file_args = file_args.copy()
- tmp_file_args['path']=dirname
- changed = module.set_directory_attributes_if_different(tmp_file_args, changed)
- for file in files:
- filename=os.path.join(root,file)
+ for fsobj in dirs + files:
+ fsname=os.path.join(root, fsobj)
tmp_file_args = file_args.copy()
- tmp_file_args['path']=filename
- changed = module.set_file_attributes_if_different(tmp_file_args, changed)
+ tmp_file_args['path']=fsname
+ changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
+
module.exit_json(path=path, changed=changed)
elif state in ['link','hard']:
+ absrc = src
+ if not os.path.isabs(absrc):
+ absrc = os.path.normpath('%s/%s' % (os.path.dirname(path), absrc))
+
+ if not os.path.exists(absrc) and not force:
+ module.fail_json(path=path, src=src, msg='src file does not exist, use "force=yes" if you really want to create the link: %s' % absrc)
+
if state == 'hard':
- if os.path.isabs(src):
- abs_src = src
- else:
+ if not os.path.isabs(src):
module.fail_json(msg="absolute paths are required")
- if not os.path.exists(abs_src) and not force:
- module.fail_json(path=path, src=src, msg='src file does not exist')
+ elif prev_state in ['file', 'hard', 'directory'] and not force:
+ module.fail_json(path=path, msg='refusing to convert between %s and %s for %s' % (prev_state, state, src))
if prev_state == 'absent':
changed = True
@@ -300,58 +204,63 @@ def main():
changed = True
elif prev_state == 'hard':
if not (state == 'hard' and os.stat(path).st_ino == os.stat(src).st_ino):
+ changed = True
if not force:
module.fail_json(dest=path, src=src, msg='Cannot link, different hard link exists at destination')
- changed = True
- elif prev_state == 'file':
- if not force:
- module.fail_json(dest=path, src=src, msg='Cannot link, file exists at destination')
+ elif prev_state in ['file', 'directory']:
changed = True
- elif prev_state == 'directory':
if not force:
- module.fail_json(dest=path, src=src, msg='Cannot link, directory exists at destination')
- changed = True
+ module.fail_json(dest=path, src=src, msg='Cannot link, %s exists at destination' % prev_state)
else:
module.fail_json(dest=path, src=src, msg='unexpected position reached')
if changed and not module.check_mode:
if prev_state != 'absent':
+ # try to replace atomically
+ tmppath = '/'.join([os.path.dirname(path), ".%s.%s.tmp" % (os.getpid(),time.time())])
try:
- os.unlink(path)
+ if state == 'hard':
+ os.link(src,tmppath)
+ else:
+ os.symlink(src, tmppath)
+ os.rename(tmppath, path)
except OSError, e:
- module.fail_json(path=path, msg='Error while removing existing target: %s' % str(e))
- try:
- if state == 'hard':
- os.link(src,path)
- else:
- os.symlink(src, path)
- except OSError, e:
- module.fail_json(path=path, msg='Error while linking: %s' % str(e))
+ if os.path.exists(tmppath):
+ os.unlink(tmppath)
+ module.fail_json(path=path, msg='Error while replacing: %s' % str(e))
+ else:
+ try:
+ if state == 'hard':
+ os.link(src,path)
+ else:
+ os.symlink(src, path)
+ except OSError, e:
+ module.fail_json(path=path, msg='Error while linking: %s' % str(e))
- changed = module.set_file_attributes_if_different(file_args, changed)
+ changed = module.set_fs_attributes_if_different(file_args, changed)
module.exit_json(dest=path, src=src, changed=changed)
elif state == 'touch':
- if module.check_mode:
- module.exit_json(path=path, skipped=True)
+ if not module.check_mode:
+
+ if prev_state == 'absent':
+ try:
+ open(path, 'w').close()
+            except (OSError, IOError), e:
+ module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
+ elif prev_state in ['file', 'directory']:
+ try:
+ os.utime(path, None)
+ except OSError, e:
+ module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
+ else:
+ module.fail_json(msg='Cannot touch other than files and directories')
+
+ module.set_fs_attributes_if_different(file_args, True)
- if prev_state not in ['file', 'directory', 'absent']:
- module.fail_json(msg='Cannot touch other than files and directories')
- if prev_state != 'absent':
- try:
- os.utime(path, None)
- except OSError, e:
- module.fail_json(path=path, msg='Error while touching existing target: %s' % str(e))
- else:
- try:
- open(path, 'w').close()
- except OSError, e:
- module.fail_json(path=path, msg='Error, could not touch target: %s' % str(e))
- module.set_file_attributes_if_different(file_args, True)
module.exit_json(dest=path, changed=True)
- else:
- module.fail_json(path=path, msg='unexpected position reached')
+ module.fail_json(path=path, msg='unexpected position reached')
# import module snippets
from ansible.module_utils.basic import *
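One behavioural change worth calling out in the link branch: an existing destination is now replaced atomically, by creating the new link under a temporary name in the same directory and rename()-ing it over the target, so there is no window in which the path is missing. The core of that, reduced to a sketch:

# Sketch of the atomic replace used for existing link targets:
import os
import time

def replace_with_symlink(src, path):
    tmppath = os.path.join(os.path.dirname(path),
                           '.%s.%s.tmp' % (os.getpid(), time.time()))
    try:
        os.symlink(src, tmppath)
        os.rename(tmppath, path)      # atomic on POSIX within one filesystem
    except OSError:
        if os.path.exists(tmppath):
            os.unlink(tmppath)        # don't leave the temp link behind
        raise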
diff --git a/files/lineinfile b/files/lineinfile
index 73c9e88cb8c..f781911ccd1 100644
--- a/files/lineinfile
+++ b/files/lineinfile
@@ -110,7 +110,8 @@ options:
validate:
required: false
description:
- - validation to run before copying into place
+ - validation to run before copying into place. The command is passed
+ securely so shell features like expansion and pipes won't work.
required: false
default: None
version_added: "1.4"
@@ -137,7 +138,7 @@ EXAMPLES = r"""
# Fully quoted because of the ': ' on the line. See the Gotchas in the YAML docs.
- lineinfile: "dest=/etc/sudoers state=present regexp='^%wheel' line='%wheel ALL=(ALL) NOPASSWD: ALL'"
-- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\1Xms${xms}m\3' backrefs=yes
+- lineinfile: dest=/opt/jboss-as/bin/standalone.conf regexp='^(.*)Xms(\d+)m(.*)$' line='\\1Xms${xms}m\\3' backrefs=yes
# Validate a the sudoers file before saving
- lineinfile: dest=/etc/sudoers state=present regexp='^%ADMIN ALL\=' line='%ADMIN ALL=(ALL) NOPASSWD:ALL' validate='visudo -cf %s'
diff --git a/files/replace b/files/replace
new file mode 100644
index 00000000000..f4193ae9f30
--- /dev/null
+++ b/files/replace
@@ -0,0 +1,160 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Evan Kaufman .
+
+import re
+import os
+import tempfile
+
+DOCUMENTATION = """
+---
+module: replace
+author: Evan Kaufman
+short_description: Replace all instances of a particular string in a
+ file using a back-referenced regular expression.
+description:
+ - This module will replace all instances of a pattern within a file.
+ - It is up to the user to maintain idempotence by ensuring that the
+ same pattern would never match any replacements made.
+version_added: "1.6"
+options:
+ dest:
+ required: true
+ aliases: [ name, destfile ]
+ description:
+ - The file to modify.
+ regexp:
+ required: true
+ description:
+ - The regular expression to look for in the contents of the file.
+ Uses Python regular expressions; see
+ U(http://docs.python.org/2/library/re.html).
+ Uses multiline mode, which means C(^) and C($) match the beginning
+ and end respectively of I(each line) of the file.
+ replace:
+ required: false
+ description:
+ - The string to replace regexp matches. May contain backreferences
+ that will get expanded with the regexp capture groups if the regexp
+ matches. If not set, matches are removed entirely.
+ backup:
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ description:
+ - Create a backup file including the timestamp information so you can
+ get the original file back if you somehow clobbered it incorrectly.
+ validate:
+ required: false
+ description:
+      - validation to run before copying into place. The command is passed
+        securely so shell features like expansion and pipes won't work.
+ default: None
+ others:
+ description:
+ - All arguments accepted by the M(file) module also work here.
+ required: false
+"""
+
+EXAMPLES = r"""
+- replace: dest=/etc/hosts regexp='(\s+)old\.host\.name(\s+.*)?$' replace='\1new.host.name\2' backup=yes
+
+- replace: dest=/home/jdoe/.ssh/known_hosts regexp='^old\.host\.name[^\n]*\n' owner=jdoe group=jdoe mode=644
+
+- replace: dest=/etc/apache/ports regexp='^(NameVirtualHost|Listen)\s+80\s*$' replace='\1 127.0.0.1:8080' validate='/usr/sbin/apache2ctl -f %s -t'
+"""
+
+def write_changes(module,contents,dest):
+
+ tmpfd, tmpfile = tempfile.mkstemp()
+ f = os.fdopen(tmpfd,'wb')
+ f.write(contents)
+ f.close()
+
+ validate = module.params.get('validate', None)
+ valid = not validate
+ if validate:
+ (rc, out, err) = module.run_command(validate % tmpfile)
+ valid = rc == 0
+ if rc != 0:
+ module.fail_json(msg='failed to validate: '
+ 'rc:%s error:%s' % (rc,err))
+ if valid:
+ module.atomic_move(tmpfile, dest)
+
+def check_file_attrs(module, changed, message):
+
+ file_args = module.load_file_common_arguments(module.params)
+ if module.set_file_attributes_if_different(file_args, False):
+
+ if changed:
+ message += " and "
+ changed = True
+ message += "ownership, perms or SE linux context changed"
+
+ return message, changed
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ dest=dict(required=True, aliases=['name', 'destfile']),
+ regexp=dict(required=True),
+ replace=dict(default='', type='str'),
+ backup=dict(default=False, type='bool'),
+ validate=dict(default=None, type='str'),
+ ),
+ add_file_common_args=True,
+ supports_check_mode=True
+ )
+
+ params = module.params
+ dest = os.path.expanduser(params['dest'])
+
+ if os.path.isdir(dest):
+        module.fail_json(rc=256, msg='Destination %s is a directory!' % dest)
+
+ if not os.path.exists(dest):
+        module.fail_json(rc=257, msg='Destination %s does not exist!' % dest)
+ else:
+ f = open(dest, 'rb')
+ contents = f.read()
+ f.close()
+
+ mre = re.compile(params['regexp'], re.MULTILINE)
+ result = re.subn(mre, params['replace'], contents, 0)
+
+ if result[1] > 0:
+ msg = '%s replacements made' % result[1]
+ changed = True
+ else:
+ msg = ''
+ changed = False
+
+ if changed and not module.check_mode:
+ if params['backup'] and os.path.exists(dest):
+ module.backup_local(dest)
+ write_changes(module, result[0], dest)
+
+ msg, changed = check_file_attrs(module, changed, msg)
+ module.exit_json(changed=changed, msg=msg)
+
+# this is magic, see lib/ansible/module_common.py
+#<>
+
+main()
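The whole module hinges on one multiline re.subn() call; the substitution count in the returned tuple drives both changed and the message. In miniature:

# Miniature of the core substitution:
import re

contents = "old.host.name alias old.host.name\n"
mre = re.compile(r'old\.host\.name', re.MULTILINE)
new_contents, count = re.subn(mre, 'new.host.name', contents, 0)
# count == 2 -> changed=True, msg='2 replacements made'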
diff --git a/files/stat b/files/stat
index 2839ca8e06f..8c717a395c4 100644
--- a/files/stat
+++ b/files/stat
@@ -132,8 +132,9 @@ def main():
if S_ISLNK(mode):
d['lnk_source'] = os.path.realpath(path)
- if S_ISREG(mode) and get_md5:
- d['md5'] = module.md5(path)
+ if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
+ d['md5'] = module.md5(path)
+
try:
pw = pwd.getpwuid(st.st_uid)
diff --git a/files/synchronize b/files/synchronize
index 493322393bc..8d67ce9bac1 100644
--- a/files/synchronize
+++ b/files/synchronize
@@ -16,8 +16,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
-import subprocess
-
DOCUMENTATION = '''
---
module: synchronize
@@ -51,6 +49,13 @@ options:
choices: [ 'yes', 'no' ]
default: 'yes'
required: false
+ checksum:
+ description:
+      - Skip based on checksum, rather than mod-time & size; note that the "archive" option is still enabled by default - the "checksum" option will not disable it.
+ choices: [ 'yes', 'no' ]
+ default: 'no'
+ required: false
+ version_added: "1.6"
existing_only:
description:
- Skip creating new files on receiver.
@@ -60,7 +65,7 @@ options:
version_added: "1.5"
delete:
description:
- - Delete files that don't exist (after transfer, not before) in the C(src) path.
+ - Delete files that don't exist (after transfer, not before) in the C(src) path. This option requires C(recursive=yes).
choices: [ 'yes', 'no' ]
default: 'no'
required: false
@@ -121,6 +126,17 @@ options:
- Specify a --timeout for the rsync command in seconds.
default: 10
required: false
+ set_remote_user:
+ description:
+      - Put user@ for the remote paths. If you have a custom ssh config to define the remote user for a host
+ that does not match the inventory user, you should set this parameter to "no".
+ default: yes
+ rsync_opts:
+ description:
+ - Specify additional rsync options by passing in an array.
+ default:
+ required: false
+ version_added: "1.6"
notes:
- Inspect the verbose output to validate the destination user/host/path
are what was expected.
@@ -144,6 +160,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path archive=no
# Synchronization with --archive options enabled except for --recursive
synchronize: src=some/relative/path dest=/some/absolute/path recursive=no
+# Synchronization with --archive options enabled except for --times, with --checksum option enabled
+synchronize: src=some/relative/path dest=/some/absolute/path checksum=yes times=no
+
# Synchronization without --archive options enabled except use --links
synchronize: src=some/relative/path dest=/some/absolute/path archive=no links=yes
@@ -169,6 +188,9 @@ synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rs
- var # exclude any path whose last part is 'var'
- /var # exclude any path starting with 'var' starting at the source directory
+ /var/conf # include /var/conf even though it was previously excluded
+
+# Synchronize passing in extra rsync options
+synchronize: src=/tmp/helloworld dest=/var/www/helloworld rsync_opts=--no-motd,--exclude=.git
'''
@@ -182,6 +204,7 @@ def main():
private_key = dict(default=None),
rsync_path = dict(default=None),
archive = dict(default='yes', type='bool'),
+ checksum = dict(default='no', type='bool'),
existing_only = dict(default='no', type='bool'),
dirs = dict(default='no', type='bool'),
recursive = dict(type='bool'),
@@ -191,7 +214,9 @@ def main():
times = dict(type='bool'),
owner = dict(type='bool'),
group = dict(type='bool'),
- rsync_timeout = dict(type='int', default=10)
+ set_remote_user = dict(default='yes', type='bool'),
+ rsync_timeout = dict(type='int', default=10),
+ rsync_opts = dict(type='list')
),
supports_check_mode = True
)
@@ -205,6 +230,7 @@ def main():
rsync = module.params.get('local_rsync_path', 'rsync')
rsync_timeout = module.params.get('rsync_timeout', 'rsync_timeout')
archive = module.params['archive']
+ checksum = module.params['checksum']
existing_only = module.params['existing_only']
dirs = module.params['dirs']
# the default of these params depends on the value of archive
@@ -215,6 +241,7 @@ def main():
times = module.params['times']
owner = module.params['owner']
group = module.params['group']
+ rsync_opts = module.params['rsync_opts']
cmd = '%s --delay-updates -FF --compress --timeout=%s' % (rsync, rsync_timeout)
if module.check_mode:
@@ -223,6 +250,8 @@ def main():
cmd = cmd + ' --delete-after'
if existing_only:
cmd = cmd + ' --existing'
+ if checksum:
+ cmd = cmd + ' --checksum'
if archive:
cmd = cmd + ' --archive'
if recursive is False:
@@ -270,8 +299,17 @@ def main():
if rsync_path:
cmd = cmd + " --rsync-path '%s'" %(rsync_path)
+ if rsync_opts:
+ cmd = cmd + " " + " ".join(rsync_opts)
changed_marker = '<>'
cmd = cmd + " --out-format='" + changed_marker + "%i %n%L'"
+
+ # expand the paths
+ if '@' not in source:
+ source = os.path.expanduser(source)
+ if '@' not in dest:
+ dest = os.path.expanduser(dest)
+
cmd = ' '.join([cmd, source, dest])
cmdstr = cmd
(rc, out, err) = module.run_command(cmd)
@@ -279,8 +317,12 @@ def main():
return module.fail_json(msg=err, rc=rc, cmd=cmdstr)
else:
changed = changed_marker in out
- return module.exit_json(changed=changed, msg=out.replace(changed_marker,''),
- rc=rc, cmd=cmdstr)
+ out_clean=out.replace(changed_marker,'')
+ out_lines=out_clean.split('\n')
+ while '' in out_lines:
+ out_lines.remove('')
+ return module.exit_json(changed=changed, msg=out_clean,
+ rc=rc, cmd=cmdstr, stdout_lines=out_lines)
# import module snippets
from ansible.module_utils.basic import *
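The flag assembly is plain string concatenation, with rsync_opts appended verbatim at the end; a trimmed illustrative sketch (defaults and names simplified from the hunks above):

# Sketch of the option-to-flag assembly (values illustrative):
def build_rsync_cmd(rsync='rsync', timeout=10, checksum=True, archive=True,
                    rsync_opts=None, source='src/', dest='/var/www/'):
    cmd = '%s --delay-updates -FF --compress --timeout=%s' % (rsync, timeout)
    if checksum:
        cmd += ' --checksum'       # compare content, not mtime+size
    if archive:
        cmd += ' --archive'
    if rsync_opts:
        cmd += ' ' + ' '.join(rsync_opts)
    return ' '.join([cmd, source, dest])

# build_rsync_cmd(rsync_opts=['--no-motd', '--exclude=.git'])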
diff --git a/files/template b/files/template
index 29fa905207f..3c21f3f1170 100644
--- a/files/template
+++ b/files/template
@@ -17,7 +17,7 @@ description:
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
- a string that uses a date in the template will resort in the template being marked 'changed'
+ a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
@@ -40,14 +40,13 @@ options:
default: "no"
validate:
description:
- - validation to run before copying into place
+ - The validation command to run before copying into place.
+ - The path to the file to validate is passed in via '%s' which must be present as in the visudo example below.
+      - The command is passed securely so shell features like expansion and pipes won't work.
required: false
default: ""
version_added: "1.2"
- others:
- description:
- - all arguments accepted by the M(file) module also work here, as well as the M(copy) module (except the the 'content' parameter).
- required: false
notes:
- "Since Ansible version 0.9, templates are loaded with C(trim_blocks=True)."
@@ -63,6 +62,6 @@ EXAMPLES = '''
# Example from Ansible Playbooks
- template: src=/mytemplates/foo.j2 dest=/etc/file.conf owner=bin group=wheel mode=0644
-# Copy a new "sudoers file into place, after passing validation with visudo
-- action: template src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
+# Copy a new "sudoers" file into place, after passing validation with visudo
+- template: src=/mine/sudoers dest=/etc/sudoers validate='visudo -cf %s'
'''
diff --git a/files/unarchive b/files/unarchive
index 661f3899690..29e9ddb9e48 100644
--- a/files/unarchive
+++ b/files/unarchive
@@ -43,7 +43,13 @@ options:
required: false
choices: [ "yes", "no" ]
default: "yes"
-author: Dylan Martin
+ creates:
+ description:
+      - a filename; when it already exists, this step will B(not) be run.
+ required: no
+ default: null
+ version_added: "1.6"
+author: Dylan Martin
todo:
- detect changed/unchanged for .zip files
- handle common unarchive args, like preserve owner/timestamp etc...
@@ -75,17 +81,20 @@ class ZipFile(object):
self.src = src
self.dest = dest
self.module = module
+ self.cmd_path = self.module.get_bin_path('unzip')
def is_unarchived(self):
return dict(unarchived=False)
def unarchive(self):
- cmd = 'unzip -o "%s" -d "%s"' % (self.src, self.dest)
+ cmd = '%s -o "%s" -d "%s"' % (self.cmd_path, self.src, self.dest)
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
- cmd = 'unzip -l "%s"' % self.src
+ if not self.cmd_path:
+ return False
+ cmd = '%s -l "%s"' % (self.cmd_path, self.src)
rc, out, err = self.module.run_command(cmd)
if rc == 0:
return True
@@ -99,23 +108,26 @@ class TgzFile(object):
self.src = src
self.dest = dest
self.module = module
+ self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'z'
def is_unarchived(self):
dirof = os.path.dirname(self.dest)
destbase = os.path.basename(self.dest)
- cmd = 'tar -v -C "%s" --diff -%sf "%s"' % (self.dest, self.zipflag, self.src)
+ cmd = '%s -v -C "%s" --diff -%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
unarchived = (rc == 0)
return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
def unarchive(self):
- cmd = 'tar -C "%s" -x%sf "%s"' % (self.dest, self.zipflag, self.src)
+ cmd = '%s -C "%s" -x%sf "%s"' % (self.cmd_path, self.dest, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
return dict(cmd=cmd, rc=rc, out=out, err=err)
def can_handle_archive(self):
- cmd = 'tar -t%sf "%s"' % (self.zipflag, self.src)
+ if not self.cmd_path:
+ return False
+ cmd = '%s -t%sf "%s"' % (self.cmd_path, self.zipflag, self.src)
rc, out, err = self.module.run_command(cmd)
if rc == 0:
if len(out.splitlines(True)) > 0:
@@ -129,6 +141,7 @@ class TarFile(TgzFile):
self.src = src
self.dest = dest
self.module = module
+ self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = ''
@@ -138,6 +151,7 @@ class TarBzip(TgzFile):
self.src = src
self.dest = dest
self.module = module
+ self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'j'
@@ -147,6 +161,7 @@ class TarXz(TgzFile):
self.src = src
self.dest = dest
self.module = module
+ self.cmd_path = self.module.get_bin_path('tar')
self.zipflag = 'J'
@@ -157,7 +172,7 @@ def pick_handler(src, dest, module):
obj = handler(src, dest, module)
if obj.can_handle_archive():
return obj
- raise RuntimeError('Failed to find handler to unarchive "%s"' % src)
+ module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.')
def main():
@@ -168,6 +183,7 @@ def main():
original_basename = dict(required=False), # used to handle 'dest is a directory' via template, a slight hack
dest = dict(required=True),
copy = dict(default=True, type='bool'),
+ creates = dict(required=False),
),
add_file_common_args=True,
)
@@ -175,6 +191,7 @@ def main():
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
+ creates = module.params['creates']
# did tar file arrive?
if not os.path.exists(src):
@@ -185,6 +202,20 @@ def main():
if not os.access(src, os.R_OK):
module.fail_json(msg="Source '%s' not readable" % src)
+ if creates:
+ # do not run the command if the line contains creates=filename
+ # and the filename already exists. This allows idempotence
+ # of command executions.
+ v = os.path.expanduser(creates)
+ if os.path.exists(v):
+ module.exit_json(
+ stdout="skipped, since %s exists" % v,
+ skipped=True,
+ changed=False,
+ stderr=False,
+ rc=0
+ )
+
# is dest OK to receive tar file?
if not os.path.exists(os.path.dirname(dest)):
module.fail_json(msg="Destination directory '%s' does not exist" % (os.path.dirname(dest)))
diff --git a/internal/async_wrapper b/internal/async_wrapper
index 278280ef1a8..2bc2dc21823 100644
--- a/internal/async_wrapper
+++ b/internal/async_wrapper
@@ -72,7 +72,7 @@ if len(sys.argv) < 3:
})
sys.exit(1)
-jid = sys.argv[1]
+jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
diff --git a/messaging/rabbitmq_parameter b/messaging/rabbitmq_parameter
index 2b540cbfdee..2f78bd4ee15 100644
--- a/messaging/rabbitmq_parameter
+++ b/messaging/rabbitmq_parameter
@@ -52,6 +52,7 @@ options:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
+ version_added: "1.2"
state:
description:
- Specify if user is to be added or removed
diff --git a/messaging/rabbitmq_user b/messaging/rabbitmq_user
index 175bc0c1624..1cbee360dff 100644
--- a/messaging/rabbitmq_user
+++ b/messaging/rabbitmq_user
@@ -55,6 +55,7 @@ options:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
+ version_added: "1.2"
configure_priv:
description:
- Regular expression to restrict configure actions on a resource
diff --git a/messaging/rabbitmq_vhost b/messaging/rabbitmq_vhost
index 122f84e5761..fd4b04a683f 100644
--- a/messaging/rabbitmq_vhost
+++ b/messaging/rabbitmq_vhost
@@ -39,6 +39,7 @@ options:
- erlang node name of the rabbit we wish to configure
required: false
default: rabbit
+ version_added: "1.2"
tracing:
description:
- Enable/disable tracing for a vhost
diff --git a/monitoring/airbrake_deployment b/monitoring/airbrake_deployment
index 8a4a834be7c..e1c490b881b 100644
--- a/monitoring/airbrake_deployment
+++ b/monitoring/airbrake_deployment
@@ -51,7 +51,15 @@ options:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
- default: https://airbrake.io/deploys
+ default: "https://airbrake.io/deploys"
+ version_added: "1.5"
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
@@ -64,29 +72,12 @@ EXAMPLES = '''
revision=4.2
'''
-HAS_URLLIB = True
-try:
- import urllib
-except ImportError:
- HAS_URLLIB = False
-
-HAS_URLLIB2 = True
-try:
- import urllib2
-except ImportError:
- HAS_URLLIB2 = False
-
# ===========================================
# Module execution.
#
def main():
- if not HAS_URLLIB:
- module.fail_json(msg="urllib is not installed")
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
-
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
@@ -94,7 +85,8 @@ def main():
user=dict(required=False),
repo=dict(required=False),
revision=dict(required=False),
- url=dict(required=False, default='https://api.airbrake.io/deploys.txt')
+ url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
+ validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -123,18 +115,16 @@ def main():
module.exit_json(changed=True)
# Send the data to airbrake
- try:
- req = urllib2.Request(url, urllib.urlencode(params))
- result=urllib2.urlopen(req)
- except Exception, e:
- module.fail_json(msg="unable to update airbrake via %s?%s : %s" % (url, urllib.urlencode(params), e))
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ module.exit_json(changed=True)
else:
- if result.code == 200:
- module.exit_json(changed=True)
- else:
- module.fail_json(msg="HTTP result code: %d connecting to %s" % (result.code, url))
+ module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/monitoring/boundary_meter b/monitoring/boundary_meter
index 202dfd03ae3..da739d4306f 100644
--- a/monitoring/boundary_meter
+++ b/monitoring/boundary_meter
@@ -24,7 +24,6 @@ along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import datetime
-import urllib2
import base64
import os
@@ -59,6 +58,14 @@ options:
description:
- Organizations boundary API KEY
required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
notes:
- This module does not yet support boundary tags.
@@ -74,12 +81,6 @@ EXAMPLES='''
'''
-try:
- import urllib2
- HAS_URLLIB2 = True
-except ImportError:
- HAS_URLLIB2 = False
-
api_host = "api.boundary.com"
config_directory = "/etc/bprobe"
@@ -101,7 +102,7 @@ def build_url(name, apiid, action, meter_id=None, cert_type=None):
elif action == "delete":
return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id)
-def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None):
+def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None):
if meter_id is None:
url = build_url(name, apiid, action)
@@ -111,11 +112,11 @@ def http_request(name, apiid, apikey, action, meter_id=None, cert_type=None):
else:
url = build_url(name, apiid, action, meter_id, cert_type)
- auth = auth_encode(apikey)
- request = urllib2.Request(url)
- request.add_header("Authorization", "Basic %s" % (auth))
- request.add_header("Content-Type", "application/json")
- return request
+ headers = dict()
+ headers["Authorization"] = "Basic %s" % auth_encode(apikey)
+ headers["Content-Type"] = "application/json"
+
+ return fetch_url(module, url, data=data, headers=headers)
def create_meter(module, name, apiid, apikey):
@@ -126,14 +127,10 @@ def create_meter(module, name, apiid, apikey):
module.exit_json(status="Meter " + name + " already exists",changed=False)
else:
# If it doesn't exist, create it
- request = http_request(name, apiid, apikey, action="create")
- # A create request seems to need a json body with the name of the meter in it
body = '{"name":"' + name + '"}'
- request.add_data(body)
+ response, info = http_request(module, name, apiid, apikey, data=body, action="create")
- try:
- result = urllib2.urlopen(request)
- except urllib2.URLError, e:
+ if info['status'] != 200:
module.fail_json(msg="Failed to connect to api host to create meter")
# If the config directory doesn't exist, create it
@@ -160,15 +157,13 @@ def create_meter(module, name, apiid, apikey):
def search_meter(module, name, apiid, apikey):
- request = http_request(name, apiid, apikey, action="search")
+ response, info = http_request(module, name, apiid, apikey, action="search")
- try:
- result = urllib2.urlopen(request)
- except urllib2.URLError, e:
+ if info['status'] != 200:
module.fail_json("Failed to connect to api host to search for meter")
# Return meters
- return json.loads(result.read())
+ return json.loads(response.read())
def get_meter_id(module, name, apiid, apikey):
# In order to delete the meter we need its id
@@ -186,16 +181,9 @@ def delete_meter(module, name, apiid, apikey):
if meter_id is None:
return 1, "Meter does not exist, so can't delete it"
else:
- action = "delete"
- request = http_request(name, apiid, apikey, action, meter_id)
- # See http://stackoverflow.com/questions/4511598/how-to-make-http-delete-method-using-urllib2
- # urllib2 only does GET or POST I believe, but here we need delete
- request.get_method = lambda: 'DELETE'
-
- try:
- result = urllib2.urlopen(request)
- except urllib2.URLError, e:
- module.fail_json("Failed to connect to api host to delete meter")
+        response, info = http_request(module, name, apiid, apikey, action="delete", meter_id=meter_id)
+        if info['status'] != 200:
+            module.fail_json(msg="Failed to delete meter")
# Each new meter gets a new key.pem and ca.pem file, so they should be deleted
types = ['cert', 'key']
@@ -214,17 +202,14 @@ def download_request(module, name, apiid, apikey, cert_type):
if meter_id is not None:
action = "certificates"
- request = http_request(name, apiid, apikey, action, meter_id, cert_type)
-
- try:
- result = urllib2.urlopen(request)
- except urllib2.URLError, e:
+        response, info = http_request(module, name, apiid, apikey, action, meter_id=meter_id, cert_type=cert_type)
+        if info['status'] != 200:
-            module.fail_json("Failed to connect to api host to download certificate")
+            module.fail_json(msg="Failed to connect to api host to download certificate")
-        if result:
+        if response:
try:
cert_file_path = '%s/%s.pem' % (config_directory,cert_type)
- body = result.read()
+ body = response.read()
cert_file = open(cert_file_path, 'w')
cert_file.write(body)
-            cert_file.close
+            cert_file.close()
@@ -238,15 +223,13 @@ def download_request(module, name, apiid, apikey, cert_type):
def main():
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
-
module = AnsibleModule(
argument_spec=dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=False),
apikey=dict(required=True),
apiid=dict(required=True),
+ validate_certs = dict(default='yes', type='bool'),
)
)
@@ -268,5 +251,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
diff --git a/monitoring/datadog_event b/monitoring/datadog_event
index 629e86e98ab..5d38dd4c31d 100644
--- a/monitoring/datadog_event
+++ b/monitoring/datadog_event
@@ -54,6 +54,14 @@ options:
description: ["An arbitrary string to use for aggregation."]
required: false
default: null
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
'''
EXAMPLES = '''
@@ -67,7 +75,6 @@ datadog_event: title="Testing from ansible" text="Test!"
'''
import socket
-from urllib2 import urlopen, Request, URLError
def main():
module = AnsibleModule(
@@ -90,15 +97,15 @@ def main():
choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps',
'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric',
'capistrano']
- )
+ ),
+ validate_certs = dict(default='yes', type='bool'),
)
)
post_event(module)
def post_event(module):
- uri = "https://app.datadoghq.com/api/v1/events?api_key=" + \
- module.params['api_key']
+ uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key']
body = dict(
title=module.params['title'],
@@ -117,22 +124,20 @@ def post_event(module):
json_body = module.jsonify(body)
headers = {"Content-Type": "application/json"}
- request = Request(uri, json_body, headers, unverifiable=True)
- try:
- response = urlopen(request)
+ (response, info) = fetch_url(module, uri, data=json_body, headers=headers)
+ if info['status'] == 200:
response_body = response.read()
response_json = module.from_json(response_body)
if response_json['status'] == 'ok':
module.exit_json(changed=True)
else:
-            module.fail_json(msg=response)
+            module.fail_json(msg=response_body)
-
- except URLError, e:
- module.fail_json(msg="URL error: %s." % e)
- except socket.error, e:
- module.fail_json(msg="Socket error: %s to %s" % (e, uri))
+ else:
+ module.fail_json(**info)
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
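datadog_event makes the same switch but posts JSON, so it supplies its own Content-Type header and decodes the body on return. Condensed from the hunk above, the round trip is:

    json_body = module.jsonify(body)   # body built as in the hunk above
    headers = {"Content-Type": "application/json"}
    response, info = fetch_url(module, uri, data=json_body, headers=headers)
    if info['status'] == 200:
        result = module.from_json(response.read())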
diff --git a/monitoring/librato_annotation b/monitoring/librato_annotation
new file mode 100644
index 00000000000..63979f41bfb
--- /dev/null
+++ b/monitoring/librato_annotation
@@ -0,0 +1,169 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# (C) Seth Edwards, 2014
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+
+import base64
+
+DOCUMENTATION = '''
+---
+module: librato_annotation
+short_description: create an annotation in librato
+description:
+ - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
+version_added: "1.6"
+author: Seth Edwards
+requirements:
+ - urllib2
+ - base64
+options:
+ user:
+ description:
+ - Librato account username
+ required: true
+ api_key:
+ description:
+ - Librato account api key
+ required: true
+ name:
+ description:
+ - The annotation stream name
+ - If the annotation stream does not exist, it will be created automatically
+ required: false
+ title:
+ description:
+ - The title of an annotation is a string and may contain spaces
+ - The title should be a short, high-level summary of the annotation e.g. v45 Deployment
+ required: true
+ source:
+ description:
+ - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
+ required: false
+ description:
+ description:
+ - The description contains extra meta-data about a particular annotation
+ - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
+ required: false
+ start_time:
+ description:
+            - The unix timestamp indicating the time at which the event referenced by this annotation started
+ required: false
+ end_time:
+ description:
+            - The unix timestamp indicating the time at which the event referenced by this annotation ended
+ - For events that have a duration, this is a useful way to annotate the duration of the event
+ required: false
+ links:
+ description:
+ - See examples
+        required: false
+'''
+
+EXAMPLES = '''
+# Create a simple annotation event with a source
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXX
+ title: 'App Config Change'
+ source: 'foo.bar'
+ description: 'This is a detailed description of the config change'
+
+# Create an annotation that includes a link
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'code.deploy'
+ title: 'app code deploy'
+ description: 'this is a detailed description of a deployment'
+ links:
+ - { rel: 'example', href: 'http://www.example.com/deploy' }
+
+# Create an annotation with a start_time and end_time
+- librato_annotation:
+ user: user@example.com
+ api_key: XXXXXXXXXXXXXXXXXX
+ name: 'maintenance'
+ title: 'Maintenance window'
+ description: 'This is a detailed description of maintenance'
+ start_time: 1395940006
+ end_time: 1395954406
+'''
+
+
+try:
+ import urllib2
+ HAS_URLLIB2 = True
+except ImportError:
+ HAS_URLLIB2 = False
+
+def post_annotation(module):
+ user = module.params['user']
+ api_key = module.params['api_key']
+ name = module.params['name']
+ title = module.params['title']
+
+ url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
+ params = {}
+ params['title'] = title
+
+ if module.params['source'] != None:
+ params['source'] = module.params['source']
+ if module.params['description'] != None:
+ params['description'] = module.params['description']
+ if module.params['start_time'] != None:
+ params['start_time'] = module.params['start_time']
+ if module.params['end_time'] != None:
+ params['end_time'] = module.params['end_time']
+ if module.params['links'] != None:
+ params['links'] = module.params['links']
+
+ json_body = module.jsonify(params)
+
+ headers = {}
+ headers['Content-Type'] = 'application/json'
+ headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip()
+ req = urllib2.Request(url, json_body, headers)
+ try:
+ response = urllib2.urlopen(req)
+ except urllib2.HTTPError as e:
+ module.fail_json(msg="Request Failed", reason=e.reason)
+ response = response.read()
+ module.exit_json(changed=True, annotation=response)
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ user = dict(required=True),
+ api_key = dict(required=True),
+ name = dict(required=False),
+ title = dict(required=True),
+ source = dict(required=False),
+ description = dict(required=False),
+ start_time = dict(required=False, default=None, type='int'),
+        end_time = dict(required=False, default=None, type='int'),
+ links = dict(type='list')
+ )
+ )
+
+    if not HAS_URLLIB2:
+        module.fail_json(msg="urllib2 is not installed")
+
+    post_annotation(module)
+
+from ansible.module_utils.basic import *
+main()
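Note that the new librato_annotation module still drives urllib2 directly instead of fetch_url. For comparison, the equivalent call through module_utils would look roughly like this (a sketch only, not part of the patch; user, api_key, url and json_body as in the module above):

    headers = {
        'Content-Type': 'application/json',
        'Authorization': 'Basic %s' % base64.b64encode('%s:%s' % (user, api_key)),
    }
    response, info = fetch_url(module, url, data=json_body,
                               headers=headers, method='POST')
    if info['status'] not in (200, 201):
        module.fail_json(msg="Request Failed", reason=info['msg'])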
diff --git a/monitoring/logentries b/monitoring/logentries
new file mode 100644
index 00000000000..373f4f777ff
--- /dev/null
+++ b/monitoring/logentries
@@ -0,0 +1,130 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Ivan Vanderbyl
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: logentries
+author: Ivan Vanderbyl
+short_description: Module for tracking logs via logentries.com
+description:
+ - Sends logs to LogEntries in realtime
+version_added: "1.6"
+options:
+ path:
+ description:
+ - path to a log file
+ required: true
+ state:
+ description:
+ - following state of the log
+    choices: [ 'present', 'followed', 'absent', 'unfollowed' ]
+ required: false
+ default: present
+notes:
+ - Requires the LogEntries agent which can be installed following the instructions at logentries.com
+'''
+EXAMPLES = '''
+- logentries: path=/var/log/nginx/access.log state=present
+- logentries: path=/var/log/nginx/error.log state=absent
+'''
+
+def query_log_status(module, le_path, path, state="present"):
+ """ Returns whether a log is followed or not. """
+
+ if state == "present":
+ rc, out, err = module.run_command("%s followed %s" % (le_path, path))
+ if rc == 0:
+ return True
+
+ return False
+
+def follow_log(module, le_path, logs):
+ """ Follows one or more logs if not already followed. """
+
+ followed_count = 0
+
+ for log in logs:
+ if query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'follow', log])
+
+ if not query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip()))
+
+ followed_count += 1
+
+ if followed_count > 0:
+ module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,))
+
+    module.exit_json(changed=False, msg="log(s) already followed")
+
+def unfollow_log(module, le_path, logs):
+ """ Unfollows one or more logs if followed. """
+
+ removed_count = 0
+
+    # Use a for loop so that, in case of error, we can report the log that failed
+ for log in logs:
+ # Query the log first, to see if we even need to remove.
+ if not query_log_status(module, le_path, log):
+ continue
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+ rc, out, err = module.run_command([le_path, 'rm', log])
+
+ if query_log_status(module, le_path, log):
+ module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip()))
+
+ removed_count += 1
+
+ if removed_count > 0:
+        module.exit_json(changed=True, msg="removed %d log(s)" % removed_count)
+
+    module.exit_json(changed=False, msg="log(s) already unfollowed")
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(aliases=["name"], required=True),
+ state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"])
+ ),
+ supports_check_mode=True
+ )
+
+ le_path = module.get_bin_path('le', True, ['/usr/local/bin'])
+
+ p = module.params
+
+ # Handle multiple log files
+ logs = p["path"].split(",")
+ logs = filter(None, logs)
+
+ if p["state"] in ["present", "followed"]:
+ follow_log(module, le_path, logs)
+
+ elif p["state"] in ["absent", "unfollowed"]:
+ unfollow_log(module, le_path, logs)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/monitoring/monit b/monitoring/monit
index 32e3e058121..0705b714315 100644
--- a/monitoring/monit
+++ b/monitoring/monit
@@ -47,6 +47,7 @@ EXAMPLES = '''
- monit: name=httpd state=started
'''
+import pipes
def main():
arg_spec = dict(
@@ -67,7 +68,7 @@ def main():
rc, out, err = module.run_command('%s reload' % MONIT)
module.exit_json(changed=True, name=name, state=state)
- rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, name))
+ rc, out, err = module.run_command('%s summary | grep "Process \'%s\'"' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
present = name in out
if not present and not state == 'present':
@@ -78,7 +79,7 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
module.run_command('%s reload' % MONIT, check_rc=True)
- rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
+ rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
if name in out:
module.exit_json(changed=True, name=name, state=state)
else:
@@ -86,7 +87,7 @@ def main():
module.exit_json(changed=False, name=name, state=state)
- rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
+ rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
running = 'running' in out.lower()
if running and (state == 'started' or state == 'monitored'):
@@ -99,7 +100,7 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
module.run_command('%s stop %s' % (MONIT, name))
- rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
+ rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
if 'not monitored' in out.lower() or 'stop pending' in out.lower():
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg=out)
@@ -108,7 +109,8 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
module.run_command('%s unmonitor %s' % (MONIT, name))
- rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, name))
+ # FIXME: DRY FOLKS!
+ rc, out, err = module.run_command('%s summary | grep %s' % (MONIT, pipes.quote(name)), use_unsafe_shell=True)
if 'not monitored' in out.lower():
module.exit_json(changed=True, name=name, state=state)
module.fail_json(msg=out)
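The pipes.quote()/use_unsafe_shell=True pairing is the point of the monit hunks: the service name is interpolated into a shell pipeline, so it has to be quoted before the shell sees it. A small illustration (the hostile name is hypothetical):

    import pipes

    name = "httpd; rm -rf /tmp/x"     # attacker-controlled service name
    cmd = "%s summary | grep %s" % ("monit", pipes.quote(name))
    # -> monit summary | grep 'httpd; rm -rf /tmp/x'
    # the metacharacters stay inside a single quoted grep argument
    # instead of being executed by the shell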
diff --git a/monitoring/newrelic_deployment b/monitoring/newrelic_deployment
index de64651969c..93d55832fd3 100644
--- a/monitoring/newrelic_deployment
+++ b/monitoring/newrelic_deployment
@@ -63,6 +63,14 @@ options:
description:
- The environment for this deployment
required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
@@ -75,29 +83,12 @@ EXAMPLES = '''
revision=1.0
'''
-HAS_URLLIB = True
-try:
-    import urllib
-except ImportError:
-    HAS_URLLIB = False
+# urllib is still needed below for urlencode
+import urllib
-
-HAS_URLLIB2 = True
-try:
- import urllib2
-except ImportError:
- HAS_URLLIB2 = False
-
# ===========================================
# Module execution.
#
def main():
- if not HAS_URLLIB:
- module.fail_json(msg="urllib is not installed")
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
-
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
@@ -109,6 +100,7 @@ def main():
user=dict(required=False),
appname=dict(required=False),
environment=dict(required=False),
+ validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -134,29 +126,20 @@ def main():
module.exit_json(changed=True)
# Send the data to NewRelic
- try:
- req = urllib2.Request("https://rpm.newrelic.com/deployments.xml", urllib.urlencode(params))
- req.add_header('x-api-key',module.params["token"])
- result=urllib2.urlopen(req)
- # urlopen behaves differently in python 2.4 and 2.6 so we handle
- # both cases here. In python 2.4 it throws an exception if the
- # return code is anything other than a 200. In python 2.6 it
- # doesn't throw an exception for any 2xx return codes. In both
- # cases we expect newrelic should return a 201 on success. So
- # to handle both cases, both the except & else cases below are
- # effectively identical.
- except Exception, e:
- if e.code == 201:
- module.exit_json(changed=True)
- else:
- module.fail_json(msg="unable to update newrelic: %s" % e)
+ url = "https://rpm.newrelic.com/deployments.xml"
+ data = urllib.urlencode(params)
+ headers = {
+ 'x-api-key': module.params["token"],
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] in (200, 201):
+ module.exit_json(changed=True)
else:
- if result.code == 201:
- module.exit_json(changed=True)
- else:
- module.fail_json(msg="result code: %d" % result.code)
+ module.fail_json(msg="unable to update newrelic: %s" % info['msg'])
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/monitoring/pagerduty b/monitoring/pagerduty
index d2f630ae82a..90771a818bd 100644
--- a/monitoring/pagerduty
+++ b/monitoring/pagerduty
@@ -85,6 +85,15 @@ options:
default: Created by Ansible
choices: []
aliases: []
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
notes:
- This module does not yet have support to end maintenance windows.
'''
@@ -124,9 +133,15 @@ EXAMPLES='''
import json
import datetime
-import urllib2
import base64
+def auth_header(user, passwd, token):
+ if token:
+ return "Token token=%s" % token
+
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
+ return "Basic %s" % auth
+
def create_req(url, data, name, user, passwd, token):
req = urllib2.Request(url, data)
if token:
@@ -134,39 +149,42 @@ def create_req(url, data, name, user, passwd, token):
else:
auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
req.add_header("Authorization", "Basic %s" % auth)
-
return req
-def ongoing(name, user, passwd, token):
+def ongoing(module, name, user, passwd, token):
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing"
- req = create_req(url, None, name, user, passwd, token)
- res = urllib2.urlopen(req)
- out = res.read()
+ headers = {"Authorization": auth_header(user, passwd, token)}
- return False, out
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg'])
+ return False, response.read()
-def create(name, user, passwd, token, requester_id, service, hours, minutes, desc):
+def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc):
now = datetime.datetime.utcnow()
later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes))
start = now.strftime("%Y-%m-%dT%H:%M:%SZ")
end = later.strftime("%Y-%m-%dT%H:%M:%SZ")
url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows"
+ headers = {
+ 'Authorization': auth_header(user, passwd, token),
+ 'Content-Type' : 'application/json',
+ }
request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}}
if requester_id:
request_data['requester_id'] = requester_id
data = json.dumps(request_data)
- req = create_req(url, data, name, user, passwd, token)
- req.add_header('Content-Type', 'application/json')
+ response, info = fetch_url(module, url, data=data, headers=headers, method='POST')
+ if info['status'] != 200:
+ module.fail_json(msg="failed to create the window: %s" % info['msg'])
- res = urllib2.urlopen(req)
- out = res.read()
-
- return False, out
+ return False, response.read()
def main():
@@ -182,7 +200,8 @@ def main():
requester_id=dict(required=False),
hours=dict(default='1', required=False),
minutes=dict(default='0', required=False),
- desc=dict(default='Created by Ansible', required=False)
+ desc=dict(default='Created by Ansible', required=False),
+ validate_certs = dict(default='yes', type='bool'),
)
)
@@ -204,10 +223,10 @@ def main():
if state == "running" or state == "started":
if not service:
module.fail_json(msg="service not specified")
- (rc, out) = create(name, user, passwd, token, requester_id, service, hours, minutes, desc)
+ (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc)
if state == "ongoing":
- (rc, out) = ongoing(name, user, passwd, token)
+ (rc, out) = ongoing(module, name, user, passwd, token)
if rc != 0:
module.fail_json(msg="failed", result=out)
@@ -216,4 +235,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
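The new auth_header() helper centralises the two authentication styles the PagerDuty API accepts, with a token taking precedence over basic credentials. Roughly (values are placeholders):

    auth_header(None, None, 'MYTOKEN')
    # -> 'Token token=MYTOKEN'

    auth_header('alice@example.com', 's3cret', None)
    # -> 'Basic ' followed by the base64 of 'alice@example.com:s3cret'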
diff --git a/monitoring/rollbar_deployment b/monitoring/rollbar_deployment
new file mode 100644
index 00000000000..772e78fc5c2
--- /dev/null
+++ b/monitoring/rollbar_deployment
@@ -0,0 +1,133 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright 2014, Max Riveiro,
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import urllib
+
+DOCUMENTATION = '''
+---
+module: rollbar_deployment
+version_added: 1.6
+author: Max Riveiro
+short_description: Notify Rollbar about app deployments
+description:
+ - Notify Rollbar about app deployments
+ (see https://rollbar.com/docs/deploys_other/)
+options:
+ token:
+ description:
+ - Your project access token.
+ required: true
+ environment:
+ description:
+ - Name of the environment being deployed, e.g. 'production'.
+ required: true
+ revision:
+ description:
+ - Revision number/sha being deployed.
+ required: true
+ user:
+ description:
+ - User who deployed.
+ required: false
+ rollbar_user:
+ description:
+ - Rollbar username of the user who deployed.
+ required: false
+ comment:
+ description:
+ - Deploy comment (e.g. what is being deployed).
+ required: false
+ url:
+ description:
+ - Optional URL to submit the notification to.
+ required: false
+ default: 'https://api.rollbar.com/api/1/deploy/'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated.
+ This should only be used on personally controlled sites using
+ self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+- rollbar_deployment: token=AAAAAA
+ environment='staging'
+ user='ansible'
+    revision=4.2
+    rollbar_user='admin'
+ comment='Test Deploy'
+'''
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ token=dict(required=True),
+ environment=dict(required=True),
+ revision=dict(required=True),
+ user=dict(required=False),
+ rollbar_user=dict(required=False),
+ comment=dict(required=False),
+ url=dict(
+ required=False,
+ default='https://api.rollbar.com/api/1/deploy/'
+ ),
+ validate_certs=dict(default='yes', type='bool'),
+ ),
+ supports_check_mode=True
+ )
+
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ params = dict(
+ access_token=module.params['token'],
+ environment=module.params['environment'],
+ revision=module.params['revision']
+ )
+
+ if module.params['user']:
+ params['local_username'] = module.params['user']
+
+ if module.params['rollbar_user']:
+ params['rollbar_username'] = module.params['rollbar_user']
+
+ if module.params['comment']:
+ params['comment'] = module.params['comment']
+
+ url = module.params.get('url')
+
+ try:
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ except Exception, e:
+ module.fail_json(msg='Unable to notify Rollbar: %s' % e)
+ else:
+ if info['status'] == 200:
+ module.exit_json(changed=True)
+ else:
+ module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url))
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
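The new bigip_facts module below is essentially a large catalogue of thin wrappers over the F5 iControl SOAP API. The underlying bigsuds call pattern it builds on looks like this (hostname and credentials are placeholders; the bigsuds package is required):

    import bigsuds

    api = bigsuds.BIGIP(hostname='lb.mydomain.com', username='admin',
                        password='mysecret')
    api = api.with_session_id()      # optional per-session isolation
    interfaces = api.Networking.Interfaces.get_list()
    mtus = api.Networking.Interfaces.get_mtu(interfaces)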
diff --git a/net_infrastructure/bigip_facts b/net_infrastructure/bigip_facts
new file mode 100644
index 00000000000..3a7a4533f69
--- /dev/null
+++ b/net_infrastructure/bigip_facts
@@ -0,0 +1,1670 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Matt Hite
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: bigip_facts
+short_description: "Collect facts from F5 BIG-IP devices"
+description:
+ - "Collect facts from F5 BIG-IP devices via iControl SOAP API"
+version_added: "1.6"
+author: Matt Hite
+notes:
+ - "Requires BIG-IP software version >= 11.4"
+ - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
+ - "Best run as a local_action in your playbook"
+ - "Tested with manager and above account privilege level"
+
+requirements:
+ - bigsuds
+options:
+ server:
+ description:
+ - BIG-IP host
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ user:
+ description:
+ - BIG-IP username
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ password:
+ description:
+ - BIG-IP password
+ required: true
+ default: null
+ choices: []
+ aliases: []
+ session:
+ description:
+ - BIG-IP session support; may be useful to avoid concurrency
+ issues in certain circumstances.
+ required: false
+ default: true
+ choices: []
+ aliases: []
+ include:
+ description:
+ - Fact category or list of categories to collect
+ required: true
+ default: null
+ choices: ['address_class', 'certificate', 'client_ssl_profile',
+ 'device_group', 'interface', 'key', 'node', 'pool', 'rule',
+ 'self_ip', 'software', 'system_info', 'traffic_group',
+ 'trunk', 'virtual_address', 'virtual_server', 'vlan']
+ aliases: []
+ filter:
+ description:
+ - Shell-style glob matching string used to filter fact keys. Not
+ applicable for software and system_info fact categories.
+ required: false
+ default: null
+ choices: []
+ aliases: []
+'''
+
+EXAMPLES = '''
+
+## playbook task examples:
+
+---
+# file bigip-test.yml
+# ...
+- hosts: bigip-test
+ tasks:
+ - name: Collect BIG-IP facts
+ local_action: >
+ bigip_facts
+ server=lb.mydomain.com
+ user=admin
+ password=mysecret
+ include=interface,vlan
+
+'''
+
+try:
+ import bigsuds
+except ImportError:
+ bigsuds_found = False
+else:
+ bigsuds_found = True
+
+import fnmatch
+import traceback
+import re
+from suds import MethodNotFound
+
+# ===========================================
+# bigip_facts module specific support methods.
+#
+
+class F5(object):
+ """F5 iControl class.
+
+ F5 BIG-IP iControl API class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, host, user, password, session=False):
+ self.api = bigsuds.BIGIP(hostname=host, username=user, password=password)
+ if session:
+ self.start_session()
+
+ def start_session(self):
+ self.api = self.api.with_session_id()
+
+ def get_api(self):
+ return self.api
+
+ def set_recursive_query_state(self, state):
+ self.api.System.Session.set_recursive_query_state(state)
+
+ def get_recursive_query_state(self):
+ return self.api.System.Session.get_recursive_query_state()
+
+ def enable_recursive_query_state(self):
+ self.set_recursive_query_state('STATE_ENABLED')
+
+ def disable_recursive_query_state(self):
+ self.set_recursive_query_state('STATE_DISABLED')
+
+ def set_active_folder(self, folder):
+ self.api.System.Session.set_active_folder(folder=folder)
+
+ def get_active_folder(self):
+ return self.api.System.Session.get_active_folder()
+
+
+class Interfaces(object):
+ """Interfaces class.
+
+ F5 BIG-IP interfaces class.
+
+ Attributes:
+ api: iControl API instance.
+ interfaces: A list of BIG-IP interface names.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.interfaces = api.Networking.Interfaces.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.interfaces = filter(re_filter.search, self.interfaces)
+
+ def get_list(self):
+ return self.interfaces
+
+ def get_active_media(self):
+ return self.api.Networking.Interfaces.get_active_media(self.interfaces)
+
+ def get_actual_flow_control(self):
+ return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces)
+
+ def get_bundle_state(self):
+ return self.api.Networking.Interfaces.get_bundle_state(self.interfaces)
+
+ def get_description(self):
+ return self.api.Networking.Interfaces.get_description(self.interfaces)
+
+ def get_dual_media_state(self):
+ return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces)
+
+ def get_enabled_state(self):
+ return self.api.Networking.Interfaces.get_enabled_state(self.interfaces)
+
+ def get_if_index(self):
+ return self.api.Networking.Interfaces.get_if_index(self.interfaces)
+
+ def get_learning_mode(self):
+ return self.api.Networking.Interfaces.get_learning_mode(self.interfaces)
+
+ def get_lldp_admin_status(self):
+ return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces)
+
+ def get_lldp_tlvmap(self):
+ return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces)
+
+ def get_mac_address(self):
+ return self.api.Networking.Interfaces.get_mac_address(self.interfaces)
+
+ def get_media(self):
+ return self.api.Networking.Interfaces.get_media(self.interfaces)
+
+ def get_media_option(self):
+ return self.api.Networking.Interfaces.get_media_option(self.interfaces)
+
+ def get_media_option_sfp(self):
+ return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces)
+
+ def get_media_sfp(self):
+ return self.api.Networking.Interfaces.get_media_sfp(self.interfaces)
+
+ def get_media_speed(self):
+ return self.api.Networking.Interfaces.get_media_speed(self.interfaces)
+
+ def get_media_status(self):
+ return self.api.Networking.Interfaces.get_media_status(self.interfaces)
+
+ def get_mtu(self):
+ return self.api.Networking.Interfaces.get_mtu(self.interfaces)
+
+ def get_phy_master_slave_mode(self):
+ return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces)
+
+ def get_prefer_sfp_state(self):
+ return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces)
+
+ def get_flow_control(self):
+ return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces)
+
+ def get_sflow_poll_interval(self):
+ return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces)
+
+ def get_sflow_poll_interval_global(self):
+ return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces)
+
+ def get_sfp_media_state(self):
+ return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces)
+
+ def get_stp_active_edge_port_state(self):
+ return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces)
+
+ def get_stp_enabled_state(self):
+ return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces)
+
+ def get_stp_link_type(self):
+ return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces)
+
+ def get_stp_protocol_detection_reset_state(self):
+ return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces)
+
+
+class SelfIPs(object):
+ """Self IPs class.
+
+ F5 BIG-IP Self IPs class.
+
+ Attributes:
+ api: iControl API instance.
+ self_ips: List of self IPs.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.self_ips = api.Networking.SelfIPV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.self_ips = filter(re_filter.search, self.self_ips)
+
+ def get_list(self):
+ return self.self_ips
+
+ def get_address(self):
+ return self.api.Networking.SelfIPV2.get_address(self.self_ips)
+
+ def get_allow_access_list(self):
+ return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips)
+
+ def get_description(self):
+ return self.api.Networking.SelfIPV2.get_description(self.self_ips)
+
+ def get_enforced_firewall_policy(self):
+ return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips)
+
+ def get_floating_state(self):
+ return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips)
+
+ def get_fw_rule(self):
+ return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips)
+
+ def get_netmask(self):
+ return self.api.Networking.SelfIPV2.get_netmask(self.self_ips)
+
+ def get_staged_firewall_policy(self):
+ return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips)
+
+ def get_traffic_group(self):
+ return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips)
+
+ def get_vlan(self):
+ return self.api.Networking.SelfIPV2.get_vlan(self.self_ips)
+
+ def get_is_traffic_group_inherited(self):
+ return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips)
+
+
+class Trunks(object):
+ """Trunks class.
+
+ F5 BIG-IP trunks class.
+
+ Attributes:
+ api: iControl API instance.
+ trunks: List of trunks.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.trunks = api.Networking.Trunk.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.trunks = filter(re_filter.search, self.trunks)
+
+ def get_list(self):
+ return self.trunks
+
+ def get_active_lacp_state(self):
+ return self.api.Networking.Trunk.get_active_lacp_state(self.trunks)
+
+ def get_configured_member_count(self):
+ return self.api.Networking.Trunk.get_configured_member_count(self.trunks)
+
+ def get_description(self):
+ return self.api.Networking.Trunk.get_description(self.trunks)
+
+ def get_distribution_hash_option(self):
+ return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks)
+
+ def get_interface(self):
+ return self.api.Networking.Trunk.get_interface(self.trunks)
+
+ def get_lacp_enabled_state(self):
+ return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks)
+
+ def get_lacp_timeout_option(self):
+ return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks)
+
+ def get_link_selection_policy(self):
+ return self.api.Networking.Trunk.get_link_selection_policy(self.trunks)
+
+ def get_media_speed(self):
+ return self.api.Networking.Trunk.get_media_speed(self.trunks)
+
+ def get_media_status(self):
+ return self.api.Networking.Trunk.get_media_status(self.trunks)
+
+ def get_operational_member_count(self):
+ return self.api.Networking.Trunk.get_operational_member_count(self.trunks)
+
+ def get_stp_enabled_state(self):
+ return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks)
+
+ def get_stp_protocol_detection_reset_state(self):
+ return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks)
+
+
+class Vlans(object):
+ """Vlans class.
+
+ F5 BIG-IP Vlans class.
+
+ Attributes:
+ api: iControl API instance.
+ vlans: List of VLANs.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.vlans = api.Networking.VLAN.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.vlans = filter(re_filter.search, self.vlans)
+
+ def get_list(self):
+ return self.vlans
+
+ def get_auto_lasthop(self):
+ return self.api.Networking.VLAN.get_auto_lasthop(self.vlans)
+
+ def get_cmp_hash_algorithm(self):
+ return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans)
+
+ def get_description(self):
+ return self.api.Networking.VLAN.get_description(self.vlans)
+
+ def get_dynamic_forwarding(self):
+ return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans)
+
+ def get_failsafe_action(self):
+ return self.api.Networking.VLAN.get_failsafe_action(self.vlans)
+
+ def get_failsafe_state(self):
+ return self.api.Networking.VLAN.get_failsafe_state(self.vlans)
+
+ def get_failsafe_timeout(self):
+ return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans)
+
+ def get_if_index(self):
+ return self.api.Networking.VLAN.get_if_index(self.vlans)
+
+ def get_learning_mode(self):
+ return self.api.Networking.VLAN.get_learning_mode(self.vlans)
+
+ def get_mac_masquerade_address(self):
+ return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans)
+
+ def get_member(self):
+ return self.api.Networking.VLAN.get_member(self.vlans)
+
+ def get_mtu(self):
+ return self.api.Networking.VLAN.get_mtu(self.vlans)
+
+ def get_sflow_poll_interval(self):
+ return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans)
+
+ def get_sflow_poll_interval_global(self):
+ return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans)
+
+ def get_sflow_sampling_rate(self):
+ return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans)
+
+ def get_sflow_sampling_rate_global(self):
+ return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans)
+
+ def get_source_check_state(self):
+ return self.api.Networking.VLAN.get_source_check_state(self.vlans)
+
+ def get_true_mac_address(self):
+ return self.api.Networking.VLAN.get_true_mac_address(self.vlans)
+
+ def get_vlan_id(self):
+ return self.api.Networking.VLAN.get_vlan_id(self.vlans)
+
+
+class Software(object):
+ """Software class.
+
+ F5 BIG-IP software class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+ def get_all_software_status(self):
+ return self.api.System.SoftwareManagement.get_all_software_status()
+
+
+class VirtualServers(object):
+ """Virtual servers class.
+
+ F5 BIG-IP virtual servers class.
+
+ Attributes:
+ api: iControl API instance.
+ virtual_servers: List of virtual servers.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.virtual_servers = api.LocalLB.VirtualServer.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.virtual_servers = filter(re_filter.search, self.virtual_servers)
+
+ def get_list(self):
+ return self.virtual_servers
+
+ def get_actual_hardware_acceleration(self):
+ return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers)
+
+ def get_authentication_profile(self):
+ return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers)
+
+ def get_auto_lasthop(self):
+ return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers)
+
+ def get_bw_controller_policy(self):
+ return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers)
+
+ def get_clone_pool(self):
+ return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers)
+
+ def get_cmp_enable_mode(self):
+ return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers)
+
+ def get_connection_mirror_state(self):
+ return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers)
+
+ def get_default_pool_name(self):
+ return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers)
+
+ def get_description(self):
+ return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers)
+
+ def get_destination(self):
+ return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers)
+
+ def get_enabled_state(self):
+ return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers)
+
+ def get_enforced_firewall_policy(self):
+ return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers)
+
+ def get_fallback_persistence_profile(self):
+ return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers)
+
+ def get_fw_rule(self):
+ return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers)
+
+ def get_gtm_score(self):
+ return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers)
+
+ def get_last_hop_pool(self):
+ return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers)
+
+ def get_nat64_state(self):
+ return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers)
+
+ def get_object_status(self):
+ return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers)
+
+ def get_persistence_profile(self):
+ return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers)
+
+ def get_profile(self):
+ return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers)
+
+ def get_protocol(self):
+ return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers)
+
+ def get_rate_class(self):
+ return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers)
+
+ def get_rate_limit(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers)
+
+ def get_rate_limit_destination_mask(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers)
+
+ def get_rate_limit_mode(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers)
+
+ def get_rate_limit_source_mask(self):
+ return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers)
+
+ def get_related_rule(self):
+ return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers)
+
+ def get_rule(self):
+ return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers)
+
+ def get_security_log_profile(self):
+ return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers)
+
+ def get_snat_pool(self):
+ return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers)
+
+ def get_snat_type(self):
+ return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers)
+
+ def get_source_address(self):
+ return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers)
+
+ def get_source_address_translation_lsn_pool(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers)
+
+ def get_source_address_translation_snat_pool(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers)
+
+ def get_source_address_translation_type(self):
+ return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers)
+
+ def get_source_port_behavior(self):
+ return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers)
+
+ def get_staged_firewall_policy(self):
+ return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers)
+
+ def get_translate_address_state(self):
+ return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers)
+
+ def get_translate_port_state(self):
+ return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers)
+
+ def get_type(self):
+ return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers)
+
+ def get_vlan(self):
+ return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers)
+
+ def get_wildmask(self):
+ return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers)
+
+
+class Pools(object):
+ """Pools class.
+
+ F5 BIG-IP pools class.
+
+ Attributes:
+ api: iControl API instance.
+ pool_names: List of pool names.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.pool_names = api.LocalLB.Pool.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.pool_names = filter(re_filter.search, self.pool_names)
+
+ def get_list(self):
+ return self.pool_names
+
+ def get_action_on_service_down(self):
+ return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names)
+
+ def get_active_member_count(self):
+ return self.api.LocalLB.Pool.get_active_member_count(self.pool_names)
+
+ def get_aggregate_dynamic_ratio(self):
+ return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names)
+
+ def get_allow_nat_state(self):
+ return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names)
+
+ def get_allow_snat_state(self):
+ return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names)
+
+ def get_client_ip_tos(self):
+ return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names)
+
+ def get_client_link_qos(self):
+ return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names)
+
+ def get_description(self):
+ return self.api.LocalLB.Pool.get_description(self.pool_names)
+
+ def get_gateway_failsafe_device(self):
+ return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names)
+
+ def get_ignore_persisted_weight_state(self):
+ return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names)
+
+ def get_lb_method(self):
+ return self.api.LocalLB.Pool.get_lb_method(self.pool_names)
+
+ def get_member(self):
+ return self.api.LocalLB.Pool.get_member_v2(self.pool_names)
+
+ def get_minimum_active_member(self):
+ return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names)
+
+ def get_minimum_up_member(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names)
+
+ def get_minimum_up_member_action(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names)
+
+ def get_minimum_up_member_enabled_state(self):
+ return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names)
+
+ def get_monitor_association(self):
+ return self.api.LocalLB.Pool.get_monitor_association(self.pool_names)
+
+ def get_monitor_instance(self):
+ return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names)
+
+ def get_object_status(self):
+ return self.api.LocalLB.Pool.get_object_status(self.pool_names)
+
+ def get_profile(self):
+ return self.api.LocalLB.Pool.get_profile(self.pool_names)
+
+ def get_queue_depth_limit(self):
+ return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names)
+
+ def get_queue_on_connection_limit_state(self):
+ return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names)
+
+ def get_queue_time_limit(self):
+ return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names)
+
+ def get_reselect_tries(self):
+ return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names)
+
+ def get_server_ip_tos(self):
+ return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names)
+
+ def get_server_link_qos(self):
+ return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names)
+
+ def get_simple_timeout(self):
+ return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names)
+
+ def get_slow_ramp_time(self):
+ return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names)
+
+
+class Devices(object):
+ """Devices class.
+
+ F5 BIG-IP devices class.
+
+ Attributes:
+ api: iControl API instance.
+ devices: List of devices.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.devices = api.Management.Device.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.devices = filter(re_filter.search, self.devices)
+
+ def get_list(self):
+ return self.devices
+
+ def get_active_modules(self):
+ return self.api.Management.Device.get_active_modules(self.devices)
+
+ def get_base_mac_address(self):
+ return self.api.Management.Device.get_base_mac_address(self.devices)
+
+ def get_blade_addresses(self):
+ return self.api.Management.Device.get_blade_addresses(self.devices)
+
+ def get_build(self):
+ return self.api.Management.Device.get_build(self.devices)
+
+ def get_chassis_id(self):
+ return self.api.Management.Device.get_chassis_id(self.devices)
+
+ def get_chassis_type(self):
+ return self.api.Management.Device.get_chassis_type(self.devices)
+
+ def get_comment(self):
+ return self.api.Management.Device.get_comment(self.devices)
+
+ def get_configsync_address(self):
+ return self.api.Management.Device.get_configsync_address(self.devices)
+
+ def get_contact(self):
+ return self.api.Management.Device.get_contact(self.devices)
+
+ def get_description(self):
+ return self.api.Management.Device.get_description(self.devices)
+
+ def get_edition(self):
+ return self.api.Management.Device.get_edition(self.devices)
+
+ def get_failover_state(self):
+ return self.api.Management.Device.get_failover_state(self.devices)
+
+ def get_local_device(self):
+ return self.api.Management.Device.get_local_device()
+
+ def get_hostname(self):
+ return self.api.Management.Device.get_hostname(self.devices)
+
+ def get_inactive_modules(self):
+ return self.api.Management.Device.get_inactive_modules(self.devices)
+
+ def get_location(self):
+ return self.api.Management.Device.get_location(self.devices)
+
+ def get_management_address(self):
+ return self.api.Management.Device.get_management_address(self.devices)
+
+ def get_marketing_name(self):
+ return self.api.Management.Device.get_marketing_name(self.devices)
+
+ def get_multicast_address(self):
+ return self.api.Management.Device.get_multicast_address(self.devices)
+
+ def get_optional_modules(self):
+ return self.api.Management.Device.get_optional_modules(self.devices)
+
+ def get_platform_id(self):
+ return self.api.Management.Device.get_platform_id(self.devices)
+
+ def get_primary_mirror_address(self):
+ return self.api.Management.Device.get_primary_mirror_address(self.devices)
+
+ def get_product(self):
+ return self.api.Management.Device.get_product(self.devices)
+
+ def get_secondary_mirror_address(self):
+ return self.api.Management.Device.get_secondary_mirror_address(self.devices)
+
+ def get_software_version(self):
+ return self.api.Management.Device.get_software_version(self.devices)
+
+ def get_timelimited_modules(self):
+ return self.api.Management.Device.get_timelimited_modules(self.devices)
+
+ def get_timezone(self):
+ return self.api.Management.Device.get_timezone(self.devices)
+
+ def get_unicast_addresses(self):
+ return self.api.Management.Device.get_unicast_addresses(self.devices)
+
+
+class DeviceGroups(object):
+ """Device groups class.
+
+ F5 BIG-IP device groups class.
+
+ Attributes:
+ api: iControl API instance.
+ device_groups: List of device groups.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.device_groups = api.Management.DeviceGroup.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.device_groups = filter(re_filter.search, self.device_groups)
+
+ def get_list(self):
+ return self.device_groups
+
+ def get_all_preferred_active(self):
+ return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups)
+
+ def get_autosync_enabled_state(self):
+ return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups)
+
+ def get_description(self):
+ return self.api.Management.DeviceGroup.get_description(self.device_groups)
+
+ def get_device(self):
+ return self.api.Management.DeviceGroup.get_device(self.device_groups)
+
+ def get_full_load_on_sync_state(self):
+ return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups)
+
+ def get_incremental_config_sync_size_maximum(self):
+ return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups)
+
+ def get_network_failover_enabled_state(self):
+ return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups)
+
+ def get_sync_status(self):
+ return self.api.Management.DeviceGroup.get_sync_status(self.device_groups)
+
+ def get_type(self):
+ return self.api.Management.DeviceGroup.get_type(self.device_groups)
+
+
+class TrafficGroups(object):
+ """Traffic groups class.
+
+ F5 BIG-IP traffic groups class.
+
+ Attributes:
+ api: iControl API instance.
+ traffic_groups: List of traffic groups.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.traffic_groups = api.Management.TrafficGroup.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.traffic_groups = filter(re_filter.search, self.traffic_groups)
+
+ def get_list(self):
+ return self.traffic_groups
+
+ def get_auto_failback_enabled_state(self):
+ return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups)
+
+ def get_auto_failback_time(self):
+ return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups)
+
+ def get_default_device(self):
+ return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups)
+
+ def get_description(self):
+ return self.api.Management.TrafficGroup.get_description(self.traffic_groups)
+
+ def get_ha_load_factor(self):
+ return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups)
+
+ def get_ha_order(self):
+ return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups)
+
+ def get_is_floating(self):
+ return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups)
+
+ def get_mac_masquerade_address(self):
+ return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups)
+
+ def get_unit_id(self):
+ return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups)
+
+
+class Rules(object):
+ """Rules class.
+
+ F5 BIG-IP iRules class.
+
+ Attributes:
+ api: iControl API instance.
+ rules: List of iRules.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.rules = api.LocalLB.Rule.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+            self.rules = filter(re_filter.search, self.rules)
+
+ def get_list(self):
+ return self.rules
+
+ def get_description(self):
+ return self.api.LocalLB.Rule.get_description(rule_names=self.rules)
+
+ def get_ignore_vertification(self):
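+        # 'vertification' (sic) mirrors the spelling of the underlying
+        # iControl API method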
+ return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules)
+
+ def get_verification_status(self):
+ return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules)
+
+ def get_definition(self):
+ return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
+
+
+class Nodes(object):
+ """Nodes class.
+
+ F5 BIG-IP nodes class.
+
+ Attributes:
+ api: iControl API instance.
+ nodes: List of nodes.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.nodes = api.LocalLB.NodeAddressV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.nodes = filter(re_filter.search, self.nodes)
+
+ def get_list(self):
+ return self.nodes
+
+ def get_address(self):
+ return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes)
+
+ def get_description(self):
+ return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes)
+
+ def get_dynamic_ratio(self):
+ return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes)
+
+ def get_monitor_instance(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes)
+
+ def get_monitor_rule(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes)
+
+ def get_monitor_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes)
+
+ def get_object_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes)
+
+ def get_rate_limit(self):
+ return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes)
+
+ def get_ratio(self):
+ return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes)
+
+ def get_session_status(self):
+ return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes)
+
+
+class VirtualAddresses(object):
+ """Virtual addresses class.
+
+ F5 BIG-IP virtual addresses class.
+
+ Attributes:
+ api: iControl API instance.
+ virtual_addresses: List of virtual addresses.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.virtual_addresses = filter(re_filter.search, self.virtual_addresses)
+
+ def get_list(self):
+ return self.virtual_addresses
+
+ def get_address(self):
+ return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses)
+
+ def get_arp_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses)
+
+ def get_auto_delete_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses)
+
+ def get_connection_limit(self):
+ return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses)
+
+ def get_description(self):
+ return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses)
+
+ def get_enabled_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses)
+
+ def get_icmp_echo_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses)
+
+ def get_is_floating_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses)
+
+ def get_netmask(self):
+ return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses)
+
+ def get_object_status(self):
+ return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses)
+
+ def get_route_advertisement_state(self):
+ return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses)
+
+ def get_traffic_group(self):
+ return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses)
+
+
+class AddressClasses(object):
+ """Address group/class class.
+
+ F5 BIG-IP address group/class class.
+
+ Attributes:
+ api: iControl API instance.
+ address_classes: List of address classes.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.address_classes = api.LocalLB.Class.get_address_class_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.address_classes = filter(re_filter.search, self.address_classes)
+
+ def get_list(self):
+ return self.address_classes
+
+ def get_address_class(self):
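+        # pair every member of each address class with its data value;
+        # map(zip, ...) zips the member list and value list per class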
+ key = self.api.LocalLB.Class.get_address_class(self.address_classes)
+ value = self.api.LocalLB.Class.get_address_class_member_data_value(key)
+ result = map(zip, [x['members'] for x in key], value)
+ return result
+
+ def get_description(self):
+ return self.api.LocalLB.Class.get_description(self.address_classes)
+
+
+class Certificates(object):
+ """Certificates class.
+
+ F5 BIG-IP certificates class.
+
+ Attributes:
+ api: iControl API instance.
+ certificates: List of certificate identifiers.
+ certificate_list: List of certificate information structures.
+ """
+
+ def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
+ self.api = api
+ self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode)
+ self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list]
+ if regex:
+ re_filter = re.compile(regex)
+ self.certificates = filter(re_filter.search, self.certificates)
+ self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates]
+
+ def get_list(self):
+ return self.certificates
+
+ def get_certificate_list(self):
+ return self.certificate_list
+
+
+class Keys(object):
+ """Keys class.
+
+ F5 BIG-IP keys class.
+
+ Attributes:
+ api: iControl API instance.
+ keys: List of key identifiers.
+ key_list: List of key information structures.
+ """
+
+ def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
+ self.api = api
+ self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode)
+ self.keys = [x['key_info']['id'] for x in self.key_list]
+ if regex:
+ re_filter = re.compile(regex)
+ self.keys = filter(re_filter.search, self.keys)
+ self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys]
+
+ def get_list(self):
+ return self.keys
+
+ def get_key_list(self):
+ return self.key_list
+
+
+class ProfileClientSSL(object):
+ """Client SSL profiles class.
+
+ F5 BIG-IP client SSL profiles class.
+
+ Attributes:
+ api: iControl API instance.
+ profiles: List of client SSL profiles.
+ """
+
+ def __init__(self, api, regex=None):
+ self.api = api
+ self.profiles = api.LocalLB.ProfileClientSSL.get_list()
+ if regex:
+ re_filter = re.compile(regex)
+ self.profiles = filter(re_filter.search, self.profiles)
+
+ def get_list(self):
+ return self.profiles
+
+ def get_alert_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles)
+
+ def get_allow_nonssl_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles)
+
+ def get_authenticate_depth(self):
+ return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles)
+
+ def get_authenticate_once_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles)
+
+ def get_ca_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles)
+
+ def get_cache_size(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles)
+
+ def get_cache_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles)
+
+ def get_certificate_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles)
+
+ def get_chain_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles)
+
+ def get_cipher_list(self):
+ return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles)
+
+ def get_client_certificate_ca_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles)
+
+ def get_crl_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles)
+
+ def get_default_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles)
+
+ def get_description(self):
+ return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles)
+
+ def get_forward_proxy_ca_certificate_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles)
+
+ def get_forward_proxy_ca_key_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles)
+
+ def get_forward_proxy_ca_passphrase(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles)
+
+ def get_forward_proxy_certificate_extension_include(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles)
+
+ def get_forward_proxy_certificate_lifespan(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles)
+
+ def get_forward_proxy_enabled_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles)
+
+ def get_forward_proxy_lookup_by_ipaddr_port_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles)
+
+ def get_handshake_timeout(self):
+ return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles)
+
+ def get_key_file(self):
+ return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles)
+
+ def get_modssl_emulation_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles)
+
+ def get_passphrase(self):
+ return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles)
+
+ def get_peer_certification_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles)
+
+ def get_profile_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles)
+
+ def get_renegotiation_maximum_record_delay(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles)
+
+ def get_renegotiation_period(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles)
+
+ def get_renegotiation_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles)
+
+ def get_renegotiation_throughput(self):
+ return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles)
+
+ def get_retain_certificate_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles)
+
+ def get_secure_renegotiation_mode(self):
+ return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles)
+
+ def get_server_name(self):
+ return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles)
+
+ def get_session_ticket_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles)
+
+ def get_sni_default_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles)
+
+ def get_sni_require_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles)
+
+ def get_ssl_option(self):
+ return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles)
+
+ def get_strict_resume_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles)
+
+ def get_unclean_shutdown_state(self):
+ return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles)
+
+ def get_is_base_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles)
+
+ def get_is_system_profile(self):
+ return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles)
+
+
+class SystemInfo(object):
+ """System information class.
+
+ F5 BIG-IP system information class.
+
+ Attributes:
+ api: iControl API instance.
+ """
+
+ def __init__(self, api):
+ self.api = api
+
+ def get_base_mac_address(self):
+ return self.api.System.SystemInfo.get_base_mac_address()
+
+ def get_blade_temperature(self):
+ return self.api.System.SystemInfo.get_blade_temperature()
+
+ def get_chassis_slot_information(self):
+ return self.api.System.SystemInfo.get_chassis_slot_information()
+
+ def get_globally_unique_identifier(self):
+ return self.api.System.SystemInfo.get_globally_unique_identifier()
+
+ def get_group_id(self):
+ return self.api.System.SystemInfo.get_group_id()
+
+ def get_hardware_information(self):
+ return self.api.System.SystemInfo.get_hardware_information()
+
+ def get_marketing_name(self):
+ return self.api.System.SystemInfo.get_marketing_name()
+
+ def get_product_information(self):
+ return self.api.System.SystemInfo.get_product_information()
+
+ def get_pva_version(self):
+ return self.api.System.SystemInfo.get_pva_version()
+
+ def get_system_id(self):
+ return self.api.System.SystemInfo.get_system_id()
+
+ def get_system_information(self):
+ return self.api.System.SystemInfo.get_system_information()
+
+ def get_time(self):
+ return self.api.System.SystemInfo.get_time()
+
+ def get_time_zone(self):
+ return self.api.System.SystemInfo.get_time_zone()
+
+ def get_uptime(self):
+ return self.api.System.SystemInfo.get_uptime()
+
+
+def generate_dict(api_obj, fields):
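+    # call get_<field>() for each requested field, skipping fields this
+    # BIG-IP version does not support, then transpose the per-field lists
+    # into one facts dict per object returned by get_list()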
+ result_dict = {}
+ lists = []
+ supported_fields = []
+ if api_obj.get_list():
+ for field in fields:
+ try:
+ api_response = getattr(api_obj, "get_" + field)()
+ except MethodNotFound:
+ pass
+ else:
+ lists.append(api_response)
+ supported_fields.append(field)
+ for i, j in enumerate(api_obj.get_list()):
+ temp = {}
+ temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)])
+ result_dict[j] = temp
+ return result_dict
+
+def generate_simple_dict(api_obj, fields):
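+    # flat variant of generate_dict: a single value per supported field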
+ result_dict = {}
+ for field in fields:
+ try:
+ api_response = getattr(api_obj, "get_" + field)()
+ except MethodNotFound:
+ pass
+ else:
+ result_dict[field] = api_response
+ return result_dict
+
+def generate_interface_dict(f5, regex):
+ interfaces = Interfaces(f5.get_api(), regex)
+ fields = ['active_media', 'actual_flow_control', 'bundle_state',
+ 'description', 'dual_media_state', 'enabled_state', 'if_index',
+ 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap',
+ 'mac_address', 'media', 'media_option', 'media_option_sfp',
+ 'media_sfp', 'media_speed', 'media_status', 'mtu',
+ 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control',
+ 'sflow_poll_interval', 'sflow_poll_interval_global',
+ 'sfp_media_state', 'stp_active_edge_port_state',
+ 'stp_enabled_state', 'stp_link_type',
+ 'stp_protocol_detection_reset_state']
+ return generate_dict(interfaces, fields)
+
+def generate_self_ip_dict(f5, regex):
+ self_ips = SelfIPs(f5.get_api(), regex)
+ fields = ['address', 'allow_access_list', 'description',
+ 'enforced_firewall_policy', 'floating_state', 'fw_rule',
+ 'netmask', 'staged_firewall_policy', 'traffic_group',
+ 'vlan', 'is_traffic_group_inherited']
+ return generate_dict(self_ips, fields)
+
+def generate_trunk_dict(f5, regex):
+ trunks = Trunks(f5.get_api(), regex)
+ fields = ['active_lacp_state', 'configured_member_count', 'description',
+ 'distribution_hash_option', 'interface', 'lacp_enabled_state',
+ 'lacp_timeout_option', 'link_selection_policy', 'media_speed',
+ 'media_status', 'operational_member_count', 'stp_enabled_state',
+ 'stp_protocol_detection_reset_state']
+ return generate_dict(trunks, fields)
+
+def generate_vlan_dict(f5, regex):
+ vlans = Vlans(f5.get_api(), regex)
+ fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
+ 'dynamic_forwarding', 'failsafe_action', 'failsafe_state',
+ 'failsafe_timeout', 'if_index', 'learning_mode',
+ 'mac_masquerade_address', 'member', 'mtu',
+ 'sflow_poll_interval', 'sflow_poll_interval_global',
+ 'sflow_sampling_rate', 'sflow_sampling_rate_global',
+ 'source_check_state', 'true_mac_address', 'vlan_id']
+ return generate_dict(vlans, fields)
+
+def generate_vs_dict(f5, regex):
+ virtual_servers = VirtualServers(f5.get_api(), regex)
+ fields = ['actual_hardware_acceleration', 'authentication_profile',
+ 'auto_lasthop', 'bw_controller_policy', 'clone_pool',
+ 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state',
+ 'default_pool_name', 'description', 'destination',
+ 'enabled_state', 'enforced_firewall_policy',
+ 'fallback_persistence_profile', 'fw_rule', 'gtm_score',
+ 'last_hop_pool', 'nat64_state', 'object_status',
+ 'persistence_profile', 'profile', 'protocol',
+ 'rate_class', 'rate_limit', 'rate_limit_destination_mask',
+ 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule',
+ 'rule', 'security_log_profile', 'snat_pool', 'snat_type',
+ 'source_address', 'source_address_translation_lsn_pool',
+ 'source_address_translation_snat_pool',
+ 'source_address_translation_type', 'source_port_behavior',
+ 'staged_firewall_policy', 'translate_address_state',
+ 'translate_port_state', 'type', 'vlan', 'wildmask']
+ return generate_dict(virtual_servers, fields)
+
+def generate_pool_dict(f5, regex):
+ pools = Pools(f5.get_api(), regex)
+ fields = ['action_on_service_down', 'active_member_count',
+ 'aggregate_dynamic_ratio', 'allow_nat_state',
+ 'allow_snat_state', 'client_ip_tos', 'client_link_qos',
+ 'description', 'gateway_failsafe_device',
+ 'ignore_persisted_weight_state', 'lb_method', 'member',
+ 'minimum_active_member', 'minimum_up_member',
+ 'minimum_up_member_action', 'minimum_up_member_enabled_state',
+ 'monitor_association', 'monitor_instance', 'object_status',
+ 'profile', 'queue_depth_limit',
+ 'queue_on_connection_limit_state', 'queue_time_limit',
+ 'reselect_tries', 'server_ip_tos', 'server_link_qos',
+ 'simple_timeout', 'slow_ramp_time']
+ return generate_dict(pools, fields)
+
+def generate_device_dict(f5, regex):
+ devices = Devices(f5.get_api(), regex)
+ fields = ['active_modules', 'base_mac_address', 'blade_addresses',
+ 'build', 'chassis_id', 'chassis_type', 'comment',
+ 'configsync_address', 'contact', 'description', 'edition',
+ 'failover_state', 'hostname', 'inactive_modules', 'location',
+ 'management_address', 'marketing_name', 'multicast_address',
+ 'optional_modules', 'platform_id', 'primary_mirror_address',
+ 'product', 'secondary_mirror_address', 'software_version',
+ 'timelimited_modules', 'timezone', 'unicast_addresses']
+ return generate_dict(devices, fields)
+
+def generate_device_group_dict(f5, regex):
+ device_groups = DeviceGroups(f5.get_api(), regex)
+    fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
+ 'device', 'full_load_on_sync_state',
+ 'incremental_config_sync_size_maximum',
+ 'network_failover_enabled_state', 'sync_status', 'type']
+ return generate_dict(device_groups, fields)
+
+def generate_traffic_group_dict(f5, regex):
+ traffic_groups = TrafficGroups(f5.get_api(), regex)
+ fields = ['auto_failback_enabled_state', 'auto_failback_time',
+ 'default_device', 'description', 'ha_load_factor',
+ 'ha_order', 'is_floating', 'mac_masquerade_address',
+ 'unit_id']
+ return generate_dict(traffic_groups, fields)
+
+def generate_rule_dict(f5, regex):
+ rules = Rules(f5.get_api(), regex)
+ fields = ['definition', 'description', 'ignore_vertification',
+ 'verification_status']
+ return generate_dict(rules, fields)
+
+def generate_node_dict(f5, regex):
+ nodes = Nodes(f5.get_api(), regex)
+ fields = ['address', 'connection_limit', 'description', 'dynamic_ratio',
+ 'monitor_instance', 'monitor_rule', 'monitor_status',
+ 'object_status', 'rate_limit', 'ratio', 'session_status']
+ return generate_dict(nodes, fields)
+
+def generate_virtual_address_dict(f5, regex):
+ virtual_addresses = VirtualAddresses(f5.get_api(), regex)
+ fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
+ 'description', 'enabled_state', 'icmp_echo_state',
+ 'is_floating_state', 'netmask', 'object_status',
+ 'route_advertisement_state', 'traffic_group']
+ return generate_dict(virtual_addresses, fields)
+
+def generate_address_class_dict(f5, regex):
+ address_classes = AddressClasses(f5.get_api(), regex)
+ fields = ['address_class', 'description']
+ return generate_dict(address_classes, fields)
+
+def generate_certificate_dict(f5, regex):
+ certificates = Certificates(f5.get_api(), regex)
+ return dict(zip(certificates.get_list(), certificates.get_certificate_list()))
+
+def generate_key_dict(f5, regex):
+ keys = Keys(f5.get_api(), regex)
+ return dict(zip(keys.get_list(), keys.get_key_list()))
+
+def generate_client_ssl_profile_dict(f5, regex):
+ profiles = ProfileClientSSL(f5.get_api(), regex)
+ fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
+ 'authenticate_once_state', 'ca_file', 'cache_size',
+ 'cache_timeout', 'certificate_file', 'chain_file',
+ 'cipher_list', 'client_certificate_ca_file', 'crl_file',
+ 'default_profile', 'description',
+ 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file',
+ 'forward_proxy_ca_passphrase',
+ 'forward_proxy_certificate_extension_include',
+ 'forward_proxy_certificate_lifespan',
+ 'forward_proxy_enabled_state',
+ 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout',
+ 'key_file', 'modssl_emulation_state', 'passphrase',
+ 'peer_certification_mode', 'profile_mode',
+ 'renegotiation_maximum_record_delay', 'renegotiation_period',
+ 'renegotiation_state', 'renegotiation_throughput',
+ 'retain_certificate_state', 'secure_renegotiation_mode',
+ 'server_name', 'session_ticket_state', 'sni_default_state',
+ 'sni_require_state', 'ssl_option', 'strict_resume_state',
+ 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
+ return generate_dict(profiles, fields)
+
+def generate_system_info_dict(f5):
+ system_info = SystemInfo(f5.get_api())
+    fields = ['base_mac_address', 'blade_temperature',
+              'chassis_slot_information', 'globally_unique_identifier',
+              'group_id', 'hardware_information', 'marketing_name',
+              'product_information', 'pva_version', 'system_id',
+              'system_information', 'time', 'time_zone', 'uptime']
+ return generate_simple_dict(system_info, fields)
+
+def generate_software_list(f5):
+ software = Software(f5.get_api())
+ software_list = software.get_all_software_status()
+ return software_list
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ server = dict(type='str', required=True),
+ user = dict(type='str', required=True),
+ password = dict(type='str', required=True),
+ session = dict(type='bool', default=False),
+ include = dict(type='list', required=True),
+ filter = dict(type='str', required=False),
+ )
+ )
+
+ if not bigsuds_found:
+ module.fail_json(msg="the python bigsuds module is required")
+
+ server = module.params['server']
+ user = module.params['user']
+ password = module.params['password']
+ session = module.params['session']
+ fact_filter = module.params['filter']
+ if fact_filter:
+ regex = fnmatch.translate(fact_filter)
+ else:
+ regex = None
+ include = map(lambda x: x.lower(), module.params['include'])
+    valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
+                      'device', 'device_group', 'interface', 'key', 'node',
+                      'pool', 'rule', 'self_ip', 'software', 'system_info',
+                      'traffic_group', 'trunk', 'virtual_address',
+                      'virtual_server', 'vlan')
+ include_test = map(lambda x: x in valid_includes, include)
+ if not all(include_test):
+ module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
+
+ try:
+ facts = {}
+
+ if len(include) > 0:
+ f5 = F5(server, user, password, session)
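+            # gather facts from the root folder with recursive queries
+            # enabled; the saved session state is restored below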
+ saved_active_folder = f5.get_active_folder()
+ saved_recursive_query_state = f5.get_recursive_query_state()
+ if saved_active_folder != "/":
+ f5.set_active_folder("/")
+ if saved_recursive_query_state != "STATE_ENABLED":
+ f5.enable_recursive_query_state()
+
+ if 'interface' in include:
+ facts['interface'] = generate_interface_dict(f5, regex)
+ if 'self_ip' in include:
+ facts['self_ip'] = generate_self_ip_dict(f5, regex)
+ if 'trunk' in include:
+ facts['trunk'] = generate_trunk_dict(f5, regex)
+ if 'vlan' in include:
+ facts['vlan'] = generate_vlan_dict(f5, regex)
+ if 'virtual_server' in include:
+ facts['virtual_server'] = generate_vs_dict(f5, regex)
+ if 'pool' in include:
+ facts['pool'] = generate_pool_dict(f5, regex)
+ if 'device' in include:
+ facts['device'] = generate_device_dict(f5, regex)
+ if 'device_group' in include:
+ facts['device_group'] = generate_device_group_dict(f5, regex)
+ if 'traffic_group' in include:
+ facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
+ if 'rule' in include:
+ facts['rule'] = generate_rule_dict(f5, regex)
+ if 'node' in include:
+ facts['node'] = generate_node_dict(f5, regex)
+ if 'virtual_address' in include:
+ facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
+ if 'address_class' in include:
+ facts['address_class'] = generate_address_class_dict(f5, regex)
+ if 'software' in include:
+ facts['software'] = generate_software_list(f5)
+ if 'certificate' in include:
+ facts['certificate'] = generate_certificate_dict(f5, regex)
+ if 'key' in include:
+ facts['key'] = generate_key_dict(f5, regex)
+ if 'client_ssl_profile' in include:
+ facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
+ if 'system_info' in include:
+ facts['system_info'] = generate_system_info_dict(f5)
+
+ # restore saved state
+ if saved_active_folder and saved_active_folder != "/":
+ f5.set_active_folder(saved_active_folder)
+ if saved_recursive_query_state and \
+ saved_recursive_query_state != "STATE_ENABLED":
+ f5.set_recursive_query_state(saved_recursive_query_state)
+
+ result = {'ansible_facts': facts}
+
+ except Exception, e:
+ module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
+
+ module.exit_json(**result)
+
+# include magic from lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()
+
diff --git a/net_infrastructure/dnsimple b/net_infrastructure/dnsimple
new file mode 100755
index 00000000000..5bb53198945
--- /dev/null
+++ b/net_infrastructure/dnsimple
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: dnsimple
+version_added: "1.6"
+short_description: Interface with dnsimple.com (a DNS hosting service).
+description:
+ - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)"
+options:
+ account_email:
+ description:
+      - "Account email. If omitted, the environment variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)"
+ required: false
+ default: null
+
+ account_api_token:
+ description:
+ - Account API token. See I(account_email) for info.
+ required: false
+ default: null
+
+ domain:
+ description:
+      - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned.
+ - If domain is present but the domain doesn't exist, it will be created.
+ required: false
+ default: null
+
+ record:
+ description:
+      - Record to add. If blank, a record for the domain itself will be created. Supports the wildcard (*).
+ required: false
+ default: null
+
+ record_ids:
+ description:
+      - List of record IDs to ensure exist or do not exist.
+ required: false
+ default: null
+
+ type:
+ description:
+ - The type of DNS record to create
+ required: false
+ choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ]
+ default: null
+
+ ttl:
+ description:
+ - The TTL to give the new record
+ required: false
+ default: 3600 (one hour)
+
+ value:
+ description:
+ - Record value
+ - "Must be specified when trying to ensure a record exists"
+ required: false
+ default: null
+
+ priority:
+ description:
+ - Record priority
+ required: false
+ default: null
+
+ state:
+ description:
+      - Whether the record should exist or not.
+ required: false
+ choices: [ 'present', 'absent' ]
+ default: null
+
+ solo:
+ description:
+ - Whether the record should be the only one for that record type and record name. Only use with state=present on a record
+ required: false
+ default: null
+
+requirements: [ dnsimple ]
+author: Alex Coomans
+'''
+
+EXAMPLES = '''
+# authenticate using email and API token
+- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken
+
+# fetch all domains
+- local_action: dnsimple
+ register: domains
+
+# fetch my.com domain records
+- local_action: dnsimple domain=my.com state=present
+ register: records
+
+# delete a domain
+- local_action: dnsimple domain=my.com state=absent
+
+# create a test.my.com A record to point to 127.0.0.1
+- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1
+ register: record
+
+# and then delete it
+- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} state=absent
+
+# create a my.com CNAME record to example.com
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present
+
+# change its TTL
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present
+
+# and delete the record
+- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=absent
+
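+# ensure www is the only A record for that name; other A records named
+# www are removed (an illustrative use of the solo option described above)
+- local_action: dnsimple domain=my.com record=www type=A value=127.0.0.1 solo=yes state=present
+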
+'''
+
+import os
+import sys
+try:
+ from dnsimple import DNSimple
+ from dnsimple.dnsimple import DNSimpleException
+except ImportError:
+ print "failed=True msg='dnsimple required for this module'"
+ sys.exit(1)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ account_email = dict(required=False),
+ account_api_token = dict(required=False, no_log=True),
+ domain = dict(required=False),
+ record = dict(required=False),
+ record_ids = dict(required=False, type='list'),
+ type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']),
+ ttl = dict(required=False, default=3600, type='int'),
+ value = dict(required=False),
+ priority = dict(required=False, type='int'),
+ state = dict(required=False, choices=['present', 'absent']),
+ solo = dict(required=False, type='bool'),
+ ),
+ required_together = (
+        ['record', 'value'],
+ ),
+ supports_check_mode = True,
+ )
+
+ account_email = module.params.get('account_email')
+ account_api_token = module.params.get('account_api_token')
+ domain = module.params.get('domain')
+ record = module.params.get('record')
+ record_ids = module.params.get('record_ids')
+ record_type = module.params.get('type')
+ ttl = module.params.get('ttl')
+ value = module.params.get('value')
+ priority = module.params.get('priority')
+ state = module.params.get('state')
+ is_solo = module.params.get('solo')
+
+ if account_email and account_api_token:
+ client = DNSimple(email=account_email, api_token=account_api_token)
+ elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'):
+ client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN'))
+ else:
+ client = DNSimple()
+
+ try:
+ # Let's figure out what operation we want to do
+
+ # No domain, return a list
+ if not domain:
+ domains = client.domains()
+ module.exit_json(changed=False, result=[d['domain'] for d in domains])
+
+ # Domain & No record
+ if domain and record is None and not record_ids:
+ domains = [d['domain'] for d in client.domains()]
+ if domain.isdigit():
+ dr = next((d for d in domains if d['id'] == int(domain)), None)
+ else:
+ dr = next((d for d in domains if d['name'] == domain), None)
+ if state == 'present':
+ if dr:
+ module.exit_json(changed=False, result=dr)
+ else:
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_domain(domain)['domain'])
+ elif state == 'absent':
+ if dr:
+ if not module.check_mode:
+ client.delete(domain)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+ # need the not none check since record could be an empty string
+ if domain and record is not None:
+ records = [r['record'] for r in client.records(str(domain))]
+
+ if not record_type:
+ module.fail_json(msg="Missing the record type")
+
+ if not value:
+ module.fail_json(msg="Missing the record value")
+
+ rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None)
+
+ if state == 'present':
+ changed = False
+ if is_solo:
+ # delete any records that have the same name and record type
+ same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type]
+ if rr:
+ same_type = [rid for rid in same_type if rid != rr['id']]
+ if same_type:
+ if not module.check_mode:
+ for rid in same_type:
+ client.delete_record(str(domain), rid)
+ changed = True
+ if rr:
+ # check if we need to update
+ if rr['ttl'] != ttl or rr['prio'] != priority:
+ data = {}
+ if ttl: data['ttl'] = ttl
+ if priority: data['prio'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record'])
+ else:
+ module.exit_json(changed=changed, result=rr)
+ else:
+ # create it
+ data = {
+ 'name': record,
+ 'record_type': record_type,
+ 'content': value,
+ }
+ if ttl: data['ttl'] = ttl
+ if priority: data['prio'] = priority
+ if module.check_mode:
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=True, result=client.add_record(str(domain), data)['record'])
+ elif state == 'absent':
+ if rr:
+ if not module.check_mode:
+ client.delete_record(str(domain), rr['id'])
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+ # Make sure these record_ids either all exist or none
+ if domain and record_ids:
+ current_records = [str(r['record']['id']) for r in client.records(str(domain))]
+ wanted_records = [str(r) for r in record_ids]
+ if state == 'present':
+ difference = list(set(wanted_records) - set(current_records))
+ if difference:
+ module.fail_json(msg="Missing the following records: %s" % difference)
+ else:
+ module.exit_json(changed=False)
+ elif state == 'absent':
+ difference = list(set(wanted_records) & set(current_records))
+ if difference:
+ if not module.check_mode:
+ for rid in difference:
+ client.delete_record(str(domain), rid)
+ module.exit_json(changed=True)
+ else:
+ module.exit_json(changed=False)
+ else:
+ module.fail_json(msg="'%s' is an unknown value for the state argument" % state)
+
+ except DNSimpleException, e:
+ module.fail_json(msg="Unable to contact DNSimple: %s" % e.message)
+
+    module.fail_json(msg="Unable to determine the requested operation")
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/net_infrastructure/dnsmadeeasy b/net_infrastructure/dnsmadeeasy
index d4af13e884a..148e25a5011 100644
--- a/net_infrastructure/dnsmadeeasy
+++ b/net_infrastructure/dnsmadeeasy
@@ -73,6 +73,15 @@ options:
choices: [ 'present', 'absent' ]
default: null
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+
notes:
- The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP.
- This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be registered and used in your playbooks.
@@ -106,8 +115,6 @@ EXAMPLES = '''
IMPORT_ERROR = None
try:
- import urllib
- import urllib2
import json
from time import strftime, gmtime
import hashlib
@@ -115,22 +122,6 @@ try:
except ImportError, e:
IMPORT_ERROR = str(e)
-
-class RequestWithMethod(urllib2.Request):
-
- """Workaround for using DELETE/PUT/etc with urllib2"""
-
- def __init__(self, url, method, data=None, headers={}):
- self._method = method
- urllib2.Request.__init__(self, url, data, headers)
-
- def get_method(self):
- if self._method:
- return self._method
- else:
- return urllib2.Request.get_method(self)
-
-
class DME2:
def __init__(self, apikey, secret, domain, module):
@@ -138,7 +129,7 @@ class DME2:
self.api = apikey
self.secret = secret
- self.baseurl = 'http://api.dnsmadeeasy.com/V2.0/'
+ self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/'
self.domain = str(domain)
self.domain_map = None # ["domain_name"] => ID
self.record_map = None # ["record_name"] => ID
@@ -169,21 +160,15 @@ class DME2:
url = self.baseurl + resource
if data and not isinstance(data, basestring):
data = urllib.urlencode(data)
- request = RequestWithMethod(url, method, data, self._headers())
- try:
- response = urllib2.urlopen(request)
- except urllib2.HTTPError, e:
- self.module.fail_json(
- msg="%s returned %s, with body: %s" % (url, e.code, e.read()))
- except Exception, e:
- self.module.fail_json(
- msg="Failed contacting: %s : Exception %s" % (url, e.message()))
+ response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers())
+ if info['status'] not in (200, 201, 204):
+ self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg']))
try:
return json.load(response)
except Exception, e:
- return False
+ return {}
def getDomain(self, domain_id):
if not self.domain_map:
@@ -263,6 +248,7 @@ def main():
'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']),
record_value=dict(required=False),
record_ttl=dict(required=False, default=1800, type='int'),
+ validate_certs = dict(default='yes', type='bool'),
),
required_together=(
['record_value', 'record_ttl', 'record_type']
@@ -282,7 +268,7 @@ def main():
domain_records = DME.getRecords()
if not domain_records:
module.fail_json(
- msg="The %s domain name is not accessible with this api_key; try using its ID if known." % domain)
+ msg="The requested domain name is not accessible with this api_key; try using its ID if known.")
module.exit_json(changed=False, result=domain_records)
# Fetch existing record + Build new one
@@ -338,4 +324,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/net_infrastructure/lldp b/net_infrastructure/lldp
new file mode 100755
index 00000000000..6b8836852f6
--- /dev/null
+++ b/net_infrastructure/lldp
@@ -0,0 +1,83 @@
+#!/usr/bin/python -tt
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import subprocess
+
+DOCUMENTATION = '''
+---
+module: lldp
+version_added: 1.6
+short_description: get details reported by lldp
+description:
+ - Reads data out of lldpctl
+options: {}
+author: Andy Hill
+notes:
+ - Requires lldpd running and lldp enabled on switches
+'''
+
+EXAMPLES = '''
+# Retrieve switch/port information
+ - name: Gather information from lldp
+ lldp:
+
+ - name: Print each switch/port
+    debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }}"
+ with_items: lldp.keys()
+
+# TASK: [Print each switch/port] ***********************************************************
+# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"}
+# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"}
+# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"}
+
+'''
+
+def gather_lldp():
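+    # lldpctl -f keyvalue emits one dotted.path=value pair per line,
+    # e.g. 'lldp.eth0.chassis.name=switch1'; each path is unpacked into
+    # nested dictionaries keyed by the path components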
+ cmd = ['lldpctl', '-f', 'keyvalue']
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ (output, err) = proc.communicate()
+ if output:
+ output_dict = {}
+ lldp_entries = output.split("\n")
+
+ for entry in lldp_entries:
+ if entry:
+ path, value = entry.strip().split("=", 1)
+ path = path.split(".")
+ path_components, final = path[:-1], path[-1]
+
+ current_dict = output_dict
+ for path_component in path_components:
+ current_dict[path_component] = current_dict.get(path_component, {})
+ current_dict = current_dict[path_component]
+ current_dict[final] = value
+ return output_dict
+
+
+def main():
+ module = AnsibleModule({})
+
+ lldp_output = gather_lldp()
+ try:
+ data = {'lldp': lldp_output['lldp']}
+ module.exit_json(ansible_facts=data)
+    except (TypeError, KeyError):
+ module.fail_json(msg="lldpctl command failed. is lldpd running?")
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/net_infrastructure/netscaler b/net_infrastructure/netscaler
index 1aa370895d5..2a8881cf56f 100644
--- a/net_infrastructure/netscaler
+++ b/net_infrastructure/netscaler
@@ -73,6 +73,14 @@ options:
default: server
choices: ["server", "service"]
aliases: []
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
requirements: [ "urllib", "urllib2" ]
author: Nandor Sivok
'''
@@ -90,8 +98,6 @@ ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=api
import json
-import urllib
-import urllib2
import base64
import socket
@@ -100,23 +106,25 @@ class netscaler(object):
_nitro_base_url = '/nitro/v1/'
+ def __init__(self, module):
+ self.module = module
+
def http_request(self, api_endpoint, data_json={}):
request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint
- data_json = urllib.urlencode(data_json)
- if len(data_json):
- req = urllib2.Request(request_url, data_json)
- req.add_header('Content-Type', 'application/x-www-form-urlencoded')
- else:
- req = urllib2.Request(request_url)
+ data_json = urllib.urlencode(data_json)
+ if not len(data_json):
+ data_json = None
- base64string = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
- req.add_header('Authorization', "Basic %s" % base64string)
+ auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip()
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ 'Content-Type' : 'application/x-www-form-urlencoded',
+ }
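+ # fetch_url (module_utils.urls) honours the module's validate_certs
+ # setting when issuing the request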
- resp = urllib2.urlopen(req)
- resp = json.load(resp)
+ response, info = fetch_url(self.module, request_url, data=data_json, headers=headers)
- return resp
+ return json.load(response)
def prepare_request(self, action):
resp = self.http_request(
@@ -134,7 +142,7 @@ class netscaler(object):
def core(module):
- n = netscaler()
+ n = netscaler(module)
n._nsc_host = module.params.get('nsc_host')
n._nsc_user = module.params.get('user')
n._nsc_pass = module.params.get('password')
@@ -158,7 +166,8 @@ def main():
password = dict(required=True),
action = dict(default='enable', choices=['enable','disable']),
name = dict(default=socket.gethostname()),
- type = dict(default='server', choices=['service', 'server'])
+ type = dict(default='server', choices=['service', 'server']),
+ validate_certs=dict(default='yes', type='bool'),
)
)
@@ -177,4 +186,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
diff --git a/network/get_url b/network/get_url
index 9704b8dbadb..74cc5479f4a 100644
--- a/network/get_url
+++ b/network/get_url
@@ -83,6 +83,13 @@ options:
required: false
default: 'yes'
choices: ['yes', 'no']
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
others:
description:
- all arguments accepted by the M(file) module also work here
@@ -108,19 +115,6 @@ try:
except ImportError:
HAS_HASHLIB=False
-try:
- import urllib2
- HAS_URLLIB2 = True
-except ImportError:
- HAS_URLLIB2 = False
-
-try:
- import urlparse
- import socket
- HAS_URLPARSE = True
-except ImportError:
- HAS_URLPARSE=False
-
# ==============================================================
# url handling
@@ -130,72 +124,6 @@ def url_filename(url):
return 'index.html'
return fn
-def url_do_get(module, url, dest, use_proxy, last_mod_time, force):
- """
- Get url and return request and info
- Credits: http://stackoverflow.com/questions/7006574/how-to-download-file-from-ftp
- """
-
- USERAGENT = 'ansible-httpget'
- info = dict(url=url, dest=dest)
- r = None
- handlers = []
-
- parsed = urlparse.urlparse(url)
-
- if '@' in parsed[1]:
- credentials, netloc = parsed[1].split('@', 1)
- if ':' in credentials:
- username, password = credentials.split(':', 1)
- else:
- username = credentials
- password = ''
- parsed = list(parsed)
- parsed[1] = netloc
-
- passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
- # this creates a password manager
- passman.add_password(None, netloc, username, password)
- # because we have put None at the start it will always
- # use this username/password combination for urls
- # for which `theurl` is a super-url
-
- authhandler = urllib2.HTTPBasicAuthHandler(passman)
- # create the AuthHandler
- handlers.append(authhandler)
-
- #reconstruct url without credentials
- url = urlparse.urlunparse(parsed)
-
- if not use_proxy:
- proxyhandler = urllib2.ProxyHandler({})
- handlers.append(proxyhandler)
-
- opener = urllib2.build_opener(*handlers)
- urllib2.install_opener(opener)
- request = urllib2.Request(url)
- request.add_header('User-agent', USERAGENT)
-
- if last_mod_time and not force:
- tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
- request.add_header('If-Modified-Since', tstamp)
- else:
- request.add_header('cache-control', 'no-cache')
-
- try:
- r = urllib2.urlopen(request)
- info.update(r.info())
- info['url'] = r.geturl() # The URL goes in too, because of redirects.
- info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
- except urllib2.HTTPError, e:
- # Must not fail_json() here so caller can handle HTTP 304 unmodified
- info.update(dict(msg=str(e), status=e.code))
- except urllib2.URLError, e:
- code = getattr(e, 'code', -1)
- module.fail_json(msg="Request failed: %s" % str(e), status_code=code)
-
- return r, info
-
def url_get(module, url, dest, use_proxy, last_mod_time, force):
"""
Download data from the url and store in a temporary file.
@@ -203,7 +131,7 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force):
Return (tempfile, info about the request)
"""
- req, info = url_do_get(module, url, dest, use_proxy, last_mod_time, force)
+ rsp, info = fetch_url(module, url, use_proxy=use_proxy, force=force, last_mod_time=last_mod_time)
if info['status'] == 304:
module.exit_json(url=url, dest=dest, changed=False, msg=info.get('msg', ''))
@@ -215,12 +143,12 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force):
fd, tempname = tempfile.mkstemp()
f = os.fdopen(fd, 'wb')
try:
- shutil.copyfileobj(req, f)
+ shutil.copyfileobj(rsp, f)
except Exception, err:
os.remove(tempname)
module.fail_json(msg="failed to create temporary content file: %s" % str(err))
f.close()
- req.close()
+ rsp.close()
return tempname, info
def extract_filename_from_headers(headers):
@@ -247,21 +175,16 @@ def extract_filename_from_headers(headers):
def main():
- # does this really happen on non-ancient python?
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
- if not HAS_URLPARSE:
- module.fail_json(msg="urlparse is not installed")
+ argument_spec = url_argument_spec()
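+ # url_argument_spec() (module_utils.urls) supplies the shared url options,
+ # including force, use_proxy and validate_certs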
+ argument_spec.update(
+ url = dict(required=True),
+ dest = dict(required=True),
+ sha256sum = dict(default=''),
+ )
module = AnsibleModule(
# not checking because of daisy chain to file module
- argument_spec = dict(
- url = dict(required=True),
- dest = dict(required=True),
- force = dict(default='no', aliases=['thirsty'], type='bool'),
- sha256sum = dict(default=''),
- use_proxy = dict(default='yes', type='bool')
- ),
+ argument_spec = argument_spec,
add_file_common_args=True
)
@@ -366,4 +289,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
diff --git a/network/uri b/network/uri
index 0060c1fdc90..b8b9b04ab9c 100644
--- a/network/uri
+++ b/network/uri
@@ -106,7 +106,7 @@ options:
required: false
status_code:
description:
- - A valid, numeric, HTTP status code that signifies success of the request.
+ - A valid, numeric, HTTP status code that signifies success of the request. Can also be a comma separated list of status codes.
required: false
default: 200
timeout:
@@ -143,23 +143,29 @@ EXAMPLES = '''
when: 'AWESOME' not in "{{ webpage.content }}"
-# Create a JIRA issue.
-- action: >
- uri url=https://your.jira.example.com/rest/api/2/issue/
- method=POST user=your_username password=your_pass
- body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
- status_code=201 HEADER_Content-Type="application/json"
+# Create a JIRA issue
-- action: >
- uri url=https://your.form.based.auth.examle.com/index.php
- method=POST body="name=your_username&password=your_password&enter=Sign%20in"
- status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
- register: login
+- uri: url=https://your.jira.example.com/rest/api/2/issue/
+ method=POST user=your_username password=your_pass
+ body="{{ lookup('file','issue.json') }}" force_basic_auth=yes
+ status_code=201 HEADER_Content-Type="application/json"
# Login to a form based webpage, then use the returned cookie to
-# access the app in later tasks.
-- action: uri url=https://your.form.based.auth.example.com/dashboard.php
- method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
+# access the app in later tasks
+
+- uri: url=https://your.form.based.auth.example.com/index.php
+ method=POST body="name=your_username&password=your_password&enter=Sign%20in"
+ status_code=302 HEADER_Content-Type="application/x-www-form-urlencoded"
+ register: login
+
+- uri: url=https://your.form.based.auth.example.com/dashboard.php
+ method=GET return_content=yes HEADER_Cookie="{{login.set_cookie}}"
+
+# Queue build of a project in Jenkins:
+
+- uri: url=http://{{jenkins.host}}/job/{{jenkins.job}}/build?token={{jenkins.token}}
+ method=GET user={{jenkins.user}} password={{jenkins.password}} force_basic_auth=yes status_code=201
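+
+# Accept more than one success code by passing a comma separated list
+# (an illustrative endpoint; status_code handling is documented above):
+
+- uri: url=http://example.com/some/path
+ method=GET status_code=200,302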
+
'''
HAS_HTTPLIB2 = True
@@ -335,7 +341,7 @@ def main():
follow_redirects = dict(required=False, default='safe', choices=['all', 'safe', 'none', 'yes', 'no']),
creates = dict(required=False, default=None),
removes = dict(required=False, default=None),
- status_code = dict(required=False, default=200, type='int'),
+ status_code = dict(required=False, default=[200], type='list'),
timeout = dict(required=False, default=30, type='int'),
),
check_invalid_arguments=False,
@@ -358,7 +364,7 @@ def main():
redirects = module.params['follow_redirects']
creates = module.params['creates']
removes = module.params['removes']
- status_code = int(module.params['status_code'])
+ status_code = [int(x) for x in list(module.params['status_code'])]
socket_timeout = module.params['timeout']
# Grab all the http headers. Need this hack since passing multi-values is currently a bit ugly. (e.g. headers='{"Content-Type":"application/json"}')
@@ -427,7 +433,7 @@ def main():
uresp['json'] = js
except:
pass
- if resp['status'] != status_code:
+ if resp['status'] not in status_code:
module.fail_json(msg="Status code was not " + str(status_code), content=content, **uresp)
elif return_content:
module.exit_json(changed=changed, content=content, **uresp)
diff --git a/notification/flowdock b/notification/flowdock
index a5be40d1f10..009487fb438 100644
--- a/notification/flowdock
+++ b/notification/flowdock
@@ -76,6 +76,14 @@ options:
description:
- (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox.
required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
@@ -96,31 +104,12 @@ EXAMPLES = '''
tags=tag1,tag2,tag3
'''
-HAS_URLLIB = True
-try:
- import urllib
-except ImportError:
- HAS_URLLIB = False
-
-HAS_URLLIB2 = True
-try:
- import urllib2
-except ImportError:
- HAS_URLLIB2 = False
-
-
-
# ===========================================
# Module execution.
#
def main():
- if not HAS_URLLIB:
- module.fail_json(msg="urllib is not installed")
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
-
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
@@ -135,6 +124,7 @@ def main():
project=dict(required=False),
tags=dict(required=False),
link=dict(required=False),
+ validate_certs = dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -187,14 +177,16 @@ def main():
module.exit_json(changed=False)
# Send the data to Flowdock
- try:
- response = urllib2.urlopen(url, urllib.urlencode(params))
- except Exception, e:
- module.fail_json(msg="unable to send msg: %s" % e)
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="unable to send msg: %s" % info['msg'])
- module.exit_json(changed=False, msg=module.params["msg"])
+ module.exit_json(changed=True, msg=module.params["msg"])
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/notification/grove b/notification/grove
index b759f025e29..e6bf241bdaa 100644
--- a/notification/grove
+++ b/notification/grove
@@ -31,6 +31,14 @@ options:
description:
- Icon for the service
required: false
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
author: Jonas Pfenniger
'''
@@ -41,8 +49,6 @@ EXAMPLES = '''
message=deployed {{ target }}
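+
+# a minimal sketch: disable certificate checking for an endpoint you control
+- grove: channel_token=XXXXXXXXXXXXXXXXXXXX
+         message="deployed {{ target }}"
+         validate_certs=no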
'''
-import urllib
-
BASE_URL = 'https://grove.io/api/notice/%s/'
# ==============================================================
@@ -57,7 +63,10 @@ def do_notify_grove(module, channel_token, service, message, url=None, icon_url=
if icon_url is not None:
my_data['icon_url'] = icon_url
- urllib.urlopen(my_url, urllib.urlencode(my_data))
+ data = urllib.urlencode(my_data)
+ response, info = fetch_url(module, my_url, data=data)
+ if info['status'] != 200:
+ module.fail_json(msg="failed to send notification: %s" % info['msg'])
# ==============================================================
# main
@@ -70,6 +79,7 @@ def main():
service = dict(type='str', default='ansible'),
url = dict(type='str', default=None),
icon_url = dict(type='str', default=None),
+ validate_certs = dict(default='yes', type='bool'),
)
)
diff --git a/notification/hipchat b/notification/hipchat
index eec2b8c3618..4ff95b32bf6 100644
--- a/notification/hipchat
+++ b/notification/hipchat
@@ -46,6 +46,21 @@ options:
required: false
default: 'yes'
choices: [ "yes", "no" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+ version_added: 1.5.1
+ api:
+ description:
+      - API URL if using a self-hosted HipChat server.
+ required: false
+ default: 'https://api.hipchat.com/v1/rooms/message'
+ version_added: 1.6.0
+
# informational: requirements for nodes
requirements: [ urllib, urllib2 ]
@@ -60,23 +75,10 @@ EXAMPLES = '''
# HipChat module specific support methods.
#
-HAS_URLLIB = True
-try:
- import urllib
-except ImportError:
- HAS_URLLIB = False
+MSG_URI = "https://api.hipchat.com/v1/rooms/message"
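+# self-hosted servers can override this default via the 'api' parameter, e.g.
+# (hypothetical host): api=https://hipchat.example.com/v1/rooms/message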
-HAS_URLLIB2 = True
-try:
- import urllib2
-except ImportError:
- HAS_URLLIB2 = False
-
-MSG_URI = "https://api.hipchat.com/v1/rooms/message?"
-
-
-def send_msg(token, room, msg_from, msg, msg_format='text',
- color='yellow', notify=False):
+def send_msg(module, token, room, msg_from, msg, msg_format='text',
+ color='yellow', notify=False, api=MSG_URI):
'''sending message to hipchat'''
params = {}
@@ -85,15 +87,20 @@ def send_msg(token, room, msg_from, msg, msg_format='text',
params['message'] = msg
params['message_format'] = msg_format
params['color'] = color
+ params['api'] = api
if notify:
params['notify'] = 1
else:
params['notify'] = 0
- url = MSG_URI + "auth_token=%s" % (token)
- response = urllib2.urlopen(url, urllib.urlencode(params))
- return response.read()
+ url = api + "?auth_token=%s" % (token)
+ data = urllib.urlencode(params)
+ response, info = fetch_url(module, url, data=data)
+ if info['status'] == 200:
+ return response.read()
+ else:
+ module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# ===========================================
@@ -102,11 +109,6 @@ def send_msg(token, room, msg_from, msg, msg_format='text',
def main():
- if not HAS_URLLIB:
- module.fail_json(msg="urllib is not installed")
- if not HAS_URLLIB2:
- module.fail_json(msg="urllib2 is not installed")
-
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
@@ -117,6 +119,8 @@ def main():
"purple", "gray", "random"]),
msg_format=dict(default="text", choices=["text", "html"]),
notify=dict(default=True, type='bool'),
+ validate_certs = dict(default='yes', type='bool'),
+ api = dict(default=MSG_URI),
),
supports_check_mode=True
)
@@ -128,17 +132,18 @@ def main():
color = module.params["color"]
msg_format = module.params["msg_format"]
notify = module.params["notify"]
+ api = module.params["api"]
try:
- send_msg(token, room, msg_from, msg, msg_format,
- color, notify)
+ send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api)
except Exception, e:
module.fail_json(msg="unable to sent msg: %s" % e)
changed = True
- module.exit_json(changed=changed, room=room, msg_from=msg_from,
- msg=msg)
+ module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg)
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
diff --git a/notification/irc b/notification/irc
index 11bdc4a95ec..bba7319a083 100644
--- a/notification/irc
+++ b/notification/irc
@@ -39,7 +39,7 @@ options:
default: 6667
nick:
description:
- - Nickname
+      - Nickname. May be shortened, depending on the server's NICKLEN setting.
required: false
default: ansible
msg:
@@ -49,10 +49,10 @@ options:
default: null
color:
description:
- - Text color for the message. Default is black.
+      - Text color for the message. ("none" is a valid option in 1.6 or later; before 1.6 the default color was black, not "none").
required: false
- default: black
- choices: [ "yellow", "red", "green", "blue", "black" ]
+ default: "none"
+ choices: [ "none", "yellow", "red", "green", "blue", "black" ]
channel:
description:
- Channel name
@@ -94,7 +94,7 @@ from time import sleep
def send_msg(channel, msg, server='localhost', port='6667',
- nick="ansible", color='black', passwd=False, timeout=30):
+ nick="ansible", color='none', passwd=False, timeout=30):
'''send message to IRC'''
colornumbers = {
@@ -107,10 +107,11 @@ def send_msg(channel, msg, server='localhost', port='6667',
try:
colornumber = colornumbers[color]
+ colortext = "\x03" + colornumber
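+        # "\x03" is the mIRC color-control prefix; an unknown color name
+        # (including the new default "none") falls through to no prefix at all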
except:
- colornumber = "01" # black
+ colortext = ""
- message = "\x03" + colornumber + msg
+ message = colortext + msg
irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
irc.connect((server, int(port)))
@@ -122,11 +123,15 @@ def send_msg(channel, msg, server='localhost', port='6667',
start = time.time()
while 1:
motd += irc.recv(1024)
- if re.search('^:\S+ 00[1-4] %s :' % nick, motd, flags=re.M):
+ # The server might send back a shorter nick than we specified (due to NICKLEN),
+ # so grab that and use it from now on (assuming we find the 00[1-4] response).
+        match = re.search('^:\S+ 00[1-4] (?P<nick>\S+) :', motd, flags=re.M)
+ if match:
+ nick = match.group('nick')
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC server welcome response')
- time.sleep(0.5)
+ sleep(0.5)
irc.send('JOIN %s\r\n' % channel)
join = ''
@@ -137,13 +142,13 @@ def send_msg(channel, msg, server='localhost', port='6667',
break
elif time.time() - start > timeout:
raise Exception('Timeout waiting for IRC JOIN response')
- time.sleep(0.5)
+ sleep(0.5)
irc.send('PRIVMSG %s :%s\r\n' % (channel, message))
- time.sleep(1)
+ sleep(1)
irc.send('PART %s\r\n' % channel)
irc.send('QUIT\r\n')
- time.sleep(1)
+ sleep(1)
irc.close()
# ===========================================
@@ -158,8 +163,8 @@ def main():
port=dict(default=6667),
nick=dict(default='ansible'),
msg=dict(required=True),
- color=dict(default="black", choices=["yellow", "red", "green",
- "blue", "black"]),
+ color=dict(default="none", choices=["yellow", "red", "green",
+ "blue", "black", "none"]),
channel=dict(required=True),
passwd=dict(),
timeout=dict(type='int', default=30)
diff --git a/notification/mqtt b/notification/mqtt
index d00307018dc..d701bd9348a 100644
--- a/notification/mqtt
+++ b/notification/mqtt
@@ -1,7 +1,7 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# (c) 2013, Jan-Piet Mens
+# (c) 2013, 2014, Jan-Piet Mens
#
# This file is part of Ansible
#
@@ -80,7 +80,7 @@ options:
requirements: [ mosquitto ]
notes:
- This module requires a connection to an MQTT broker such as Mosquitto
- U(http://mosquitto.org) and the C(mosquitto) Python module (U(http://mosquitto.org/python)).
+ U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)).
author: Jan-Piet Mens
'''
@@ -97,34 +97,12 @@ EXAMPLES = '''
# MQTT module support methods.
#
-HAS_MOSQUITTO = True
+HAS_PAHOMQTT = True
try:
import socket
- import mosquitto
+ import paho.mqtt.publish as mqtt
except ImportError:
- HAS_MOSQUITTO = False
-import os
-
-def publish(module, topic, payload, server='localhost', port='1883', qos='0',
- client_id='', retain=False, username=None, password=None):
- '''Open connection to MQTT broker and publish the topic'''
-
- mqttc = mosquitto.Mosquitto(client_id, clean_session=True)
-
- if username is not None and password is not None:
- mqttc.username_pw_set(username, password)
-
- rc = mqttc.connect(server, int(port), 5)
- if rc != 0:
- module.fail_json(msg="unable to connect to MQTT broker")
-
- mqttc.publish(topic, payload, int(qos), retain)
- rc = mqttc.loop()
- if rc != 0:
- module.fail_json(msg="unable to send to MQTT broker")
-
- mqttc.disconnect()
-
+ HAS_PAHOMQTT = False
# ===========================================
# Main
@@ -132,10 +110,6 @@ def publish(module, topic, payload, server='localhost', port='1883', qos='0',
def main():
- if not HAS_MOSQUITTO:
- module.fail_json(msg="mosquitto is not installed")
-
-
module = AnsibleModule(
argument_spec=dict(
server = dict(default = 'localhost'),
@@ -151,15 +125,18 @@ def main():
supports_check_mode=True
)
- server = module.params["server"]
- port = module.params["port"]
- topic = module.params["topic"]
- payload = module.params["payload"]
- client_id = module.params["client_id"]
- qos = module.params["qos"]
- retain = module.params["retain"]
- username = module.params["username"]
- password = module.params["password"]
+ if not HAS_PAHOMQTT:
+ module.fail_json(msg="Paho MQTT is not installed")
+
+ server = module.params.get("server", 'localhost')
+ port = module.params.get("port", 1883)
+ topic = module.params.get("topic")
+ payload = module.params.get("payload")
+ client_id = module.params.get("client_id", '')
+ qos = int(module.params.get("qos", 0))
+ retain = module.params.get("retain")
+ username = module.params.get("username", None)
+ password = module.params.get("password", None)
if client_id is None:
client_id = "%s_%s" % (socket.getfqdn(), os.getpid())
@@ -167,9 +144,18 @@ def main():
if payload and payload == 'None':
payload = None
+ auth=None
+ if username is not None:
+ auth = { 'username' : username, 'password' : password }
+
try:
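+        # paho's publish.single() connects, sends one message and disconnects,
+        # which matches this module's one-shot publish semantics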
- publish(module, topic, payload, server, port, qos, client_id, retain,
- username, password)
+ rc = mqtt.single(topic, payload,
+ qos=qos,
+ retain=retain,
+ client_id=client_id,
+ hostname=server,
+ port=port,
+ auth=auth)
except Exception, e:
module.fail_json(msg="unable to publish to MQTT broker %s" % (e))
diff --git a/notification/nexmo b/notification/nexmo
new file mode 100644
index 00000000000..d4898c40cdb
--- /dev/null
+++ b/notification/nexmo
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Martz
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: nexmo
+short_description: Send a SMS via nexmo
+description:
+ - Send a SMS message via nexmo
+version_added: 1.6
+author: Matt Martz
+options:
+ api_key:
+ description:
+ - Nexmo API Key
+ required: true
+ api_secret:
+ description:
+ - Nexmo API Secret
+ required: true
+ src:
+ description:
+ - Nexmo Number to send from
+ required: true
+ dest:
+ description:
+ - Phone number(s) to send SMS message to
+ required: true
+ msg:
+ description:
+      - Text of the message to send. Messages longer than 160 characters will be
+        split into multiple messages
+ required: true
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+"""
+
+EXAMPLES = """
+- name: Send notification message via Nexmo
+ local_action:
+ module: nexmo
+ api_key: 640c8a53
+ api_secret: 0ce239a6
+ src: 12345678901
+ dest:
+ - 10987654321
+ - 16789012345
+ msg: "{{ inventory_hostname }} completed"
+"""
+
+
+NEXMO_API = 'https://rest.nexmo.com/sms/json'
+
+
+def send_msg(module):
+ failed = list()
+ responses = dict()
+ msg = {
+ 'api_key': module.params.get('api_key'),
+ 'api_secret': module.params.get('api_secret'),
+ 'from': module.params.get('src'),
+ 'text': module.params.get('msg')
+ }
+ for number in module.params.get('dest'):
+ msg['to'] = number
+ url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg))
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+
+ try:
+ responses[number] = json.load(response)
+ except:
+ failed.append(number)
+ responses[number] = dict(failed=True)
+ else:
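+            # Nexmo returns a 'messages' array, one entry per SMS part when a long
+            # text is split; a non-zero status on any part marks this number failed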
+ for message in responses[number]['messages']:
+ if int(message['status']) != 0:
+ failed.append(number)
+ responses[number] = dict(failed=True, **responses[number])
+
+ if failed:
+        msg = 'One or more messages failed to send'
+ else:
+ msg = ''
+
+ module.exit_json(failed=bool(failed), msg=msg, changed=False,
+ responses=responses)
+
+
+def main():
+ argument_spec = url_argument_spec()
+ argument_spec.update(
+ dict(
+ api_key=dict(required=True, no_log=True),
+ api_secret=dict(required=True, no_log=True),
+ src=dict(required=True, type='int'),
+ dest=dict(required=True, type='list'),
+ msg=dict(required=True),
+ ),
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec
+ )
+
+ send_msg(module)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
+main()
diff --git a/notification/osx_say b/notification/osx_say
index de5d1917c5f..39e3da88c19 100644
--- a/notification/osx_say
+++ b/notification/osx_say
@@ -44,8 +44,6 @@ EXAMPLES = '''
- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox
'''
-import subprocess
-
DEFAULT_VOICE='Trinoids'
def say(module, msg, voice):
diff --git a/notification/slack b/notification/slack
new file mode 100644
index 00000000000..176d6b338fb
--- /dev/null
+++ b/notification/slack
@@ -0,0 +1,173 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Ramon de la Fuente
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: slack
+short_description: Send Slack notifications
+description:
+ - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration
+version_added: 1.6
+author: Ramon de la Fuente
+options:
+ domain:
+ description:
+ - Slack (sub)domain for your environment without protocol.
+ (i.e. C(future500.slack.com))
+ required: true
+ token:
+ description:
+ - Slack integration token
+ required: true
+ msg:
+ description:
+ - Message to send.
+ required: true
+ channel:
+ description:
+ - Channel to send the message to. If absent, the message goes to the channel selected for the I(token).
+ required: false
+ username:
+ description:
+ - This is the sender of the message.
+ required: false
+    default: Ansible
+ icon_url:
+ description:
+      - URL for the message sender's icon (default C(http://www.ansible.com/favicon.ico))
+ required: false
+ icon_emoji:
+ description:
+ - Emoji for the message sender. See Slack documentation for options.
+ (if I(icon_emoji) is set, I(icon_url) will not be used)
+ required: false
+ link_names:
+ description:
+ - Automatically create links for channels and usernames in I(msg).
+ required: false
+ default: 1
+ choices:
+ - 1
+ - 0
+ parse:
+ description:
+ - Setting for the message parser at Slack
+ required: false
+ choices:
+ - 'full'
+ - 'none'
+ validate_certs:
+ description:
+ - If C(no), SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices:
+ - 'yes'
+ - 'no'
+"""
+
+EXAMPLES = """
+- name: Send notification message via Slack
+ local_action:
+ module: slack
+ domain: future500.slack.com
+ token: thetokengeneratedbyslack
+ msg: "{{ inventory_hostname }} completed"
+
+- name: Send notification message via Slack all options
+ local_action:
+ module: slack
+ domain: future500.slack.com
+ token: thetokengeneratedbyslack
+ msg: "{{ inventory_hostname }} completed"
+ channel: "#ansible"
+ username: "Ansible on {{ inventory_hostname }}"
+ icon_url: "http://www.example.com/some-image-file.png"
+ link_names: 0
+ parse: 'none'
+
+"""
+
+
+SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s'
+
+def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse):
+ payload = dict(text=text)
+
+ if channel is not None:
+ payload['channel'] = channel if (channel[0] == '#') else '#'+channel
+ if username is not None:
+ payload['username'] = username
+ if icon_emoji is not None:
+ payload['icon_emoji'] = icon_emoji
+ else:
+ payload['icon_url'] = icon_url
+ if link_names is not None:
+ payload['link_names'] = link_names
+ if parse is not None:
+ payload['parse'] = parse
+
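+    # Slack's incoming-webhook endpoint reads a form field named 'payload'
+    # containing the JSON document, rather than a raw JSON request body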
+ payload="payload=" + module.jsonify(payload)
+ return payload
+
+def do_notify_slack(module, domain, token, payload):
+ slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token)
+
+ response, info = fetch_url(module, slack_incoming_webhook, data=payload)
+ if info['status'] != 200:
+ obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]')
+ module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg']))
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ domain = dict(type='str', required=True),
+ token = dict(type='str', required=True),
+ msg = dict(type='str', required=True),
+ channel = dict(type='str', default=None),
+ username = dict(type='str', default='Ansible'),
+ icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'),
+ icon_emoji = dict(type='str', default=None),
+ link_names = dict(type='int', default=1, choices=[0,1]),
+ parse = dict(type='str', default=None, choices=['none', 'full']),
+
+ validate_certs = dict(default='yes', type='bool'),
+ )
+ )
+
+ domain = module.params['domain']
+ token = module.params['token']
+ text = module.params['msg']
+ channel = module.params['channel']
+ username = module.params['username']
+ icon_url = module.params['icon_url']
+ icon_emoji = module.params['icon_emoji']
+ link_names = module.params['link_names']
+ parse = module.params['parse']
+
+ payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse)
+ do_notify_slack(module, domain, token, payload)
+
+ module.exit_json(msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+main()
\ No newline at end of file
diff --git a/notification/sns b/notification/sns
new file mode 100644
index 00000000000..f2ed178554e
--- /dev/null
+++ b/notification/sns
@@ -0,0 +1,190 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Michael J. Schultz
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = """
+module: sns
+short_description: Send Amazon Simple Notification Service (SNS) messages
+description:
+ - The M(sns) module sends notifications to a topic on your Amazon SNS account
+version_added: 1.6
+author: Michael J. Schultz
+options:
+ msg:
+ description:
+ - Default message to send.
+ required: true
+ aliases: [ "default" ]
+ subject:
+ description:
+ - Subject line for email delivery.
+ required: false
+ topic:
+ description:
+ - The topic you want to publish to.
+ required: true
+ email:
+ description:
+ - Message to send to email-only subscription
+ required: false
+ sqs:
+ description:
+ - Message to send to SQS-only subscription
+ required: false
+ sms:
+ description:
+ - Message to send to SMS-only subscription
+ required: false
+ http:
+ description:
+ - Message to send to HTTP-only subscription
+ required: false
+ https:
+ description:
+ - Message to send to HTTPS-only subscription
+ required: false
+ aws_secret_key:
+ description:
+ - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
+ required: false
+ default: None
+ aliases: ['ec2_secret_key', 'secret_key']
+ aws_access_key:
+ description:
+ - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
+ required: false
+ default: None
+ aliases: ['ec2_access_key', 'access_key']
+ region:
+ description:
+ - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
+ required: false
+ aliases: ['aws_region', 'ec2_region']
+
+requirements: [ "boto" ]
+"""
+
+EXAMPLES = """
+- name: Send default notification message via SNS
+ local_action:
+ module: sns
+ msg: "{{ inventory_hostname }} has completed the play."
+ subject: "Deploy complete!"
+ topic: "deploy"
+
+- name: Send notification messages via SNS with short message for SMS
+ local_action:
+ module: sns
+ msg: "{{ inventory_hostname }} has completed the play."
+ sms: "deployed!"
+ subject: "Deploy complete!"
+ topic: "deploy"
+"""
+
+import sys
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
+try:
+ import boto
+ import boto.sns
+except ImportError:
+ print "failed=True msg='boto required for this module'"
+ sys.exit(1)
+
+
+def arn_topic_lookup(connection, short_topic):
+ response = connection.get_all_topics()
+ result = response[u'ListTopicsResponse'][u'ListTopicsResult']
+ # topic names cannot have colons, so this captures the full topic name
+ lookup_topic = ':{}'.format(short_topic)
+ for topic in result[u'Topics']:
+ if topic[u'TopicArn'].endswith(lookup_topic):
+ return topic[u'TopicArn']
+ return None
+
+
+def main():
+ argument_spec = ec2_argument_spec()
+ argument_spec.update(
+ dict(
+ msg=dict(type='str', required=True, aliases=['default']),
+ subject=dict(type='str', default=None),
+ topic=dict(type='str', required=True),
+ email=dict(type='str', default=None),
+ sqs=dict(type='str', default=None),
+ sms=dict(type='str', default=None),
+ http=dict(type='str', default=None),
+ https=dict(type='str', default=None),
+ )
+ )
+
+ module = AnsibleModule(argument_spec=argument_spec)
+
+ msg = module.params['msg']
+ subject = module.params['subject']
+ topic = module.params['topic']
+ email = module.params['email']
+ sqs = module.params['sqs']
+ sms = module.params['sms']
+ http = module.params['http']
+ https = module.params['https']
+
+ region, ec2_url, aws_connect_params = get_aws_connection_info(module)
+ if not region:
+ module.fail_json(msg="region must be specified")
+ try:
+ connection = connect_to_aws(boto.sns, region, **aws_connect_params)
+ except boto.exception.NoAuthHandlerFound, e:
+ module.fail_json(msg=str(e))
+
+ # .publish() takes full ARN topic id, but I'm lazy and type shortnames
+ # so do a lookup (topics cannot contain ':', so thats the decider)
+ if ':' in topic:
+ arn_topic = topic
+ else:
+ arn_topic = arn_topic_lookup(connection, topic)
+
+ if not arn_topic:
+ module.fail_json(msg='Could not find topic: {}'.format(topic))
+
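+    # with message_structure='json', SNS expects a JSON document keyed by
+    # protocol name; the 'default' entry covers protocols without an override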
+ dict_msg = {'default': msg}
+ if email:
+ dict_msg.update(email=email)
+ if sqs:
+ dict_msg.update(sqs=sqs)
+ if sms:
+ dict_msg.update(sms=sms)
+ if http:
+ dict_msg.update(http=http)
+ if https:
+ dict_msg.update(https=https)
+
+ json_msg = json.dumps(dict_msg)
+ try:
+ connection.publish(topic=arn_topic, subject=subject,
+ message_structure='json', message=json_msg)
+ except boto.exception.BotoServerError, e:
+ module.fail_json(msg=str(e))
+
+ module.exit_json(msg="OK")
+
+main()
diff --git a/notification/twilio b/notification/twilio
new file mode 100644
index 00000000000..8969c28aa50
--- /dev/null
+++ b/notification/twilio
@@ -0,0 +1,135 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Matt Makai
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+version_added: "1.6"
+module: twilio
+short_description: Sends a text message to a mobile phone through Twilio.
+description:
+  - Sends a text message to a phone number through the Twilio SMS service.
+notes:
+ - Like the other notification modules, this one requires an external
+ dependency to work. In this case, you'll need a Twilio account with
+ a purchased or verified phone number to send the text message.
+options:
+ account_sid:
+ description:
+ user's account id for Twilio found on the account page
+ required: true
+ auth_token:
+ description: user's authentication token for Twilio found on the account page
+ required: true
+ msg:
+ description:
+ the body of the text message
+ required: true
+ to_number:
+ description:
+ what phone number to send the text message to, format +15551112222
+ required: true
+ from_number:
+ description:
+ what phone number to send the text message from, format +15551112222
+ required: true
+
+requirements: [ urllib, urllib2 ]
+author: Matt Makai
+'''
+
+EXAMPLES = '''
+# send a text message from the local server about the build status to (555) 303 5681
+# note: you have to have purchased the 'from_number' on your Twilio account
+- local_action: twilio msg="All servers with webserver role are now configured."
+ account_sid={{ twilio_account_sid }}
+ auth_token={{ twilio_auth_token }}
+ from_number=+15552014545 to_number=+15553035681
+
+# send a text message from a server to (555) 111 3232
+# note: you have to have purchased the 'from_number' on your Twilio account
+- twilio: msg="This server's configuration is now complete."
+ account_sid={{ twilio_account_sid }}
+ auth_token={{ twilio_auth_token }}
+ from_number=+15553258899 to_number=+15551113232
+
+'''
+
+# =======================================
+# twilio module support methods
+#
+HAS_URLLIB = True
+try:
+    import urllib, urllib2
+except ImportError:
+    HAS_URLLIB = False
+
+import base64
+
+
+def post_text(module, account_sid, auth_token, msg, from_number, to_number):
+ URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \
+ % (account_sid,)
+ AGENT = "Ansible/1.5"
+
+ data = {'From':from_number, 'To':to_number, 'Body':msg}
+ encoded_data = urllib.urlencode(data)
+ request = urllib2.Request(URI)
+ base64string = base64.encodestring('%s:%s' % \
+ (account_sid, auth_token)).replace('\n', '')
+ request.add_header('User-Agent', AGENT)
+ request.add_header('Content-type', 'application/x-www-form-urlencoded')
+ request.add_header('Accept', 'application/ansible')
+ request.add_header('Authorization', 'Basic %s' % base64string)
+ return urllib2.urlopen(request, encoded_data)
+
+
+# =======================================
+# Main
+#
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ account_sid=dict(required=True),
+ auth_token=dict(required=True),
+ msg=dict(required=True),
+ from_number=dict(required=True),
+ to_number=dict(required=True),
+ ),
+ supports_check_mode=True
+    )
+
+    if not HAS_URLLIB:
+        module.fail_json(msg="urllib and urllib2 are required")
+
+ account_sid = module.params['account_sid']
+ auth_token = module.params['auth_token']
+ msg = module.params['msg']
+ from_number = module.params['from_number']
+ to_number = module.params['to_number']
+
+ try:
+ response = post_text(module, account_sid, auth_token, msg,
+ from_number, to_number)
+ except Exception, e:
+ module.fail_json(msg="unable to send text message to %s" % to_number)
+
+ module.exit_json(msg=msg, changed=False)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/notification/typetalk b/notification/typetalk
new file mode 100644
index 00000000000..b987acbe837
--- /dev/null
+++ b/notification/typetalk
@@ -0,0 +1,116 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+DOCUMENTATION = '''
+---
+module: typetalk
+version_added: "1.6"
+short_description: Send a message to typetalk
+description:
+  - Send a message to typetalk using the typetalk API ( http://developers.typetalk.in/ )
+options:
+ client_id:
+ description:
+ - OAuth2 client ID
+ required: true
+ client_secret:
+ description:
+ - OAuth2 client secret
+ required: true
+ topic:
+ description:
+ - topic id to post message
+ required: true
+ msg:
+ description:
+ - message body
+ required: true
+requirements: [ urllib, urllib2, json ]
+author: Takashi Someda
+'''
+
+EXAMPLES = '''
+- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed"
+'''
+
+try:
+ import urllib
+except ImportError:
+ urllib = None
+
+try:
+ import urllib2
+except ImportError:
+ urllib2 = None
+
+try:
+ import json
+except ImportError:
+ json = None
+
+
+def do_request(url, params, headers={}):
+ data = urllib.urlencode(params)
+ headers = dict(headers, **{
+ 'User-Agent': 'Ansible/typetalk module',
+ })
+ return urllib2.urlopen(urllib2.Request(url, data, headers))
+
+
+def get_access_token(client_id, client_secret):
+ params = {
+ 'client_id': client_id,
+ 'client_secret': client_secret,
+ 'grant_type': 'client_credentials',
+ 'scope': 'topic.post'
+ }
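+    # standard OAuth2 client-credentials grant: the id/secret pair is exchanged
+    # for a bearer token scoped to posting messages (scope 'topic.post')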
+ res = do_request('https://typetalk.in/oauth2/access_token', params)
+ return json.load(res)['access_token']
+
+
+def send_message(client_id, client_secret, topic, msg):
+ """
+ send message to typetalk
+ """
+ try:
+ access_token = get_access_token(client_id, client_secret)
+ url = 'https://typetalk.in/api/v1/topics/%d' % topic
+ headers = {
+ 'Authorization': 'Bearer %s' % access_token,
+ }
+ do_request(url, {'message': msg}, headers)
+ return True, {'access_token': access_token}
+ except urllib2.HTTPError, e:
+ return False, e
+
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ client_id=dict(required=True),
+ client_secret=dict(required=True),
+ topic=dict(required=True, type='int'),
+ msg=dict(required=True),
+ ),
+ supports_check_mode=False
+ )
+
+ if not (urllib and urllib2 and json):
+ module.fail_json(msg="urllib, urllib2 and json modules are required")
+
+ client_id = module.params["client_id"]
+ client_secret = module.params["client_secret"]
+ topic = module.params["topic"]
+ msg = module.params["msg"]
+
+ res, error = send_message(client_id, client_secret, topic, msg)
+ if not res:
+        module.fail_json(msg='failed to send message with response code %s' % error.code)
+
+ module.exit_json(changed=True, topic=topic, msg=msg)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/packaging/apt b/packaging/apt
old mode 100644
new mode 100755
index f143c8f7b73..6bd19177f2d
--- a/packaging/apt
+++ b/packaging/apt
@@ -29,18 +29,18 @@ version_added: "0.0.2"
options:
pkg:
description:
- - A package name or package specifier with version, like C(foo) or C(foo=1.0). Shell like wildcards (fnmatch) like apt* are also supported.
+ - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Wildcards (fnmatch) like apt* are also supported.
required: false
default: null
state:
description:
- - Indicates the desired package state
+ - Indicates the desired package state. C(latest) ensures that the latest version is installed.
required: false
default: present
choices: [ "latest", "absent", "present" ]
update_cache:
description:
- - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step
+ - Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
required: false
default: no
choices: [ "yes", "no" ]
@@ -62,7 +62,7 @@ options:
default: null
install_recommends:
description:
- - Corresponds to the C(--no-install-recommends) option for I(apt), default behavior works as apt's default behavior, C(no) does not install recommended packages. Suggested packages are never installed.
+ - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed.
required: false
default: yes
choices: [ "yes", "no" ]
@@ -88,6 +88,11 @@ options:
- Options should be supplied as comma separated list
required: false
default: 'force-confdef,force-confold'
+ deb:
+ description:
+ - Path to a local .deb package file to install.
+ required: false
+ version_added: "1.6"
requirements: [ python-apt, aptitude ]
author: Matthew Williams
notes:
@@ -125,6 +130,9 @@ EXAMPLES = '''
# Pass options to dpkg on run
- apt: upgrade=dist update_cache=yes dpkg_options='force-confold,force-confdef'
+
+# Install a .deb package
+- apt: deb=/tmp/mypackage.deb
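+
+# a usage sketch: keep a package at the newest available version
+- apt: pkg=openssl state=latest update_cache=yes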
'''
@@ -138,7 +146,11 @@ import datetime
import fnmatch
# APT related constants
-APT_ENVVARS = "DEBIAN_FRONTEND=noninteractive DEBIAN_PRIORITY=critical"
+APT_ENV_VARS = dict(
+ DEBIAN_FRONTEND = 'noninteractive',
+ DEBIAN_PRIORITY = 'critical'
+)
+
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "0 upgraded, 0 newly installed"
APTITUDE_ZERO = "0 packages upgraded, 0 newly installed"
@@ -148,8 +160,9 @@ APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
HAS_PYTHON_APT = True
try:
import apt
+ import apt.debfile
import apt_pkg
-except:
+except ImportError:
HAS_PYTHON_APT = False
def package_split(pkgspec):
@@ -182,7 +195,7 @@ def package_status(m, pkgname, version, cache, state):
has_files = False # older python-apt cannot be used to determine non-purged
try:
- package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
+ package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
@@ -260,7 +273,10 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
else:
check_arg = ''
- cmd = "%s %s -y %s %s %s install %s" % (APT_ENVVARS, APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
+ for (k,v) in APT_ENV_VARS.iteritems():
+ os.environ[k] = v
+
+ cmd = "%s -y %s %s %s install %s" % (APT_GET_CMD, dpkg_options, force_yes, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
@@ -269,12 +285,57 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
rc, out, err = m.run_command(cmd)
if rc:
- m.fail_json(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err)
+ return (False, dict(msg="'apt-get install %s' failed: %s" % (packages, err), stdout=out, stderr=err))
else:
- m.exit_json(changed=True, stdout=out, stderr=err)
+ return (True, dict(changed=True, stdout=out, stderr=err))
else:
+ return (True, dict(changed=False))
+
+def install_deb(m, debfile, cache, force, install_recommends, dpkg_options):
+ changed=False
+ pkg = apt.debfile.DebPackage(debfile)
+
+ # Check if it's already installed
+ if pkg.compare_to_version_in_cache() == pkg.VERSION_SAME:
m.exit_json(changed=False)
+ # Check if package is installable
+ if not pkg.check():
+ m.fail_json(msg=pkg._failure_string)
+
+ (success, retvals) = install(m=m, pkgspec=pkg.missing_deps,
+ cache=cache,
+ install_recommends=install_recommends,
+ dpkg_options=expand_dpkg_options(dpkg_options))
+ if not success:
+ m.fail_json(**retvals)
+ changed = retvals['changed']
+
+
+    options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
+
+ if m.check_mode:
+ options += " --simulate"
+ if force:
+ options += " --force-yes"
+
+
+ cmd = "dpkg %s -i %s" % (options, debfile)
+ rc, out, err = m.run_command(cmd)
+
+ if "stdout" in retvals:
+ stdout = retvals["stdout"] + out
+ else:
+ stdout = out
+ if "stderr" in retvals:
+ stderr = retvals["stderr"] + err
+ else:
+ stderr = err
+ if rc == 0:
+ m.exit_json(changed=True, stdout=stdout, stderr=stderr)
+ else:
+ m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
+
def remove(m, pkgspec, cache, purge=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
packages = ""
@@ -292,7 +353,11 @@ def remove(m, pkgspec, cache, purge=False,
purge = '--purge'
else:
purge = ''
- cmd = "%s %s -q -y %s %s remove %s" % (APT_ENVVARS, APT_GET_CMD, dpkg_options, purge, packages)
+
+ for (k,v) in APT_ENV_VARS.iteritems():
+ os.environ[k] = v
+
+ cmd = "%s -q -y %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, packages)
if m.check_mode:
m.exit_json(changed=True)
@@ -332,7 +397,11 @@ def upgrade(m, mode="yes", force=False,
force_yes = ''
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
- cmd = '%s %s -y %s %s %s %s' % (APT_ENVVARS, apt_cmd_path, dpkg_options,
+
+ for (k,v) in APT_ENV_VARS.iteritems():
+ os.environ[k] = v
+
+ cmd = '%s -y %s %s %s %s' % (apt_cmd_path, dpkg_options,
force_yes, check_arg, upgrade_command)
rc, out, err = m.run_command(cmd)
if rc:
@@ -349,20 +418,21 @@ def main():
cache_valid_time = dict(type='int'),
purge = dict(default=False, type='bool'),
package = dict(default=None, aliases=['pkg', 'name']),
+ deb = dict(default=None),
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS)
),
- mutually_exclusive = [['package', 'upgrade']],
- required_one_of = [['package', 'upgrade', 'update_cache']],
+ mutually_exclusive = [['package', 'upgrade', 'deb']],
+ required_one_of = [['package', 'upgrade', 'update_cache', 'deb']],
supports_check_mode = True
)
if not HAS_PYTHON_APT:
try:
- module.run_command('apt-get update && apt-get install python-apt -y -q')
+ module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True)
global apt, apt_pkg
import apt
import apt_pkg
@@ -421,7 +491,7 @@ def main():
if cache_valid is not True:
cache.update()
cache.open(progress=None)
- if not p['package'] and not p['upgrade']:
+ if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(changed=False)
force_yes = p['force']
@@ -429,6 +499,13 @@ def main():
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, dpkg_options)
+ if p['deb']:
+ if p['state'] != "installed":
+ module.fail_json(msg="deb only supports state=installed")
+ install_deb(module, p['deb'], cache,
+ install_recommends=install_recommends,
+ force=force_yes, dpkg_options=p['dpkg_options'])
+
packages = p['package'].split(',')
latest = p['state'] == 'latest'
for package in packages:
@@ -438,14 +515,24 @@ def main():
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if p['state'] == 'latest':
- install(module, packages, cache, upgrade=True,
+ result = install(module, packages, cache, upgrade=True,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes, dpkg_options=dpkg_options)
+ (success, retvals) = result
+ if success:
+ module.exit_json(**retvals)
+ else:
+ module.fail_json(**retvals)
elif p['state'] in [ 'installed', 'present' ]:
- install(module, packages, cache, default_release=p['default_release'],
+ result = install(module, packages, cache, default_release=p['default_release'],
install_recommends=install_recommends,force=force_yes,
dpkg_options=dpkg_options)
+ (success, retvals) = result
+ if success:
+ module.exit_json(**retvals)
+ else:
+ module.fail_json(**retvals)
elif p['state'] in [ 'removed', 'absent' ]:
remove(module, packages, cache, p['purge'], dpkg_options)
diff --git a/packaging/apt_key b/packaging/apt_key
index eee86337020..2308d34329f 100644
--- a/packaging/apt_key
+++ b/packaging/apt_key
@@ -58,12 +58,26 @@ options:
default: none
description:
- url to retrieve key from.
+ keyserver:
+ version_added: "1.6"
+ required: false
+ default: none
+ description:
+ - keyserver to retrieve key from.
state:
required: false
choices: [ absent, present ]
default: present
description:
- used to specify if key is being added or revoked
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target url will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
'''
EXAMPLES = '''
@@ -88,7 +102,6 @@ EXAMPLES = '''
# FIXME: standardize into module_common
-from urllib2 import urlopen, URLError
from traceback import format_exc
from re import compile as re_compile
# FIXME: standardize into module_common
@@ -105,7 +118,7 @@ REQUIRED_EXECUTABLES=['gpg', 'grep', 'apt-key']
def check_missing_binaries(module):
missing = [e for e in REQUIRED_EXECUTABLES if not find_executable(e)]
if len(missing):
- module.fail_json(msg="binaries are missing", names=all)
+ module.fail_json(msg="binaries are missing", names=missing)
def all_keys(module, keyring):
if keyring:
@@ -124,7 +137,7 @@ def all_keys(module, keyring):
return results
def key_present(module, key_id):
- (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -i -q %s" % key_id)
+ (rc, out, err) = module.run_command("apt-key list | 2>&1 grep -i -q %s" % pipes.quote(key_id), use_unsafe_shell=True)
return rc == 0
def download_key(module, url):
@@ -133,14 +146,15 @@ def download_key(module, url):
if url is None:
module.fail_json(msg="needed a URL but was not specified")
try:
- connection = urlopen(url)
- if connection is None:
- module.fail_json("error connecting to download key from url")
- data = connection.read()
- return data
+ rsp, info = fetch_url(module, url)
+ return rsp.read()
except Exception:
- module.fail_json(msg="error getting key id from url", traceback=format_exc())
+ module.fail_json(msg="error getting key id from url: %s" % url, traceback=format_exc())
+def import_key(module, keyserver, key_id):
+ cmd = "apt-key adv --keyserver %s --recv %s" % (keyserver, key_id)
+ (rc, out, err) = module.run_command(cmd, check_rc=True)
+ return True
def add_key(module, keyfile, keyring, data=None):
if data is not None:
@@ -175,6 +189,8 @@ def main():
file=dict(required=False),
key=dict(required=False),
keyring=dict(required=False),
+ validate_certs=dict(default='yes', type='bool'),
+ keyserver=dict(required=False),
state=dict(required=False, choices=['present', 'absent'], default='present')
),
supports_check_mode=True
@@ -186,6 +202,7 @@ def main():
filename = module.params['file']
keyring = module.params['keyring']
state = module.params['state']
+ keyserver = module.params['keyserver']
changed = False
if key_id:
@@ -194,7 +211,7 @@ def main():
if key_id.startswith('0x'):
key_id = key_id[2:]
except ValueError:
- module.fail_json("Invalid key_id")
+ module.fail_json(msg="Invalid key_id", id=key_id)
# FIXME: I think we have a common facility for this, if not, want
check_missing_binaries(module)
@@ -206,7 +223,7 @@ def main():
if key_id and key_id in keys:
module.exit_json(changed=False)
else:
- if not filename and not data:
+ if not filename and not data and not keyserver:
data = download_key(module, url)
if key_id and key_id in keys:
module.exit_json(changed=False)
@@ -215,6 +232,8 @@ def main():
module.exit_json(changed=True)
if filename:
add_key(module, filename, keyring)
+ elif keyserver:
+ import_key(module, keyserver, key_id)
else:
add_key(module, "-", keyring, data)
changed=False
@@ -240,4 +259,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
diff --git a/packaging/apt_repository b/packaging/apt_repository
index 4587d90ba78..a0d3b89e739 100644
--- a/packaging/apt_repository
+++ b/packaging/apt_repository
@@ -28,7 +28,7 @@ short_description: Add and remove APT repositores
description:
- Add or remove an APT repositories in Ubuntu and Debian.
notes:
- - This module works on Debian and Ubuntu and requires C(python-apt) and C(python-pycurl) packages.
+ - This module works on Debian and Ubuntu and requires C(python-apt).
- This module supports Debian Squeeze (version 6) as well as its successors.
- This module treats Debian and Ubuntu distributions separately. So PPA could be installed only on Ubuntu machines.
options:
@@ -43,15 +43,21 @@ options:
default: "present"
description:
- A source string state.
+ mode:
+ required: false
+ default: 0644
+ description:
+ - The octal mode for newly created files in sources.list.d
+ version_added: "1.6"
update_cache:
description:
- - Run the equivalent of C(apt-get update) if has changed.
+ - Run the equivalent of C(apt-get update) when a change occurs. Cache updates are run after making changes.
required: false
default: "yes"
choices: [ "yes", "no" ]
author: Alexander Saltanov
version_added: "0.7"
-requirements: [ python-apt, python-pycurl ]
+requirements: [ python-apt ]
'''
EXAMPLES = '''
@@ -70,10 +76,6 @@ apt_repository: repo='ppa:nginx/stable'
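+# a usage sketch for the new mode option (hypothetical URL): keep a sources
+# file that carries credentials readable by root only
+- apt_repository: repo='deb https://user:pass@repo.example.com/debian stable main' mode=0600
+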
'''
import glob
-try:
- import json
-except ImportError:
- import simplejson as json
import os
import re
import tempfile
@@ -87,22 +89,19 @@ try:
except ImportError:
HAVE_PYTHON_APT = False
-try:
- import pycurl
- HAVE_PYCURL = True
-except ImportError:
- HAVE_PYCURL = False
VALID_SOURCE_TYPES = ('deb', 'deb-src')
+def install_python_apt(module):
-class CurlCallback:
- def __init__(self):
- self.contents = ''
-
- def body_callback(self, buf):
- self.contents = self.contents + buf
-
+ if not module.check_mode:
+ apt_get_path = module.get_bin_path('apt-get')
+ if apt_get_path:
+ rc, so, se = module.run_command('%s update && %s install python-apt -y -q' % (apt_get_path, apt_get_path))
+ if rc == 0:
+ global apt, apt_pkg
+ import apt
+ import apt_pkg
class InvalidSource(Exception):
pass
@@ -140,12 +139,22 @@ class SourcesList(object):
def _suggest_filename(self, line):
def _cleanup_filename(s):
return '_'.join(re.sub('[^a-zA-Z0-9]', ' ', s).split())
+ def _strip_username_password(s):
+ if '@' in s:
+ s = s.split('@', 1)
+ s = s[-1]
+ return s
# Drop options and protocols.
line = re.sub('\[[^\]]+\]', '', line)
line = re.sub('\w+://', '', line)
+ # split line into valid keywords
parts = [part for part in line.split() if part not in VALID_SOURCE_TYPES]
+
+ # Drop usernames and passwords
+ parts[0] = _strip_username_password(parts[0])
+
return '%s.list' % _cleanup_filename(' '.join(parts[:1]))
def _parse(self, line, raise_if_invalid_or_disabled=False):
@@ -214,7 +223,10 @@ class SourcesList(object):
if sources:
d, fn = os.path.split(filename)
fd, tmp_path = tempfile.mkstemp(prefix=".%s-" % fn, dir=d)
- os.chmod(os.path.join(fd, tmp_path), 0644)
+
+ # allow the user to override the default mode
+ this_mode = module.params['mode']
+ module.set_mode_if_different(tmp_path, this_mode, False)
f = os.fdopen(fd, 'w')
for n, valid, enabled, source, comment in sources:
@@ -290,29 +302,19 @@ class SourcesList(object):
class UbuntuSourcesList(SourcesList):
- LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
+ LP_API = 'https://launchpad.net/api/1.0/~%s/+archive/%s'
- def __init__(self, add_ppa_signing_keys_callback=None):
+ def __init__(self, module, add_ppa_signing_keys_callback=None):
+ self.module = module
self.add_ppa_signing_keys_callback = add_ppa_signing_keys_callback
super(UbuntuSourcesList, self).__init__()
def _get_ppa_info(self, owner_name, ppa_name):
- # we can not use urllib2 here as it does not do cert verification
lp_api = self.LP_API % (owner_name, ppa_name)
- return self._get_ppa_info_curl(lp_api)
-
- def _get_ppa_info_curl(self, lp_api):
- callback = CurlCallback()
- curl = pycurl.Curl()
- curl.setopt(pycurl.SSL_VERIFYPEER, 1)
- curl.setopt(pycurl.SSL_VERIFYHOST, 2)
- curl.setopt(pycurl.WRITEFUNCTION, callback.body_callback)
- curl.setopt(pycurl.URL, str(lp_api))
- curl.setopt(pycurl.HTTPHEADER, ["Accept: application/json"])
- curl.perform()
- curl.close()
- lp_page = callback.contents
- return json.loads(lp_page)
+
+ headers = dict(Accept='application/json')
+ response, info = fetch_url(self.module, lp_api, headers=headers)
+ return json.load(response)
def _expand_ppa(self, path):
ppa = path.split(':')[1]
@@ -352,7 +354,10 @@ def get_add_ppa_signing_key_callback(module):
def _run_command(command):
module.run_command(command, check_rc=True)
- return _run_command if not module.check_mode else None
+ if module.check_mode:
+ return None
+ else:
+ return _run_command
def main():
@@ -360,16 +365,17 @@ def main():
argument_spec=dict(
repo=dict(required=True),
state=dict(choices=['present', 'absent'], default='present'),
+ mode=dict(required=False, default=0644),
update_cache = dict(aliases=['update-cache'], type='bool', default='yes'),
+ # this should not be needed, but exists as a failsafe
+ install_python_apt=dict(required=False, default="yes", type='bool'),
),
supports_check_mode=True,
)
- if not HAVE_PYTHON_APT:
- module.fail_json(msg='Could not import python modules: apt_pkg. Please install python-apt package.')
-
- if not HAVE_PYCURL:
- module.fail_json(msg='Could not import python modules: pycurl. Please install python-pycurl package.')
+ params = module.params
+ if params['install_python_apt'] and not HAVE_PYTHON_APT and not module.check_mode:
+ install_python_apt(module)
repo = module.params['repo']
state = module.params['state']
@@ -377,7 +383,8 @@ def main():
sourceslist = None
if isinstance(distro, aptsources.distro.UbuntuDistribution):
- sourceslist = UbuntuSourcesList(add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
+ sourceslist = UbuntuSourcesList(module,
+ add_ppa_signing_keys_callback=get_add_ppa_signing_key_callback(module))
elif isinstance(distro, aptsources.distro.DebianDistribution) or \
isinstance(distro, aptsources.distro.Distribution):
sourceslist = SourcesList()
@@ -410,5 +417,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
diff --git a/packaging/apt_rpm b/packaging/apt_rpm
new file mode 100755
index 00000000000..0eca3132224
--- /dev/null
+++ b/packaging/apt_rpm
@@ -0,0 +1,168 @@
+#!/usr/bin/python -tt
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Evgenii Terechkov
+# Written by Evgenii Terechkov
+# Based on urpmi module written by Philippe Makowski
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: apt_rpm
+short_description: apt_rpm package manager
+description:
+ - Manages packages with I(apt-rpm). Both low-level (I(rpm)) and high-level (I(apt-get)) package manager binaries required.
+version_added: "1.5"
+options:
+ pkg:
+ description:
+ - name of package to install, upgrade or remove.
+ required: true
+ default: null
+ state:
+ description:
+ - Indicates the desired package state
+ required: false
+ default: present
+ choices: [ "absent", "present" ]
+ update_cache:
+ description:
+ - update the package database first C(apt-get update).
+ required: false
+ default: no
+ choices: [ "yes", "no" ]
+author: Evgenii Terechkov
+notes: []
+'''
+
+EXAMPLES = '''
+# install package foo
+- apt_rpm: pkg=foo state=present
+# remove package foo
+- apt_rpm: pkg=foo state=absent
+# remove packages foo and bar
+- apt_rpm: pkg=foo,bar state=absent
+# update the package database and install bar (bar will be updated if a newer version exists)
+- apt_rpm: name=bar state=present update_cache=yes
+'''
+
+
+import json
+import shlex
+import os
+import sys
+
+APT_PATH="/usr/bin/apt-get"
+RPM_PATH="/usr/bin/rpm"
+
+def query_package(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc = os.system("%s -q %s" % (RPM_PATH,name))
+ if rc == 0:
+ return True
+ else:
+ return False
+
+def query_package_provides(module, name):
+ # rpm -q returns 0 if the package is installed,
+ # 1 if it is not installed
+ rc = os.system("%s -q --provides %s >/dev/null" % (RPM_PATH,name))
+ return rc == 0
+
+def update_package_db(module):
+ rc = os.system("%s update" % APT_PATH)
+
+ if rc != 0:
+ module.fail_json(msg="could not update package db")
+
+def remove_packages(module, packages):
+
+ remove_c = 0
+    # Using a for loop so that, in case of error, we can report the package that failed
+ for package in packages:
+ # Query the package first, to see if we even need to remove
+ if not query_package(module, package):
+ continue
+
+ rc = os.system("%s -y remove %s > /dev/null" % (APT_PATH,package))
+
+ if rc != 0:
+ module.fail_json(msg="failed to remove %s" % (package))
+
+ remove_c += 1
+
+ if remove_c > 0:
+ module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+
+ module.exit_json(changed=False, msg="package(s) already absent")
+
+
+def install_packages(module, pkgspec):
+
+ packages = ""
+ for package in pkgspec:
+ if not query_package_provides(module, package):
+ packages += "'%s' " % package
+
+ if len(packages) != 0:
+
+ cmd = ("%s -y install %s > /dev/null" % (APT_PATH, packages))
+
+ rc, out, err = module.run_command(cmd)
+
+ installed = True
+        for package in pkgspec:
+ if not query_package_provides(module, package):
+ installed = False
+
+        # apt-rpm always has exit code 0 if --force is used
+ if rc or not installed:
+ module.fail_json(msg="'apt-get -y install %s' failed: %s" % (packages, err))
+ else:
+            module.exit_json(changed=True, msg="installed %s" % packages)
+ else:
+ module.exit_json(changed=False)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']),
+ update_cache = dict(default=False, aliases=['update-cache'], type='bool'),
+ package = dict(aliases=['pkg', 'name'], required=True)))
+
+
+ if not os.path.exists(APT_PATH) or not os.path.exists(RPM_PATH):
+ module.fail_json(msg="cannot find /usr/bin/apt-get and/or /usr/bin/rpm")
+
+ p = module.params
+
+ if p['update_cache']:
+ update_package_db(module)
+
+ packages = p['package'].split(',')
+
+ if p['state'] in [ 'installed', 'present' ]:
+ install_packages(module, packages)
+
+ elif p['state'] in [ 'removed', 'absent' ]:
+ remove_packages(module, packages)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+
+main()
diff --git a/packaging/composer b/packaging/composer
new file mode 100644
index 00000000000..983a38dec64
--- /dev/null
+++ b/packaging/composer
@@ -0,0 +1,153 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Dimitrios Tydeas Mengidis
+
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: composer
+author: Dimitrios Tydeas Mengidis
+short_description: Dependency Manager for PHP
+version_added: "1.6"
+description:
+    - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs, and it will install them in your project for you.
+options:
+ working_dir:
+ description:
+            - Directory of your project (see --working-dir).
+ required: true
+ default: null
+ aliases: [ "working-dir" ]
+ prefer_source:
+ description:
+            - Forces installation from package sources when possible (see --prefer-source).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "prefer-source" ]
+ prefer_dist:
+ description:
+            - Forces installation from package dist even for dev versions (see --prefer-dist).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "prefer-dist" ]
+ no_dev:
+ description:
+            - Disables installation of require-dev packages (see --no-dev).
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-dev" ]
+ no_scripts:
+ description:
+            - Skips the execution of all scripts defined in composer.json (see --no-scripts).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-scripts" ]
+ no_plugins:
+ description:
+            - Disables all plugins (see --no-plugins).
+ required: false
+ default: "no"
+ choices: [ "yes", "no" ]
+ aliases: [ "no-plugins" ]
+ optimize_autoloader:
+ description:
+            - Optimize autoloader during autoloader dump (see --optimize-autoloader). Converts PSR-0/4 autoloading to a classmap to get a faster autoloader. This is recommended especially for production; Composer itself skips it by default because it can take a bit of time to run.
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ aliases: [ "optimize-autoloader" ]
+requirements:
+ - php
+ - composer installed in bin path (recommended /usr/local/bin)
+notes:
+    - Default options that are always appended to each execution are --no-ansi, --no-progress, and --no-interaction.
+'''
+
+EXAMPLES = '''
+# Download and install all the libs and dependencies outlined in the /path/to/project/composer.lock
+- composer: working_dir=/path/to/project
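+# Illustrative variant (placeholder path): prefer dist packages and skip the scripts defined in composer.json
+- composer: working_dir=/path/to/project prefer_dist=yes no_scripts=yes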
+'''
+
+import os
+import re
+
+def parse_out(string):
+    return re.sub(r"\s+", " ", string).strip()
+
+def has_changed(string):
+    # composer prints "Nothing to install or update" when everything is already current
+    return re.search("Nothing to install or update", string) is None
+
+def composer_install(module, options):
+ php_path = module.get_bin_path("php", True, ["/usr/local/bin"])
+ composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"])
+ cmd = "%s %s install %s" % (php_path, composer_path, " ".join(options))
+
+ return module.run_command(cmd)
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ working_dir = dict(aliases=["working-dir"], required=True),
+ prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]),
+ prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]),
+ no_dev = dict(default="yes", type="bool", aliases=["no-dev"]),
+ no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]),
+ no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]),
+ optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]),
+ ),
+ supports_check_mode=True
+ )
+
+ module.params["working_dir"] = os.path.abspath(module.params["working_dir"])
+
+ options = set([])
+ # Default options
+ options.add("--no-ansi")
+ options.add("--no-progress")
+ options.add("--no-interaction")
+
+ if module.check_mode:
+ options.add("--dry-run")
+
+ # Prepare options
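+    # a true boolean adds a bare flag; a string value is passed as --option=value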
+ for i in module.params:
+ opt = "--%s" % i.replace("_","-")
+ p = module.params[i]
+        if isinstance(p, bool) and p:
+            options.add(opt)
+        elif isinstance(p, basestring):
+            options.add("%s=%s" % (opt, p))
+
+ rc, out, err = composer_install(module, options)
+
+ if rc != 0:
+ output = parse_out(err)
+ module.fail_json(msg=output)
+ else:
+ output = parse_out(out)
+ module.exit_json(changed=has_changed(output), msg=output)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/packaging/cpanm b/packaging/cpanm
index 5f5ae98022f..5b1a9878d21 100644
--- a/packaging/cpanm
+++ b/packaging/cpanm
@@ -25,7 +25,7 @@ module: cpanm
short_description: Manages Perl library dependencies.
description:
- Manage Perl library dependencies.
-version_added: "1.0"
+version_added: "1.6"
options:
name:
description:
@@ -72,14 +72,17 @@ author: Franck Cuny
def _is_package_installed(module, name, locallib, cpanm):
cmd = ""
if locallib:
- cmd = "PERL5LIB={locallib}/lib/perl5".format(locallib=locallib)
- cmd = "{cmd} perl -M{name} -e '1'".format(cmd=cmd, name=name)
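+        # export PERL5LIB so the 'perl -M<module>' probe below can see local::lib installs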
+ os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib
+ cmd = "%s perl -M%s -e '1'" % (cmd, name)
res, stdout, stderr = module.run_command(cmd, check_rc=False)
- installed = True if res == 0 else False
- return installed
-
+    return res == 0
def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm):
+    # this code should use "%s" like everything else and just return early, but we're not fixing all of it now.
+    # don't copy this pattern
if from_path:
cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path)
else:
@@ -111,21 +114,20 @@ def main():
required_one_of=[['name', 'from_path']],
)
- cpanm = module.get_bin_path('cpanm', True)
-
- name = module.params['name']
+ cpanm = module.get_bin_path('cpanm', True)
+ name = module.params['name']
from_path = module.params['from_path']
- notest = module.boolean(module.params.get('notest', False))
- locallib = module.params['locallib']
- mirror = module.params['mirror']
+ notest = module.boolean(module.params.get('notest', False))
+ locallib = module.params['locallib']
+ mirror = module.params['mirror']
- changed = False
+ changed = False
installed = _is_package_installed(module, name, locallib, cpanm)
if not installed:
out_cpanm = err_cpanm = ''
- cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm)
+ cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm)
rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False)
@@ -137,7 +139,6 @@ def main():
module.exit_json(changed=changed, binary=cpanm, name=name)
-
# import module snippets
from ansible.module_utils.basic import *
diff --git a/packaging/easy_install b/packaging/easy_install
index bdacf8e464b..889a81f025a 100644
--- a/packaging/easy_install
+++ b/packaging/easy_install
@@ -151,8 +151,8 @@ def main():
command = '%s %s' % (virtualenv, env)
if site_packages:
command += ' --system-site-packages'
- os.chdir(tempfile.gettempdir())
- rc_venv, out_venv, err_venv = module.run_command(command)
+ cwd = tempfile.gettempdir()
+ rc_venv, out_venv, err_venv = module.run_command(command, cwd=cwd)
rc += rc_venv
out += out_venv
diff --git a/packaging/gem b/packaging/gem
index 25fc337e14e..0d1a157a1f4 100644
--- a/packaging/gem
+++ b/packaging/gem
@@ -34,8 +34,9 @@ options:
state:
description:
- The desired state of the gem. C(latest) ensures that the latest version is installed.
- required: true
+ required: false
choices: [present, absent, latest]
+ default: present
gem_source:
description:
- The path to a local gem used as installation source.
@@ -66,6 +67,12 @@ options:
description:
- Version of the gem to be installed/removed.
required: false
+ pre_release:
+ description:
+ - Allow installation of pre-release versions of the gem.
+ required: false
+ default: "no"
+ version_added: "1.6"
author: Johan Wiren
'''
@@ -89,7 +96,7 @@ def get_rubygems_path(module):
return module.get_bin_path('gem', True)
def get_rubygems_version(module):
- cmd = [get_rubygems_path(module), '--version']
+ cmd = [ get_rubygems_path(module), '--version' ]
(rc, out, err) = module.run_command(cmd, check_rc=True)
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', out)
@@ -173,6 +180,8 @@ def install(module):
cmd.append('--user-install')
else:
cmd.append('--no-user-install')
+ if module.params['pre_release']:
+ cmd.append('--pre')
cmd.append('--no-rdoc')
cmd.append('--no-ri')
cmd.append(module.params['gem_source'])
@@ -187,8 +196,9 @@ def main():
include_dependencies = dict(required=False, default=True, type='bool'),
name = dict(required=True, type='str'),
repository = dict(required=False, aliases=['source'], type='str'),
- state = dict(required=False, choices=['present','absent','latest'], type='str'),
+ state = dict(required=False, default='present', choices=['present','absent','latest'], type='str'),
user_install = dict(required=False, default=True, type='bool'),
+ pre_release = dict(required=False, default=False, type='bool'),
version = dict(required=False, type='str'),
),
supports_check_mode = True,
diff --git a/packaging/homebrew b/packaging/homebrew
index ab1362acf1d..0dfc86096ff 100644
--- a/packaging/homebrew
+++ b/packaging/homebrew
@@ -2,6 +2,8 @@
# -*- coding: utf-8 -*-
# (c) 2013, Andrew Dunham
+# (c) 2013, Daniel Jaouen
+#
 # Based on macports (Jimmy Tang)
#
# This module is free software: you can redistribute it and/or modify
@@ -20,11 +22,11 @@
DOCUMENTATION = '''
---
module: homebrew
-author: Andrew Dunham
+author: Andrew Dunham and Daniel Jaouen
short_description: Package manager for Homebrew
description:
- Manages Homebrew packages
-version_added: "1.4"
+version_added: "1.1"
options:
name:
description:
@@ -33,7 +35,7 @@ options:
state:
description:
- state of the package
- choices: [ 'present', 'absent' ]
+ choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ]
required: false
default: present
update_homebrew:
@@ -47,130 +49,743 @@ options:
- options flags to install a package
required: false
default: null
+ version_added: "1.4"
notes: []
'''
EXAMPLES = '''
- homebrew: name=foo state=present
- homebrew: name=foo state=present update_homebrew=yes
+- homebrew: name=foo state=latest update_homebrew=yes
+- homebrew: update_homebrew=yes state=upgraded
+- homebrew: name=foo state=head
+- homebrew: name=foo state=linked
- homebrew: name=foo state=absent
- homebrew: name=foo,bar state=absent
- homebrew: name=foo state=present install_options=with-baz,enable-debug
'''
+import os.path
+import re
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group(s):
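+    # Strip the inline '#' comments from the annotated character list,
+    # then compile a negated character class matching anything NOT listed.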
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class Homebrew(object):
+ '''A class to manage Homebrew packages.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_PACKAGE_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ - # dashes
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, basestring):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
-def update_homebrew(module, brew_path):
- """ Updates packages list. """
-
- rc, out, err = module.run_command("%s update" % brew_path)
-
- if rc != 0:
- module.fail_json(msg="could not update homebrew")
+ return (
+ isinstance(brew_path, basestring)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+ @classmethod
+ def valid_package(cls, package):
+        '''A valid package is either None or alphanumeric (dashes allowed).'''
-def query_package(module, brew_path, name, state="present"):
- """ Returns whether a package is installed or not. """
+ if package is None:
+ return True
- if state == "present":
- rc, out, err = module.run_command("%s list %s" % (brew_path, name))
- if rc == 0:
+ return (
+ isinstance(package, basestring)
+ and not cls.INVALID_PACKAGE_REGEX.search(package)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - None
+ - installed
+ - upgraded
+ - head
+ - linked
+ - unlinked
+ - absent
+ '''
+
+ if state is None:
return True
+ else:
+ return (
+ isinstance(state, basestring)
+ and state.lower() in (
+ 'installed',
+ 'upgraded',
+ 'head',
+ 'linked',
+ 'unlinked',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewException(self.message)
+
+ else:
+ if isinstance(path, basestring):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_package(self):
+ return self._current_package
+
+ @current_package.setter
+ def current_package(self, package):
+ if not self.valid_package(package):
+ self._current_package = None
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(package)
+ raise HomebrewException(self.message)
+
+ else:
+ self._current_package = package
+ return package
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path=None, packages=None, state=None,
+ update_homebrew=False, install_options=None):
+ if not install_options:
+ install_options = list()
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options, )
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_path()
+ self._prep_brew_path()
+
+ def _prep_path(self):
+ if not self.path:
+ self.path = ['/usr/local/bin']
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_package_is_installed(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ cmd = [
+ "{brew_path}".format(brew_path=self.brew_path),
+ "info",
+ self.current_package,
+ ]
+ rc, out, err = self.module.run_command(cmd)
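+        # 'brew info' prints a 'Built from source' or 'Poured from bottle'
+        # line only for installed formulae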
+ for line in out.split('\n'):
+ if (
+ re.search(r'Built from source', line)
+ or re.search(r'Poured from bottle', line)
+ ):
+ return True
return False
+ def _outdated_packages(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'outdated',
+ ])
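+        # each line of 'brew outdated' output starts with the formula name; keep just that token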
+ return [line.split(' ')[0].strip() for line in out.split('\n') if line]
+
+ def _current_package_is_outdated(self):
+ if not self.valid_package(self.current_package):
+ return False
+
+ return self.current_package in self._outdated_packages()
+
+ def _current_package_is_installed_from_head(self):
+ if not Homebrew.valid_package(self.current_package):
+ return False
+ elif not self._current_package_is_installed():
+ return False
+
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'info',
+ self.current_package,
+ ])
+
+ try:
+ version_info = [line for line in out.split('\n') if line][0]
+ except IndexError:
+ return False
+
+ return version_info.split(' ')[-1] == 'HEAD'
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.update_homebrew:
+ self._update_homebrew()
+
+        # state 'upgraded' works with or without named packages;
+        # with no packages given, _upgrade_packages upgrades everything
+        if self.state == 'upgraded':
+            return self._upgrade_packages()
+
+        if self.packages:
+            if self.state == 'installed':
+                return self._install_packages()
+ elif self.state == 'head':
+ return self._install_packages()
+ elif self.state == 'linked':
+ return self._link_packages()
+ elif self.state == 'unlinked':
+ return self._unlink_packages()
+ elif self.state == 'absent':
+ return self._uninstall_packages()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, basestring):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
-def remove_packages(module, brew_path, packages):
- """ Uninstalls one or more packages if installed. """
-
- removed_count = 0
-
- # Using a for loop incase of error, we can report the package that failed
- for package in packages:
- # Query the package first, to see if we even need to remove.
- if not query_package(module, brew_path, package):
- continue
-
- if module.check_mode:
- module.exit_json(changed=True)
- rc, out, err = module.run_command([brew_path, 'remove', package])
-
- if query_package(module, brew_path, package):
- module.fail_json(msg="failed to remove %s: %s" % (package, out.strip()))
-
- removed_count += 1
-
- if removed_count > 0:
- module.exit_json(changed=True, msg="removed %d package(s)" % removed_count)
-
- module.exit_json(changed=False, msg="package(s) already absent")
-
-
-def install_packages(module, brew_path, packages, options):
- """ Installs one or more packages if not already installed. """
-
- installed_count = 0
-
- for package in packages:
- if query_package(module, brew_path, package):
- continue
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+ # /updated ------------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Package already installed: {0}'.format(
+ self.current_package,
+ )
+ return True
- if module.check_mode:
- module.exit_json(changed=True)
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be installed: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ if self.state == 'head':
+ head = '--HEAD'
+ else:
+ head = None
+
+ opts = (
+ [self.brew_path, 'install']
+ + self.install_options
+ + [self.current_package, head]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package installed: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _install_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._install_current_package()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # upgraded ------------------------------- {{{
+ def _upgrade_current_package(self):
+ command = 'upgrade'
+
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ command = 'install'
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.message = 'Package is already upgraded: {0}'.format(
+ self.current_package,
+ )
+ self.unchanged_count += 1
+ return True
- cmd = [brew_path, 'install', package]
- if options:
- cmd.extend(options)
- rc, out, err = module.run_command(cmd)
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be upgraded: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, command]
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_package_is_installed() and not self._current_package_is_outdated():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package upgraded: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_all_packages(self):
+ opts = (
+ [self.brew_path, 'upgrade']
+ + self.install_options
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
- if not query_package(module, brew_path, package):
- module.fail_json(msg="failed to install %s: '%s' %s" % (package, cmd, out.strip()))
+ if rc == 0:
+ self.changed = True
+ self.message = 'All packages upgraded.'
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _upgrade_packages(self):
+ if not self.packages:
+ self._upgrade_all_packages()
+ else:
+ for package in self.packages:
+ self.current_package = package
+ self._upgrade_current_package()
+ return True
+ # /upgraded ------------------------------ }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Package already uninstalled: {0}'.format(
+ self.current_package,
+ )
+ return True
- installed_count += 1
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be uninstalled: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'uninstall']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_package_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package uninstalled: {0}'.format(self.current_package)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewException(self.message)
+
+ def _uninstall_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._uninstall_current_package()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+
+ # linked --------------------------------- {{{
+ def _link_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be linked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'link']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
- if installed_count > 0:
- module.exit_json(changed=True, msg="installed %d package(s)" % (installed_count,))
+ if rc == 0:
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package linked: {0}'.format(self.current_package)
- module.exit_json(changed=False, msg="package(s) already present")
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be linked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ def _link_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._link_current_package()
+
+ return True
+ # /linked -------------------------------- }}}
+
+ # unlinked ------------------------------- {{{
+ def _unlink_current_package(self):
+ if not self.valid_package(self.current_package):
+ self.failed = True
+ self.message = 'Invalid package: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if not self._current_package_is_installed():
+ self.failed = True
+ self.message = 'Package not installed: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Package would be unlinked: {0}'.format(
+ self.current_package
+ )
+ raise HomebrewException(self.message)
+
+ opts = (
+ [self.brew_path, 'unlink']
+ + self.install_options
+ + [self.current_package]
+ )
+ cmd = [opt for opt in opts if opt]
+ rc, out, err = self.module.run_command(cmd)
-def generate_options_string(install_options):
- if install_options is None:
- return None
+ if rc == 0:
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Package unlinked: {0}'.format(self.current_package)
- options = []
+ return True
+ else:
+ self.failed = True
+ self.message = 'Package could not be unlinked: {0}.'.format(self.current_package)
+ raise HomebrewException(self.message)
- for option in install_options:
- options.append('--%s' % option)
+ def _unlink_packages(self):
+ for package in self.packages:
+ self.current_package = package
+ self._unlink_current_package()
- return options
+ return True
+ # /unlinked ------------------------------ }}}
+ # /commands ---------------------------------------------------- }}}
def main():
module = AnsibleModule(
- argument_spec = dict(
- name = dict(aliases=["pkg"], required=True),
- state = dict(default="present", choices=["present", "installed", "absent", "removed"]),
- update_homebrew = dict(default="no", aliases=["update-brew"], type='bool'),
- install_options = dict(default=None, aliases=["options"], type='list')
+ argument_spec=dict(
+ name=dict(aliases=["pkg"], required=False),
+ path=dict(required=False),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "latest", "upgraded", "head",
+ "linked", "unlinked",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ update_homebrew=dict(
+ default="no",
+ aliases=["update-brew"],
+ type='bool',
+ ),
+ install_options=dict(
+ default=None,
+ aliases=['options'],
+ type='list',
+ )
),
- supports_check_mode=True
+ supports_check_mode=True,
)
-
- brew_path = module.get_bin_path('brew', True, ['/usr/local/bin'])
-
p = module.params
- if p["update_homebrew"]:
- update_homebrew(module, brew_path)
-
- pkgs = p["name"].split(",")
-
- if p["state"] in ["present", "installed"]:
- opt = generate_options_string(p["install_options"])
- install_packages(module, brew_path, pkgs, opt)
-
- elif p["state"] in ["absent", "removed"]:
- remove_packages(module, brew_path, pkgs)
-
-# import module snippets
-from ansible.module_utils.basic import *
-
+ if p['name']:
+ packages = p['name'].split(',')
+ else:
+ packages = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+ else:
+ path = ['/usr/local/bin']
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+    if state in ('head',):
+ state = 'head'
+ if state in ('latest', 'upgraded'):
+ state = 'upgraded'
+ if state == 'linked':
+ state = 'linked'
+ if state == 'unlinked':
+ state = 'unlinked'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ update_homebrew = p['update_homebrew']
+ p['install_options'] = p['install_options'] or []
+ install_options = ['--{0}'.format(install_option)
+ for install_option in p['install_options']]
+
+ brew = Homebrew(module=module, path=path, packages=packages,
+ state=state, update_homebrew=update_homebrew,
+ install_options=install_options)
+ (failed, changed, message) = brew.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
diff --git a/packaging/homebrew_cask b/packaging/homebrew_cask
new file mode 100644
index 00000000000..fa85931afc9
--- /dev/null
+++ b/packaging/homebrew_cask
@@ -0,0 +1,513 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: homebrew_cask
+author: Daniel Jaouen
+short_description: Install/uninstall homebrew casks.
+description:
+ - Manages Homebrew casks.
+version_added: "1.6"
+options:
+ name:
+ description:
+ - name of cask to install/remove
+ required: true
+ state:
+ description:
+ - state of the cask
+    choices: [ 'present', 'installed', 'absent', 'removed', 'uninstalled' ]
+ required: false
+ default: present
+'''
+EXAMPLES = '''
+- homebrew_cask: name=alfred state=present
+- homebrew_cask: name=alfred state=absent
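+# name accepts a comma-separated list of casks (cask names here are illustrative)
+- homebrew_cask: name=alfred,google-chrome state=present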
+'''
+
+import os.path
+import re
+
+
+# exceptions -------------------------------------------------------------- {{{
+class HomebrewCaskException(Exception):
+ pass
+# /exceptions ------------------------------------------------------------- }}}
+
+
+# utils ------------------------------------------------------------------- {{{
+def _create_regex_group(s):
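+    # Strip the inline '#' comments from the annotated character list,
+    # then compile a negated character class matching anything NOT listed.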
+ lines = (line.strip() for line in s.split('\n') if line.strip())
+ chars = filter(None, (line.split('#')[0].strip() for line in lines))
+ group = r'[^' + r''.join(chars) + r']'
+ return re.compile(group)
+# /utils ------------------------------------------------------------------ }}}
+
+
+class HomebrewCask(object):
+ '''A class to manage Homebrew casks.'''
+
+ # class regexes ------------------------------------------------ {{{
+ VALID_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ : # colons
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_BREW_PATH_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ \s # spaces
+ {sep} # the OS-specific path separator
+ - # dashes
+ '''.format(sep=os.path.sep)
+
+ VALID_CASK_CHARS = r'''
+ \w # alphanumeric characters (i.e., [a-zA-Z0-9_])
+ - # dashes
+ '''
+
+ INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS)
+ INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS)
+ INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS)
+ # /class regexes ----------------------------------------------- }}}
+
+ # class validations -------------------------------------------- {{{
+ @classmethod
+ def valid_path(cls, path):
+ '''
+ `path` must be one of:
+ - list of paths
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - colons
+ - os.path.sep
+ '''
+
+ if isinstance(path, basestring):
+ return not cls.INVALID_PATH_REGEX.search(path)
+
+ try:
+ iter(path)
+ except TypeError:
+ return False
+ else:
+ paths = path
+ return all(cls.valid_brew_path(path_) for path_ in paths)
+
+ @classmethod
+ def valid_brew_path(cls, brew_path):
+ '''
+ `brew_path` must be one of:
+ - None
+ - a string containing only:
+ - alphanumeric characters
+ - dashes
+ - spaces
+ - os.path.sep
+ '''
+
+ if brew_path is None:
+ return True
+
+ return (
+ isinstance(brew_path, basestring)
+ and not cls.INVALID_BREW_PATH_REGEX.search(brew_path)
+ )
+
+ @classmethod
+ def valid_cask(cls, cask):
+        '''A valid cask is either None or alphanumeric + dashes.'''
+
+ if cask is None:
+ return True
+
+ return (
+ isinstance(cask, basestring)
+ and not cls.INVALID_CASK_REGEX.search(cask)
+ )
+
+ @classmethod
+ def valid_state(cls, state):
+ '''
+ A valid state is one of:
+ - installed
+ - absent
+ '''
+
+ if state is None:
+ return True
+ else:
+ return (
+ isinstance(state, basestring)
+ and state.lower() in (
+ 'installed',
+ 'absent',
+ )
+ )
+
+ @classmethod
+ def valid_module(cls, module):
+ '''A valid module is an instance of AnsibleModule.'''
+
+ return isinstance(module, AnsibleModule)
+ # /class validations ------------------------------------------- }}}
+
+ # class properties --------------------------------------------- {{{
+ @property
+ def module(self):
+ return self._module
+
+ @module.setter
+ def module(self, module):
+ if not self.valid_module(module):
+ self._module = None
+ self.failed = True
+ self.message = 'Invalid module: {0}.'.format(module)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._module = module
+ return module
+
+ @property
+ def path(self):
+ return self._path
+
+ @path.setter
+ def path(self, path):
+ if not self.valid_path(path):
+ self._path = []
+ self.failed = True
+ self.message = 'Invalid path: {0}.'.format(path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ if isinstance(path, basestring):
+ self._path = path.split(':')
+ else:
+ self._path = path
+
+ return path
+
+ @property
+ def brew_path(self):
+ return self._brew_path
+
+ @brew_path.setter
+ def brew_path(self, brew_path):
+ if not self.valid_brew_path(brew_path):
+ self._brew_path = None
+ self.failed = True
+ self.message = 'Invalid brew_path: {0}.'.format(brew_path)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._brew_path = brew_path
+ return brew_path
+
+ @property
+ def params(self):
+ return self._params
+
+ @params.setter
+ def params(self, params):
+ self._params = self.module.params
+ return self._params
+
+ @property
+ def current_cask(self):
+ return self._current_cask
+
+ @current_cask.setter
+ def current_cask(self, cask):
+ if not self.valid_cask(cask):
+ self._current_cask = None
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(cask)
+ raise HomebrewCaskException(self.message)
+
+ else:
+ self._current_cask = cask
+ return cask
+ # /class properties -------------------------------------------- }}}
+
+ def __init__(self, module, path=None, casks=None, state=None):
+ self._setup_status_vars()
+ self._setup_instance_vars(module=module, path=path, casks=casks,
+ state=state)
+
+ self._prep()
+
+ # prep --------------------------------------------------------- {{{
+ def _setup_status_vars(self):
+ self.failed = False
+ self.changed = False
+ self.changed_count = 0
+ self.unchanged_count = 0
+ self.message = ''
+
+ def _setup_instance_vars(self, **kwargs):
+ for key, val in kwargs.iteritems():
+ setattr(self, key, val)
+
+ def _prep(self):
+ self._prep_path()
+ self._prep_brew_path()
+
+ def _prep_path(self):
+ if not self.path:
+ self.path = ['/usr/local/bin']
+
+ def _prep_brew_path(self):
+ if not self.module:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'AnsibleModule not set.'
+ raise HomebrewCaskException(self.message)
+
+ self.brew_path = self.module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=self.path,
+ )
+ if not self.brew_path:
+ self.brew_path = None
+ self.failed = True
+ self.message = 'Unable to locate homebrew executable.'
+ raise HomebrewCaskException('Unable to locate homebrew executable.')
+
+ return self.brew_path
+
+ def _status(self):
+ return (self.failed, self.changed, self.message)
+ # /prep -------------------------------------------------------- }}}
+
+ def run(self):
+ try:
+ self._run()
+ except HomebrewCaskException:
+ pass
+
+ if not self.failed and (self.changed_count + self.unchanged_count > 1):
+ self.message = "Changed: %d, Unchanged: %d" % (
+ self.changed_count,
+ self.unchanged_count,
+ )
+ (failed, changed, message) = self._status()
+
+ return (failed, changed, message)
+
+ # checks ------------------------------------------------------- {{{
+ def _current_cask_is_installed(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ cmd = [self.brew_path, 'cask', 'list']
+ rc, out, err = self.module.run_command(cmd)
+
+ if 'nothing to list' in err:
+ return False
+ elif rc == 0:
+ casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()]
+ return self.current_cask in casks
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /checks ------------------------------------------------------ }}}
+
+ # commands ----------------------------------------------------- {{{
+ def _run(self):
+ if self.state == 'installed':
+ return self._install_casks()
+ elif self.state == 'absent':
+ return self._uninstall_casks()
+
+ # updated -------------------------------- {{{
+ def _update_homebrew(self):
+ rc, out, err = self.module.run_command([
+ self.brew_path,
+ 'update',
+ ])
+ if rc == 0:
+ if out and isinstance(out, basestring):
+ already_updated = any(
+ re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE)
+ for s in out.split('\n')
+ if s
+ )
+ if not already_updated:
+ self.changed = True
+ self.message = 'Homebrew updated successfully.'
+ else:
+ self.message = 'Homebrew already up-to-date.'
+
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+ # /updated ------------------------------- }}}
+
+ # installed ------------------------------ {{{
+ def _install_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already installed: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be installed: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ cmd = [opt
+ for opt in (self.brew_path, 'cask', 'install', self.current_cask)
+ if opt]
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask installed: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _install_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._install_current_cask()
+
+ return True
+ # /installed ----------------------------- }}}
+
+ # uninstalled ---------------------------- {{{
+ def _uninstall_current_cask(self):
+ if not self.valid_cask(self.current_cask):
+ self.failed = True
+ self.message = 'Invalid cask: {0}.'.format(self.current_cask)
+ raise HomebrewCaskException(self.message)
+
+ if not self._current_cask_is_installed():
+ self.unchanged_count += 1
+ self.message = 'Cask already uninstalled: {0}'.format(
+ self.current_cask,
+ )
+ return True
+
+ if self.module.check_mode:
+ self.changed = True
+ self.message = 'Cask would be uninstalled: {0}'.format(
+ self.current_cask
+ )
+ raise HomebrewCaskException(self.message)
+
+ cmd = [opt
+ for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask)
+ if opt]
+
+ rc, out, err = self.module.run_command(cmd)
+
+ if not self._current_cask_is_installed():
+ self.changed_count += 1
+ self.changed = True
+ self.message = 'Cask uninstalled: {0}'.format(self.current_cask)
+ return True
+ else:
+ self.failed = True
+ self.message = err.strip()
+ raise HomebrewCaskException(self.message)
+
+ def _uninstall_casks(self):
+ for cask in self.casks:
+ self.current_cask = cask
+ self._uninstall_current_cask()
+
+ return True
+ # /uninstalled ----------------------------- }}}
+ # /commands ---------------------------------------------------- }}}
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=["cask"], required=False),
+ path=dict(required=False),
+ state=dict(
+ default="present",
+ choices=[
+ "present", "installed",
+ "absent", "removed", "uninstalled",
+ ],
+ ),
+ ),
+ supports_check_mode=True,
+ )
+ p = module.params
+
+ if p['name']:
+ casks = p['name'].split(',')
+ else:
+ casks = None
+
+ path = p['path']
+ if path:
+ path = path.split(':')
+ else:
+ path = ['/usr/local/bin']
+
+ state = p['state']
+ if state in ('present', 'installed'):
+ state = 'installed'
+ if state in ('absent', 'removed', 'uninstalled'):
+ state = 'absent'
+
+ brew_cask = HomebrewCask(module=module, path=path, casks=casks,
+ state=state)
+ (failed, changed, message) = brew_cask.run()
+ if failed:
+ module.fail_json(msg=message)
+ else:
+ module.exit_json(changed=changed, msg=message)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()
diff --git a/packaging/homebrew_tap b/packaging/homebrew_tap
new file mode 100644
index 00000000000..a79ba076a8a
--- /dev/null
+++ b/packaging/homebrew_tap
@@ -0,0 +1,215 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Daniel Jaouen
+# Based on homebrew (Andrew Dunham)
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+DOCUMENTATION = '''
+---
+module: homebrew_tap
+author: Daniel Jaouen
+short_description: Tap a Homebrew repository.
+description:
+ - Tap external Homebrew repositories.
+version_added: "1.6"
+options:
+ tap:
+ description:
+ - The repository to tap.
+ required: true
+ state:
+ description:
+ - state of the repository.
+ choices: [ 'present', 'absent' ]
+ required: false
+ default: 'present'
+requirements: [ homebrew ]
+'''
+
+EXAMPLES = '''
+homebrew_tap: tap=homebrew/dupes state=present
+homebrew_tap: tap=homebrew/dupes state=absent
+homebrew_tap: tap=homebrew/dupes,homebrew/science state=present
+'''
+
+
+def a_valid_tap(tap):
+ '''Returns True if the tap is valid.'''
+ regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$')
+ return regex.match(tap)
+
+
+def already_tapped(module, brew_path, tap):
+ '''Returns True if already tapped.'''
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ ])
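+    # 'brew tap' with no arguments lists installed taps one per line; compare case-insensitively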
+ taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_]
+ return tap.lower() in taps
+
+
+def add_tap(module, brew_path, tap):
+ '''Adds a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif not already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'tap',
+ tap,
+ ])
+ if already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully tapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to tap: %s' % tap
+
+ else:
+ msg = 'already tapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def add_taps(module, brew_path, taps):
+ '''Adds one or more taps.'''
+ failed, unchanged, added, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = add_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ added += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'added: %d, unchanged: %d, error: ' + msg
+ msg = msg % (added, unchanged)
+ elif added:
+ changed = True
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+ else:
+ msg = 'added: %d, unchanged: %d' % (added, unchanged)
+
+ return (failed, changed, msg)
+
+
+def remove_tap(module, brew_path, tap):
+ '''Removes a single tap.'''
+ failed, changed, msg = False, False, ''
+
+ if not a_valid_tap(tap):
+ failed = True
+ msg = 'not a valid tap: %s' % tap
+
+ elif already_tapped(module, brew_path, tap):
+ if module.check_mode:
+ module.exit_json(changed=True)
+
+ rc, out, err = module.run_command([
+ brew_path,
+ 'untap',
+ tap,
+ ])
+ if not already_tapped(module, brew_path, tap):
+ changed = True
+ msg = 'successfully untapped: %s' % tap
+ else:
+ failed = True
+ msg = 'failed to untap: %s' % tap
+
+ else:
+ msg = 'already untapped: %s' % tap
+
+ return (failed, changed, msg)
+
+
+def remove_taps(module, brew_path, taps):
+ '''Removes one or more taps.'''
+ failed, unchanged, removed, msg = False, 0, 0, ''
+
+ for tap in taps:
+ (failed, changed, msg) = remove_tap(module, brew_path, tap)
+ if failed:
+ break
+ if changed:
+ removed += 1
+ else:
+ unchanged += 1
+
+ if failed:
+ msg = 'removed: %d, unchanged: %d, error: ' + msg
+ msg = msg % (removed, unchanged)
+ elif removed:
+ changed = True
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+ else:
+ msg = 'removed: %d, unchanged: %d' % (removed, unchanged)
+
+ return (failed, changed, msg)
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ name=dict(aliases=['tap'], required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True,
+ )
+
+ brew_path = module.get_bin_path(
+ 'brew',
+ required=True,
+ opt_dirs=['/usr/local/bin']
+ )
+
+ taps = module.params['name'].split(',')
+
+ if module.params['state'] == 'present':
+ failed, changed, msg = add_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+ elif module.params['state'] == 'absent':
+ failed, changed, msg = remove_taps(module, brew_path, taps)
+
+ if failed:
+ module.fail_json(msg=msg)
+ else:
+ module.exit_json(changed=changed, msg=msg)
+
+# this is magic, see lib/ansible/module_common.py
+#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
+main()
diff --git a/packaging/layman b/packaging/layman
new file mode 100644
index 00000000000..a0b12202812
--- /dev/null
+++ b/packaging/layman
@@ -0,0 +1,236 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Jakub Jirutka
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+import shutil
+from os import path
+from urllib2 import Request, urlopen, URLError
+
+DOCUMENTATION = '''
+---
+module: layman
+author: Jakub Jirutka
+version_added: "1.6"
+short_description: Manage Gentoo overlays
+description:
+    - Uses Layman to manage additional repositories for the Portage package manager on Gentoo Linux.
+      Please note that Layman must be installed on a managed node prior to using this module.
+options:
+ name:
+ description:
+ - The overlay id to install, synchronize, or uninstall.
+ Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)).
+ required: true
+ list_url:
+ description:
+      - A URL of an alternative overlays list that defines the overlay to install.
+        This list will be fetched and saved under C(${overlay_defs}/${name}.xml), where
+        C(overlay_defs) is read from Layman's configuration.
+ required: false
+ state:
+ description:
+ - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay.
+ required: false
+ default: present
+ choices: [present, absent, updated]
+'''
+
+EXAMPLES = '''
+# Install the overlay 'mozilla' which is on the central overlays list.
+- layman: name=mozilla
+
+# Install the overlay 'cvut' from the specified alternative list.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml
+
+# Update (sync) the overlay 'cvut', or install if not installed yet.
+- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated
+
+# Update (sync) all of the installed overlays.
+- layman: name=ALL state=updated
+
+# Uninstall the overlay 'cvut'.
+- layman: name=cvut state=absent
+'''
+
+USERAGENT = 'ansible-httpget'
+
+try:
+ from layman.api import LaymanAPI
+ from layman.config import BareConfig
+ HAS_LAYMAN_API = True
+except ImportError:
+ HAS_LAYMAN_API = False
+
+
+class ModuleError(Exception): pass
+
+
+def init_layman(config=None):
+ '''Returns the initialized ``LaymanAPI``.
+
+ :param config: the layman's configuration to use (optional)
+ '''
+ if config is None: config = BareConfig(read_configfile=True, quietness=1)
+ return LaymanAPI(config)
+
+
+def download_url(url, dest):
+ '''
+ :param url: the URL to download
+ :param dest: the absolute path of where to save the downloaded content to;
+ it must be writable and not a directory
+
+ :raises ModuleError
+ '''
+ request = Request(url)
+ request.add_header('User-agent', USERAGENT)
+
+ try:
+ response = urlopen(request)
+ except URLError, e:
+ raise ModuleError("Failed to get %s: %s" % (url, str(e)))
+
+ try:
+ with open(dest, 'w') as f:
+ shutil.copyfileobj(response, f)
+ except IOError, e:
+ raise ModuleError("Failed to write: %s" % str(e))
+
+
+def install_overlay(name, list_url=None):
+ '''Installs the overlay repository. If not on the central overlays list,
+    '''Installs the overlay repository. If the overlay is not on the central
+    overlays list, then :list_url of an alternative list must be provided. The
+    list will be fetched and saved under ``${overlay_defs}/${name}.xml`` (the
+    location of ``overlay_defs`` is read from Layman's configuration).
+ :param name: the overlay id
+ :param list_url: the URL of the remote repositories list to look for the overlay
+ definition (optional, default: None)
+
+    :returns: True if the overlay was installed, or False if it already exists
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ # read Layman configuration
+ layman_conf = BareConfig(read_configfile=True)
+ layman = init_layman(layman_conf)
+
+ if layman.is_installed(name):
+ return False
+
+ if not layman.is_repo(name):
+ if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \
+ "overlays and URL of the remote list was not provided." % name)
+
+ overlay_defs = layman_conf.get_option('overlay_defs')
+ dest = path.join(overlay_defs, name + '.xml')
+
+ download_url(list_url, dest)
+
+ # reload config
+ layman = init_layman()
+
+ if not layman.add_repos(name): raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def uninstall_overlay(name):
+ '''Uninstalls the given overlay repository from the system.
+
+ :param name: the overlay id to uninstall
+
+    :returns: True if the overlay was uninstalled, or False if it doesn't exist
+ (i.e. nothing has changed)
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.is_installed(name):
+ return False
+
+ layman.delete_repos(name)
+ if layman.get_errors(): raise ModuleError(layman.get_errors())
+
+ return True
+
+
+def sync_overlay(name):
+ '''Synchronizes the specified overlay repository.
+
+ :param name: the overlay repository id to sync
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ if not layman.sync(name):
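+        # on failure, collect the error messages (the second element of each
+        # result tuple) from Layman's sync results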
+ messages = [ str(item[1]) for item in layman.sync_results[2] ]
+ raise ModuleError(messages)
+
+
+def sync_overlays():
+ '''Synchronize all of the installed overlays.
+
+ :raises ModuleError
+ '''
+ layman = init_layman()
+
+ for name in layman.get_installed():
+ sync_overlay(name)
+
+
+def main():
+ # define module
+ module = AnsibleModule(
+ argument_spec = {
+ 'name': { 'required': True },
+ 'list_url': { 'aliases': ['url'] },
+ 'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] },
+ }
+ )
+
+ if not HAS_LAYMAN_API:
+ module.fail_json(msg='Layman is not installed')
+
+ state, name, url = (module.params[key] for key in ['state', 'name', 'list_url'])
+
+ changed = False
+ try:
+ if state == 'present':
+ changed = install_overlay(name, url)
+
+ elif state == 'updated':
+ if name == 'ALL':
+ sync_overlays()
+ elif install_overlay(name, url):
+ changed = True
+ else:
+ sync_overlay(name)
+ else:
+ changed = uninstall_overlay(name)
+
+ except ModuleError, e:
+ module.fail_json(msg=e.message)
+ else:
+ module.exit_json(changed=changed, name=name)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/packaging/macports b/packaging/macports
index b58224b63fe..ae7010b1cbd 100644
--- a/packaging/macports
+++ b/packaging/macports
@@ -53,6 +53,7 @@ EXAMPLES = '''
- macports: name=foo state=inactive
'''
+import pipes
def update_package_db(module, port_path):
""" Updates packages list. """
@@ -68,7 +69,7 @@ def query_package(module, port_path, name, state="present"):
if state == "present":
- rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (port_path, name))
+ rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
@@ -76,7 +77,8 @@ def query_package(module, port_path, name, state="present"):
elif state == "active":
- rc, out, err = module.run_command("%s installed %s | grep -q active" % (port_path, name))
+ rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True)
+
if rc == 0:
return True
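
The quoting pattern introduced here (and repeated in the opkg, pkgin, portinstall, and swdepot hunks below) pairs pipes.quote() with use_unsafe_shell=True: the pipeline still runs through /bin/sh for the grep, but every interpolated value is shell-quoted first. A standalone sketch with a hypothetical hostile package name and port path:

    import pipes

    port_path = '/opt/local/bin/port'   # hypothetical resolved binary path
    name = 'foo; rm -rf /'              # hypothetical hostile input
    cmd = '%s installed | grep -q ^.*%s' % (pipes.quote(port_path), pipes.quote(name))
    print cmd
    # /opt/local/bin/port installed | grep -q ^.*'foo; rm -rf /'
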
diff --git a/packaging/npm b/packaging/npm
index 62179c373aa..7034c7f9964 100644
--- a/packaging/npm
+++ b/packaging/npm
@@ -56,6 +56,11 @@ options:
required: false
choices: [ "yes", "no" ]
default: no
+ registry:
+ description:
+ - The registry to install modules from.
+ required: false
+ version_added: "1.6"
state:
description:
- The state of the node.js library
@@ -77,6 +82,9 @@ description: Install "coffee-script" node.js package globally.
description: Remove the globally installed package "coffee-script".
- npm: name=coffee-script global=yes state=absent
+description: Install "coffee-script" node.js package from custom registry.
+- npm: name=coffee-script registry=http://registry.mysite.com
+
description: Install packages based on package.json.
- npm: path=/app/location
@@ -101,6 +109,7 @@ class Npm(object):
self.name = kwargs['name']
self.version = kwargs['version']
self.path = kwargs['path']
+ self.registry = kwargs['registry']
self.production = kwargs['production']
if kwargs['executable']:
@@ -123,12 +132,20 @@ class Npm(object):
cmd.append('--production')
if self.name:
cmd.append(self.name_version)
+ if self.registry:
+ cmd.append('--registry')
+ cmd.append(self.registry)
#If path is specified, cd into that path and run the command.
+ cwd = None
if self.path:
- os.chdir(self.path)
+ if not os.path.exists(self.path):
+ os.makedirs(self.path)
+ if not os.path.isdir(self.path):
+ self.module.fail_json(msg="path %s is not a directory" % self.path)
+ cwd = self.path
- rc, out, err = self.module.run_command(cmd, check_rc=check_rc)
+ rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
return out
return ''
@@ -142,6 +159,8 @@ class Npm(object):
for dep in data['dependencies']:
if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
missing.append(dep)
+ elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
+ missing.append(dep)
else:
installed.append(dep)
#Named dependency not installed
@@ -179,6 +198,7 @@ def main():
version=dict(default=None),
production=dict(default='no', type='bool'),
executable=dict(default=None),
+ registry=dict(default=None),
state=dict(default='present', choices=['present', 'absent', 'latest'])
)
arg_spec['global'] = dict(default='no', type='bool')
@@ -193,6 +213,7 @@ def main():
glbl = module.params['global']
production = module.params['production']
executable = module.params['executable']
+ registry = module.params['registry']
state = module.params['state']
if not path and not glbl:
@@ -201,7 +222,7 @@ def main():
module.fail_json(msg='uninstalling a package is only available for named packages')
npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \
- executable=executable)
+ executable=executable, registry=registry)
changed = False
if state == 'present':
@@ -215,7 +236,6 @@ def main():
if len(missing) or len(outdated):
changed = True
npm.install()
- npm.update()
else: #absent
installed, missing = npm.list()
if name in installed:
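
Two of the npm changes above are about process hygiene rather than features: run_command(..., cwd=cwd) replaces os.chdir(), scoping the working directory to a single call instead of mutating module-wide state, and a missing path is now created up front. A hedged sketch of that pattern, assuming an AnsibleModule instance named module and a hypothetical path value:

    import os

    path = '/app/location'    # hypothetical
    cwd = None
    if path:
        if not os.path.exists(path):
            os.makedirs(path)                 # create the tree before using it
        if not os.path.isdir(path):
            module.fail_json(msg='path %s is not a directory' % path)
        cwd = path
    # cwd only affects this one invocation; the process cwd stays untouched
    rc, out, err = module.run_command(['npm', 'list', '--json'], cwd=cwd)
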
diff --git a/packaging/opkg b/packaging/opkg
index 4a834cf1a39..0187abe56a8 100644
--- a/packaging/opkg
+++ b/packaging/opkg
@@ -51,6 +51,7 @@ EXAMPLES = '''
- opkg: name=foo,bar state=absent
'''
+import pipes
def update_package_db(module, opkg_path):
""" Updates packages list. """
@@ -66,7 +67,7 @@ def query_package(module, opkg_path, name, state="present"):
if state == "present":
- rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (opkg_path, name))
+ rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
return True
diff --git a/packaging/pacman b/packaging/pacman
index 3080cb4a607..5bf2d931e6e 100644
--- a/packaging/pacman
+++ b/packaging/pacman
@@ -1,82 +1,82 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
-# (c) 2012, Afterburn
-# Written by Afterburn
-# Based on apt module written by Matthew Williams
+# (c) 2012, Afterburn
+# (c) 2013, Aaron Bull Schaefer
#
-# This module is free software: you can redistribute it and/or modify
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
-# This software is distributed in the hope that it will be useful,
+# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
-# along with this software. If not, see <http://www.gnu.org/licenses/>.
-
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pacman
-short_description: Package manager for Archlinux
+short_description: Manage packages with I(pacman)
description:
- - Manages Archlinux packages
-
+ - Manage packages with the I(pacman) package manager, which is used by
+ Arch Linux and its variants.
version_added: "1.0"
+author: Afterburn
+notes: []
+requirements: []
options:
name:
description:
- - name of package to install, upgrade or remove.
- required: true
+ - Name of the package to install, upgrade, or remove.
+ required: false
+ default: null
state:
description:
- - desired state of the package.
+ - Desired state of the package.
required: false
- choices: [ "installed", "absent" ]
+ default: "present"
+ choices: ["present", "absent"]
- update_cache:
+ recurse:
description:
- - update the package database first (pacman -Syy).
+ - When removing a package, also remove its dependencies, provided
+ that they are not required by other packages and were not
+ explicitly installed by a user.
required: false
default: "no"
- choices: [ "yes", "no" ]
+ choices: ["yes", "no"]
+ version_added: "1.3"
- recurse:
+ update_cache:
description:
- - remove all not explicitly installed dependencies not required
- by other packages of the package to remove
+ - Whether or not to refresh the master package lists. This can be
+ run as part of a package installation or as a separate step.
required: false
default: "no"
- choices: [ "yes", "no" ]
- version_added: "1.3"
-
-author: Afterburn
-notes: []
+ choices: ["yes", "no"]
'''
EXAMPLES = '''
# Install package foo
-- pacman: name=foo state=installed
-
-# Remove package foo
-- pacman: name=foo state=absent
+- pacman: name=foo state=present
-# Remove packages foo and bar
+# Remove packages foo and bar
- pacman: name=foo,bar state=absent
# Recursively remove package baz
- pacman: name=baz state=absent recurse=yes
-# Update the package database (pacman -Syy) and install bar (bar will be the updated if a newer version exists)
-- pacman: name=bar, state=installed, update_cache=yes
+# Run the equivalent of "pacman -Syy" as a separate step
+- pacman: update_cache=yes
'''
-
import json
import shlex
import os
@@ -85,12 +85,12 @@ import sys
PACMAN_PATH = "/usr/bin/pacman"
-def query_package(module, name, state="installed"):
-
+def query_package(module, name, state="present"):
# pacman -Q returns 0 if the package is installed,
# 1 if it is not installed
- if state == "installed":
- rc = os.system("pacman -Q %s" % (name))
+ if state == "present":
+ cmd = "pacman -Q %s" % (name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
@@ -99,18 +99,21 @@ def query_package(module, name, state="installed"):
def update_package_db(module):
- rc = os.system("pacman -Syy > /dev/null")
+ cmd = "pacman -Syy"
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
- if rc != 0:
+ if rc == 0:
+ return True
+ else:
module.fail_json(msg="could not update package db")
-
+
def remove_packages(module, packages):
if module.params["recurse"]:
args = "Rs"
else:
args = "R"
-
+
remove_c = 0
    # Using a for loop in case of error, so we can report the package that failed
for package in packages:
@@ -118,11 +121,12 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
- rc = os.system("pacman -%s %s --noconfirm > /dev/null" % (args, package))
+ cmd = "pacman -%s %s --noconfirm" % (args, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
-
+
remove_c += 1
if remove_c > 0:
@@ -133,7 +137,6 @@ def remove_packages(module, packages):
def install_packages(module, packages, package_files):
-
install_c = 0
for i, package in enumerate(packages):
@@ -145,13 +148,14 @@ def install_packages(module, packages, package_files):
else:
params = '-S %s' % package
- rc = os.system("pacman %s --noconfirm > /dev/null" % (params))
+ cmd = "pacman %s --noconfirm" % (params)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to install %s" % (package))
install_c += 1
-
+
if install_c > 0:
module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
@@ -162,7 +166,7 @@ def check_packages(module, packages, state):
would_be_changed = []
for package in packages:
installed = query_package(module, package)
- if ((state == "installed" and not installed) or
+ if ((state == "present" and not installed) or
(state == "absent" and installed)):
would_be_changed.append(package)
if would_be_changed:
@@ -176,42 +180,50 @@ def check_packages(module, packages, state):
def main():
module = AnsibleModule(
- argument_spec = dict(
- state = dict(default="installed", choices=["installed","absent"]),
- update_cache = dict(default="no", aliases=["update-cache"], type='bool'),
- recurse = dict(default="no", type='bool'),
- name = dict(aliases=["pkg"], required=True)),
- supports_check_mode = True)
-
+ argument_spec = dict(
+ name = dict(aliases=['pkg']),
+ state = dict(default='present', choices=['present', 'installed', 'absent', 'removed']),
+ recurse = dict(default='no', choices=BOOLEANS, type='bool'),
+ update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')),
+ required_one_of = [['name', 'update_cache']],
+ supports_check_mode = True)
if not os.path.exists(PACMAN_PATH):
module.fail_json(msg="cannot find pacman, looking for %s" % (PACMAN_PATH))
p = module.params
+ # normalize the state parameter
+ if p['state'] in ['present', 'installed']:
+ p['state'] = 'present'
+ elif p['state'] in ['absent', 'removed']:
+ p['state'] = 'absent'
+
if p["update_cache"] and not module.check_mode:
update_package_db(module)
-
- pkgs = p["name"].split(",")
-
- pkg_files = []
- for i, pkg in enumerate(pkgs):
- if pkg.endswith('.pkg.tar.xz'):
- # The package given is a filename, extract the raw pkg name from
- # it and store the filename
- pkg_files.append(pkg)
- pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
- else:
- pkg_files.append(None)
-
- if module.check_mode:
- check_packages(module, pkgs, p['state'])
-
- if p["state"] == "installed":
- install_packages(module, pkgs, pkg_files)
-
- elif p["state"] == "absent":
- remove_packages(module, pkgs)
+ if not p['name']:
+ module.exit_json(changed=True, msg='updated the package master lists')
+
+ if p['name']:
+ pkgs = p['name'].split(',')
+
+ pkg_files = []
+ for i, pkg in enumerate(pkgs):
+ if pkg.endswith('.pkg.tar.xz'):
+ # The package given is a filename, extract the raw pkg name from
+ # it and store the filename
+ pkg_files.append(pkg)
+ pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
+ else:
+ pkg_files.append(None)
+
+ if module.check_mode:
+ check_packages(module, pkgs, p['state'])
+
+ if p['state'] == 'present':
+ install_packages(module, pkgs, pkg_files)
+ elif p['state'] == 'absent':
+ remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
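
The rewritten pacman main() accepts four state spellings but collapses them to two canonical values before any package logic runs, so the rest of the module never has to know about the aliases. Distilled into a standalone sketch:

    def normalize_state(state):
        # same mapping as the normalization block in main() above
        if state in ('present', 'installed'):
            return 'present'
        if state in ('absent', 'removed'):
            return 'absent'
        raise ValueError('unknown state: %s' % state)

    assert normalize_state('installed') == 'present'
    assert normalize_state('removed') == 'absent'
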
diff --git a/packaging/pip b/packaging/pip
index 35487c32963..aa55bf8ba0b 100644
--- a/packaging/pip
+++ b/packaging/pip
@@ -253,10 +253,10 @@ def main():
cmd = '%s --no-site-packages %s' % (virtualenv, env)
else:
cmd = '%s %s' % (virtualenv, env)
- os.chdir(tempfile.gettempdir())
+ this_dir = tempfile.gettempdir()
if chdir:
- os.chdir(chdir)
- rc, out_venv, err_venv = module.run_command(cmd)
+ this_dir = os.path.join(this_dir, chdir)
+ rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir)
out += out_venv
err += err_venv
if rc != 0:
@@ -298,10 +298,11 @@ def main():
if module.check_mode:
module.exit_json(changed=True)
- os.chdir(tempfile.gettempdir())
+ this_dir = tempfile.gettempdir()
if chdir:
- os.chdir(chdir)
- rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix)
+ this_dir = os.path.join(this_dir, chdir)
+
+ rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir)
out += out_pip
err += err_pip
if rc == 1 and state == 'absent' and 'not installed' in out_pip:
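
The pip change leans on a documented os.path.join() property: when the second component is absolute, the first is discarded, so an absolute chdir overrides the tempdir default while a relative one nests beneath it. For illustration:

    import os
    import tempfile

    this_dir = tempfile.gettempdir()              # e.g. /tmp
    print os.path.join(this_dir, '/app/src')      # /app/src   (absolute wins)
    print os.path.join(this_dir, 'build')         # /tmp/build (relative nests)
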
diff --git a/packaging/pkgin b/packaging/pkgin
index 0554cf9a216..866c9f76a4c 100755
--- a/packaging/pkgin
+++ b/packaging/pkgin
@@ -58,13 +58,13 @@ import json
import shlex
import os
import sys
-
+import pipes
def query_package(module, pkgin_path, name, state="present"):
if state == "present":
- rc, out, err = module.run_command("%s -y list | grep ^%s" % (pkgin_path, name))
+ rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
# At least one package with a package name that starts with ``name``
diff --git a/packaging/pkgng b/packaging/pkgng
index 7b0468a7cbd..a1f443fd4e1 100644
--- a/packaging/pkgng
+++ b/packaging/pkgng
@@ -46,10 +46,22 @@ options:
choices: [ 'yes', 'no' ]
required: false
default: no
+ annotation:
+ description:
+          - a comma-separated list of key/value pairs of the form
+            <+/-/:><key>[=<value>]. A '+' denotes adding an annotation, a
+ '-' denotes removing an annotation, and ':' denotes modifying an
+ annotation.
+ If setting or modifying annotations, a value must be provided.
+ required: false
+ version_added: "1.6"
pkgsite:
description:
- - specify packagesite to use for downloading packages, if
- not specified, use settings from /usr/local/etc/pkg.conf
+      - for pkgng versions before 1.1.4, specify the packagesite to use
+        for downloading packages; if not specified, settings from
+        /usr/local/etc/pkg.conf are used.
+        For newer pkgng versions, specify the name of a repository
+        configured in /usr/local/etc/pkg/repos
required: false
author: bleader
notes:
@@ -60,6 +72,9 @@ EXAMPLES = '''
# Install package foo
- pkgng: name=foo state=present
+# Annotate package foo and bar
+- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
+
# Remove packages foo and bar
- pkgng: name=foo,bar state=absent
'''
@@ -68,92 +83,217 @@ EXAMPLES = '''
import json
import shlex
import os
+import re
import sys
-def query_package(module, pkgin_path, name):
+def query_package(module, pkgng_path, name):
- rc, out, err = module.run_command("%s info -g -e %s" % (pkgin_path, name))
+ rc, out, err = module.run_command("%s info -g -e %s" % (pkgng_path, name))
if rc == 0:
return True
return False
+def pkgng_older_than(module, pkgng_path, compare_version):
+
+ rc, out, err = module.run_command("%s -v" % pkgng_path)
+ version = map(lambda x: int(x), re.split(r'[\._]', out))
-def remove_packages(module, pkgin_path, packages):
+ i = 0
+ new_pkgng = True
+ while compare_version[i] == version[i]:
+ i += 1
+ if i == min(len(compare_version), len(version)):
+ break
+ else:
+ if compare_version[i] > version[i]:
+ new_pkgng = False
+ return not new_pkgng
+
+
+def remove_packages(module, pkgng_path, packages):
remove_c = 0
    # Using a for loop in case of error, so we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
- if not query_package(module, pkgin_path, package):
+ if not query_package(module, pkgng_path, package):
continue
if not module.check_mode:
- rc, out, err = module.run_command("%s delete -y %s" % (pkgin_path, package))
+ rc, out, err = module.run_command("%s delete -y %s" % (pkgng_path, package))
- if not module.check_mode and query_package(module, pkgin_path, package):
+ if not module.check_mode and query_package(module, pkgng_path, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
- module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
+ return (True, "removed %s package(s)" % remove_c)
- module.exit_json(changed=False, msg="package(s) already absent")
+ return (False, "package(s) already absent")
-def install_packages(module, pkgin_path, packages, cached, pkgsite):
+def install_packages(module, pkgng_path, packages, cached, pkgsite):
install_c = 0
+ # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
+ # in /usr/local/etc/pkg/repos
+ old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
- pkgsite="PACKAGESITE=%s" % (pkgsite)
-
- if not module.check_mode and cached == "no":
- rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgin_path))
+ if old_pkgng:
+ pkgsite = "PACKAGESITE=%s" % (pkgsite)
+ else:
+ pkgsite = "-r %s" % (pkgsite)
+
+ if not module.check_mode and not cached:
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
+ else:
+ rc, out, err = module.run_command("%s update" % (pkgng_path))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
- if query_package(module, pkgin_path, package):
+ if query_package(module, pkgng_path, package):
continue
if not module.check_mode:
- rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgin_path, package))
+ if old_pkgng:
+ rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package))
+ else:
+ rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package))
- if not module.check_mode and not query_package(module, pkgin_path, package):
+ if not module.check_mode and not query_package(module, pkgng_path, package):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
if install_c > 0:
- module.exit_json(changed=True, msg="present %s package(s)" % (install_c))
+ return (True, "added %s package(s)" % (install_c))
- module.exit_json(changed=False, msg="package(s) already present")
+ return (False, "package(s) already present")
+def annotation_query(module, pkgng_path, package, tag):
+ rc, out, err = module.run_command("%s info -g -A %s" % (pkgng_path, package))
+    match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
+ if match:
+ return match.group('value')
+ return False
+
+
+def annotation_add(module, pkgng_path, package, tag, value):
+ _value = annotation_query(module, pkgng_path, package, tag)
+ if not _value:
+ # Annotation does not exist, add it.
+ rc, out, err = module.run_command('%s annotate -y -A %s %s "%s"'
+ % (pkgng_path, package, tag, value))
+ if rc != 0:
+ module.fail_json("could not annotate %s: %s"
+ % (package, out), stderr=err)
+ return True
+ elif _value != value:
+ # Annotation exists, but value differs
+ module.fail_json(
+ mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
+ % (package, tag, _value, value))
+ return False
+ else:
+ # Annotation exists, nothing to do
+ return False
+
+def annotation_delete(module, pkgng_path, package, tag, value):
+ _value = annotation_query(module, pkgng_path, package, tag)
+ if _value:
+ rc, out, err = module.run_command('%s annotate -y -D %s %s'
+ % (pkgng_path, package, tag))
+ if rc != 0:
+ module.fail_json("could not delete annotation to %s: %s"
+ % (package, out), stderr=err)
+ return True
+ return False
+
+def annotation_modify(module, pkgng_path, package, tag, value):
+ _value = annotation_query(module, pkgng_path, package, tag)
+    if not _value:
+        # No such tag
+        module.fail_json(msg="could not change annotation on %s: tag %s does not exist"
+ % (package, tag))
+ elif _value == value:
+ # No change in value
+ return False
+ else:
+        rc, out, err = module.run_command('%s annotate -y -M %s %s "%s"'
+            % (pkgng_path, package, tag, value))
+        if rc != 0:
+            module.fail_json(msg="could not change annotation on %s: %s"
+                % (package, out), stderr=err)
+ return True
+
+
+def annotate_packages(module, pkgng_path, packages, annotation):
+ annotate_c = 0
+ annotations = map(lambda _annotation:
+        re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
+ _annotation).groupdict(),
+ re.split(r',', annotation))
+
+ operation = {
+ '+': annotation_add,
+ '-': annotation_delete,
+ ':': annotation_modify
+ }
+
+ for package in packages:
+ for _annotation in annotations:
+ annotate_c += ( 1 if operation[_annotation['operation']](
+ module, pkgng_path, package,
+ _annotation['tag'], _annotation['value']) else 0 )
+
+ if annotate_c > 0:
+ return (True, "added %s annotations." % annotate_c)
+ return (False, "changed no annotations")
def main():
module = AnsibleModule(
argument_spec = dict(
- state = dict(default="present", choices=["present","absent"]),
+ state = dict(default="present", choices=["present","absent"], required=False),
name = dict(aliases=["pkg"], required=True),
cached = dict(default=False, type='bool'),
+ annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False)),
supports_check_mode = True)
- pkgin_path = module.get_bin_path('pkg', True)
+ pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"].split(",")
+ changed = False
+ msgs = []
+
if p["state"] == "present":
- install_packages(module, pkgin_path, pkgs, p["cached"], p["pkgsite"])
+ _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"])
+ changed = changed or _changed
+ msgs.append(_msg)
elif p["state"] == "absent":
- remove_packages(module, pkgin_path, pkgs)
+ _changed, _msg = remove_packages(module, pkgng_path, pkgs)
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ if p["annotation"]:
+ _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"])
+ changed = changed or _changed
+ msgs.append(_msg)
+
+ module.exit_json(changed=changed, msg=", ".join(msgs))
+
+
# import module snippets
from ansible.module_utils.basic import *
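
The annotation grammar documented above, <+/-/:><key>[=<value>] with comma separation, maps directly onto the named-group regex in annotate_packages(). A standalone sketch using the hypothetical annotation string from the EXAMPLES:

    import re

    spec = '+test1=baz,-test2,:test3=foobar'
    for part in spec.split(','):
        m = re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?', part)
        print m.group('operation'), m.group('tag'), m.group('value')
    # + test1 baz
    # - test2 None
    # : test3 foobar
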
diff --git a/packaging/pkgutil b/packaging/pkgutil
index d6c4f536c5a..e7d1ce7a0d6 100644
--- a/packaging/pkgutil
+++ b/packaging/pkgutil
@@ -58,13 +58,14 @@ pkgutil: name=CSWcommon state=present
# Install a package from a specific repository
pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel' state=latest
'''
+
import os
+import pipes
def package_installed(module, name):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
cmd.append(name)
- #rc, out, err = module.run_command(' '.join(cmd), shell=False)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
return True
@@ -73,12 +74,14 @@ def package_installed(module, name):
def package_latest(module, name, site):
# Only supports one package
+    name = pipes.quote(name)
+    if site is not None:
+        site = pipes.quote(site)
cmd = [ 'pkgutil', '--single', '-c' ]
if site is not None:
cmd += [ '-t', site ]
cmd.append(name)
cmd += [ '| tail -1 | grep -v SAME' ]
- rc, out, err = module.run_command(' '.join(cmd))
+ rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True)
if rc == 1:
return True
else:
diff --git a/packaging/portage b/packaging/portage
new file mode 100644
index 00000000000..2cce4b41d1e
--- /dev/null
+++ b/packaging/portage
@@ -0,0 +1,387 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Yap Sok Ann
+# Written by Yap Sok Ann
+# Based on apt module written by Matthew Williams
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+
+DOCUMENTATION = '''
+---
+module: portage
+short_description: Package manager for Gentoo
+description:
+ - Manages Gentoo packages
+version_added: "1.6"
+
+options:
+ package:
+ description:
+ - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world)
+ required: false
+ default: null
+
+ state:
+ description:
+ - State of the package atom
+ required: false
+ default: "present"
+ choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ]
+
+ update:
+ description:
+ - Update packages to the best version available (--update)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ deep:
+ description:
+ - Consider the entire dependency tree of packages (--deep)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ newuse:
+ description:
+ - Include installed packages where USE flags have changed (--newuse)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ oneshot:
+ description:
+ - Do not add the packages to the world file (--oneshot)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ noreplace:
+ description:
+ - Do not re-emerge installed packages (--noreplace)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ nodeps:
+ description:
+ - Only merge packages but not their dependencies (--nodeps)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ onlydeps:
+ description:
+ - Only merge packages' dependencies but not the packages (--onlydeps)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ depclean:
+ description:
+ - Remove packages not needed by explicitly merged packages (--depclean)
+ - If no package is specified, clean up the world's dependencies
+ - Otherwise, --depclean serves as a dependency aware version of --unmerge
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ quiet:
+ description:
+ - Run emerge in quiet mode (--quiet)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ verbose:
+ description:
+ - Run emerge in verbose mode (--verbose)
+ required: false
+ default: null
+ choices: [ "yes" ]
+
+ sync:
+ description:
+ - Sync package repositories first
+ - If yes, perform "emerge --sync"
+ - If web, perform "emerge-webrsync"
+ required: false
+ default: null
+ choices: [ "yes", "web" ]
+
+requirements: [ gentoolkit ]
+author: Yap Sok Ann
+notes: []
+'''
+
+EXAMPLES = '''
+# Make sure package foo is installed
+- portage: package=foo state=present
+
+# Make sure package foo is not installed
+- portage: package=foo state=absent
+
+# Update package foo to the "best" version
+- portage: package=foo update=yes
+
+# Sync repositories and update world
+- portage: package=@world update=yes deep=yes sync=yes
+
+# Remove unneeded packages
+- portage: depclean=yes
+
+# Remove package foo if it is not explicitly needed
+- portage: package=foo state=absent depclean=yes
+'''
+
+
+import os
+import pipes
+
+
+def query_package(module, package, action):
+ if package.startswith('@'):
+ return query_set(module, package, action)
+ return query_atom(module, package, action)
+
+
+def query_atom(module, atom, action):
+ cmd = '%s list %s' % (module.equery_path, atom)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def query_set(module, set, action):
+ system_sets = [
+ '@live-rebuild',
+ '@module-rebuild',
+ '@preserved-rebuild',
+ '@security',
+ '@selected',
+ '@system',
+ '@world',
+ '@x11-module-rebuild',
+ ]
+
+ if set in system_sets:
+ if action == 'unmerge':
+ module.fail_json(msg='set %s cannot be removed' % set)
+ return False
+
+ world_sets_path = '/var/lib/portage/world_sets'
+ if not os.path.exists(world_sets_path):
+ return False
+
+ cmd = 'grep %s %s' % (set, world_sets_path)
+
+ rc, out, err = module.run_command(cmd)
+ return rc == 0
+
+
+def sync_repositories(module, webrsync=False):
+ if module.check_mode:
+ module.fail_json(msg='check mode not supported by sync')
+
+ if webrsync:
+ webrsync_path = module.get_bin_path('emerge-webrsync', required=True)
+ cmd = '%s --quiet' % webrsync_path
+ else:
+ cmd = '%s --sync --quiet' % module.emerge_path
+
+ rc, out, err = module.run_command(cmd)
+ if rc != 0:
+ module.fail_json(msg='could not sync package repositories')
+
+
+# Note: In the 3 functions below, equery is done one-by-one, but emerge is done
+# in one go. If that is not desirable, split the packages into multiple tasks
+# instead of joining them together with comma.
+
+
+def emerge_packages(module, packages):
+ p = module.params
+
+ if not (p['update'] or p['noreplace']):
+ for package in packages:
+ if not query_package(module, package, 'emerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already present.')
+
+ args = []
+ for flag in [
+ 'update', 'deep', 'newuse',
+ 'oneshot', 'noreplace',
+ 'nodeps', 'onlydeps',
+ 'quiet', 'verbose',
+ ]:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not installed.',
+ )
+
+ changed = True
+ for line in out.splitlines():
+ if line.startswith('>>> Emerging (1 of'):
+ break
+ else:
+ changed = False
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages installed.',
+ )
+
+
+def unmerge_packages(module, packages):
+ p = module.params
+
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--unmerge']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+
+ if rc != 0:
+ module.fail_json(
+ cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages not removed.',
+ )
+
+ module.exit_json(
+ changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Packages removed.',
+ )
+
+
+def cleanup_packages(module, packages):
+ p = module.params
+
+ if packages:
+ for package in packages:
+ if query_package(module, package, 'unmerge'):
+ break
+ else:
+ module.exit_json(changed=False, msg='Packages already absent.')
+
+ args = ['--depclean']
+
+ for flag in ['quiet', 'verbose']:
+ if p[flag]:
+ args.append('--%s' % flag)
+
+ cmd, (rc, out, err) = run_emerge(module, packages, *args)
+ if rc != 0:
+ module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err)
+
+ removed = 0
+ for line in out.splitlines():
+ if not line.startswith('Number removed:'):
+ continue
+ parts = line.split(':')
+ removed = int(parts[1].strip())
+ changed = removed > 0
+
+ module.exit_json(
+ changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err,
+ msg='Depclean completed.',
+ )
+
+
+def run_emerge(module, packages, *args):
+ args = list(args)
+
+ if module.check_mode:
+ args.append('--pretend')
+
+ cmd = [module.emerge_path] + args + packages
+ return cmd, module.run_command(cmd)
+
+
+portage_present_states = ['present', 'emerged', 'installed']
+portage_absent_states = ['absent', 'unmerged', 'removed']
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ package=dict(default=None, aliases=['name']),
+ state=dict(
+ default=portage_present_states[0],
+ choices=portage_present_states + portage_absent_states,
+ ),
+ update=dict(default=None, choices=['yes']),
+ deep=dict(default=None, choices=['yes']),
+ newuse=dict(default=None, choices=['yes']),
+ oneshot=dict(default=None, choices=['yes']),
+ noreplace=dict(default=None, choices=['yes']),
+ nodeps=dict(default=None, choices=['yes']),
+ onlydeps=dict(default=None, choices=['yes']),
+ depclean=dict(default=None, choices=['yes']),
+ quiet=dict(default=None, choices=['yes']),
+ verbose=dict(default=None, choices=['yes']),
+ sync=dict(default=None, choices=['yes', 'web']),
+ ),
+ required_one_of=[['package', 'sync', 'depclean']],
+ mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']],
+ supports_check_mode=True,
+ )
+
+ module.emerge_path = module.get_bin_path('emerge', required=True)
+ module.equery_path = module.get_bin_path('equery', required=True)
+
+ p = module.params
+
+ if p['sync']:
+ sync_repositories(module, webrsync=(p['sync'] == 'web'))
+ if not p['package']:
+ return
+
+ packages = p['package'].split(',') if p['package'] else []
+
+ if p['depclean']:
+ if packages and p['state'] not in portage_absent_states:
+ module.fail_json(
+ msg='Depclean can only be used with package when the state is '
+ 'one of: %s' % portage_absent_states,
+ )
+
+ cleanup_packages(module, packages)
+
+ elif p['state'] in portage_present_states:
+ emerge_packages(module, packages)
+
+ elif p['state'] in portage_absent_states:
+ unmerge_packages(module, packages)
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
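
emerge_packages() and unmerge_packages() in the new portage module both rely on Python's for/else: the else clause runs only when the loop finishes without hitting break, i.e. when every package is already in the requested state. Distilled, with a hypothetical stand-in for query_package():

    packages = ['sys-apps/foo', 'bar']       # hypothetical atoms

    def installed(pkg):                      # stand-in for query_package()
        return True

    for package in packages:
        if not installed(package):
            break                            # at least one package needs work
    else:
        print 'Packages already present.'    # no break: skip the emerge entirely
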
diff --git a/packaging/portinstall b/packaging/portinstall
index 4bef8035be3..88e654b8db4 100644
--- a/packaging/portinstall
+++ b/packaging/portinstall
@@ -71,7 +71,7 @@ def query_package(module, name):
if pkg_info_path:
pkgng = False
pkg_glob_path = module.get_bin_path('pkg_glob', True)
- rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, name))
+ rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True)
else:
pkgng = True
pkg_info_path = module.get_bin_path('pkg', True)
@@ -128,11 +128,11 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
- rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, package))
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True)
if query_package(module, package):
name_without_digits = re.sub('[0-9]', '', package)
- rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, name_without_digits))
+ rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True)
if query_package(module, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
diff --git a/packaging/redhat_subscription b/packaging/redhat_subscription
index e363aa0946a..f9918ada4b0 100644
--- a/packaging/redhat_subscription
+++ b/packaging/redhat_subscription
@@ -75,39 +75,13 @@ EXAMPLES = '''
import os
import re
import types
-import subprocess
import ConfigParser
import shlex
-class CommandException(Exception):
- pass
-
-
-def run_command(args):
- '''
- Convenience method to run a command, specified as a list of arguments.
- Returns:
- * tuple - (stdout, stder, retcode)
- '''
-
- # Coerce into a string
- if isinstance(args, str):
- args = shlex.split(args)
-
- # Run desired command
- proc = subprocess.Popen(args, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- (stdout, stderr) = proc.communicate()
- returncode = proc.poll()
- if returncode != 0:
- cmd = ' '.join(args)
- raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout))
- return (stdout, stderr, returncode)
-
-
-class RegistrationBase (object):
- def __init__(self, username=None, password=None):
+class RegistrationBase(object):
+ def __init__(self, module, username=None, password=None):
+ self.module = module
self.username = username
self.password = password
@@ -147,9 +121,10 @@ class RegistrationBase (object):
class Rhsm(RegistrationBase):
- def __init__(self, username=None, password=None):
- RegistrationBase.__init__(self, username, password)
+ def __init__(self, module, username=None, password=None):
+ RegistrationBase.__init__(self, module, username, password)
self.config = self._read_config()
+ self.module = module
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
'''
@@ -199,8 +174,8 @@ class Rhsm(RegistrationBase):
for k,v in kwargs.items():
if re.search(r'^(system|rhsm)_', k):
args.append('--%s=%s' % (k.replace('_','.'), v))
-
- run_command(args)
+
+ self.module.run_command(args, check_rc=True)
@property
def is_registered(self):
@@ -216,13 +191,11 @@ class Rhsm(RegistrationBase):
os.path.isfile('/etc/pki/consumer/key.pem')
args = ['subscription-manager', 'identity']
- try:
- (stdout, stderr, retcode) = run_command(args)
- except CommandException, e:
- return False
- else:
- # Display some debug output
+ rc, stdout, stderr = self.module.run_command(args, check_rc=False)
+ if rc == 0:
return True
+ else:
+ return False
def register(self, username, password, autosubscribe, activationkey):
'''
@@ -243,8 +216,7 @@ class Rhsm(RegistrationBase):
if password:
args.extend(['--password', password])
- # Do the needful...
- run_command(args)
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unsubscribe(self):
'''
@@ -253,7 +225,7 @@ class Rhsm(RegistrationBase):
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unsubscribe', '--all']
- run_command(args)
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def unregister(self):
'''
@@ -262,7 +234,7 @@ class Rhsm(RegistrationBase):
* Exception - if error occurs while running command
'''
args = ['subscription-manager', 'unregister']
- run_command(args)
+        rc, stdout, stderr = self.module.run_command(args, check_rc=True)
def subscribe(self, regexp):
'''
@@ -273,7 +245,7 @@ class Rhsm(RegistrationBase):
'''
# Available pools ready for subscription
- available_pools = RhsmPools()
+ available_pools = RhsmPools(self.module)
for pool in available_pools.filter(regexp):
pool.subscribe()
@@ -284,7 +256,8 @@ class RhsmPool(object):
Convenience class for housing subscription information
'''
- def __init__(self, **kwargs):
+ def __init__(self, module, **kwargs):
+ self.module = module
for k,v in kwargs.items():
setattr(self, k, v)
@@ -292,15 +265,20 @@ class RhsmPool(object):
return str(self.__getattribute__('_name'))
def subscribe(self):
- (stdout, stderr, retcode) = run_command("subscription-manager subscribe --pool %s" % self.PoolId)
- return True
+ args = "subscription-manager subscribe --pool %s" % self.PoolId
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
+ if rc == 0:
+ return True
+ else:
+ return False
class RhsmPools(object):
"""
This class is used for manipulating pools subscriptions with RHSM
"""
- def __init__(self):
+ def __init__(self, module):
+ self.module = module
self.products = self._load_product_list()
def __iter__(self):
@@ -310,7 +288,8 @@ class RhsmPools(object):
"""
        Loads list of all available pools for system in data structure
"""
- (stdout, stderr, retval) = run_command("subscription-manager list --available")
+ args = "subscription-manager list --available"
+ rc, stdout, stderr = self.module.run_command(args, check_rc=True)
products = []
for line in stdout.split('\n'):
@@ -326,7 +305,7 @@ class RhsmPools(object):
value = value.strip()
if key in ['ProductName', 'SubscriptionName']:
# Remember the name for later processing
- products.append(RhsmPool(_name=value, key=value))
+ products.append(RhsmPool(self.module, _name=value, key=value))
elif products:
# Associate value with most recently recorded product
products[-1].__setattr__(key, value)
@@ -348,7 +327,7 @@ class RhsmPools(object):
def main():
# Load RHSM configuration from file
- rhn = Rhsm()
+ rhn = Rhsm(None)
module = AnsibleModule(
argument_spec = dict(
@@ -364,6 +343,7 @@ def main():
)
)
+ rhn.module = module
state = module.params['state']
username = module.params['username']
password = module.params['password']
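
Every subprocess call above now goes through module.run_command(), which returns the tuple (rc, stdout, stderr), in that order, and with check_rc=True fails the module itself on a non-zero exit. A hedged sketch of the is_registered probe, assuming an AnsibleModule instance named module:

    rc, stdout, stderr = module.run_command(
        ['subscription-manager', 'identity'], check_rc=False)
    is_registered = (rc == 0)    # exit code 0 means an identity certificate exists
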
diff --git a/packaging/rhn_register b/packaging/rhn_register
index 5e8c3718f98..552dfcc580a 100644
--- a/packaging/rhn_register
+++ b/packaging/rhn_register
@@ -58,7 +58,7 @@ EXAMPLES = '''
# Register as user (joe_user) with password (somepass) against a satellite
# server specified by (server_url).
-- rhn_register:
+- rhn_register: >
state=present
username=joe_user
password=somepass
@@ -72,12 +72,7 @@ EXAMPLES = '''
'''
import sys
-import os
-import re
import types
-import subprocess
-import ConfigParser
-import shlex
import xmlrpclib
import urlparse
@@ -89,72 +84,10 @@ try:
except ImportError, e:
module.fail_json(msg="Unable to import up2date_client. Is 'rhn-client-tools' installed?\n%s" % e)
-
-class CommandException(Exception):
- pass
-
-
-def run_command(args):
- '''
- Convenience method to run a command, specified as a list of arguments.
- Returns:
- * tuple - (stdout, stder, retcode)
- '''
-
- # Coerce into a string
- if isinstance(args, str):
- args = shlex.split(args)
-
- # Run desired command
- proc = subprocess.Popen(args, stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT)
- (stdout, stderr) = proc.communicate()
- returncode = proc.poll()
- if returncode != 0:
- cmd = ' '.join(args)
- raise CommandException("Command failed (%s): %s\n%s" % (returncode, cmd, stdout))
- return (stdout, stderr, returncode)
-
-
-class RegistrationBase (object):
- def __init__(self, username=None, password=None):
- self.username = username
- self.password = password
-
- def configure(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def enable(self):
- # Remove any existing redhat.repo
- redhat_repo = '/etc/yum.repos.d/redhat.repo'
- if os.path.isfile(redhat_repo):
- os.unlink(redhat_repo)
-
- def register(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unregister(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def unsubscribe(self):
- raise NotImplementedError("Must be implemented by a sub-class")
-
- def update_plugin_conf(self, plugin, enabled=True):
- plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
- if os.path.isfile(plugin_conf):
- cfg = ConfigParser.ConfigParser()
- cfg.read([plugin_conf])
- if enabled:
- cfg.set('main', 'enabled', 1)
- else:
- cfg.set('main', 'enabled', 0)
- fd = open(plugin_conf, 'rwa+')
- cfg.write(fd)
- fd.close()
-
- def subscribe(self, **kwargs):
- raise NotImplementedError("Must be implemented by a sub-class")
-
+# INSERT REDHAT SNIPPETS
+from ansible.module_utils.redhat import *
+# INSERT COMMON SNIPPETS
+from ansible.module_utils.basic import *
class Rhn(RegistrationBase):
@@ -264,21 +197,26 @@ class Rhn(RegistrationBase):
Register system to RHN. If enable_eus=True, extended update
support will be requested.
'''
- register_cmd = "/usr/sbin/rhnreg_ks --username '%s' --password '%s' --force" % (self.username, self.password)
+ register_cmd = "/usr/sbin/rhnreg_ks --username='%s' --password='%s' --force" % (self.username, self.password)
+ if self.module.params.get('server_url', None):
+ register_cmd += " --serverUrl=%s" % self.module.params.get('server_url')
if enable_eus:
register_cmd += " --use-eus-channel"
if activationkey is not None:
register_cmd += " --activationkey '%s'" % activationkey
# FIXME - support --profilename
# FIXME - support --systemorgid
- run_command(register_cmd)
+ rc, stdout, stderr = self.module.run_command(register_cmd, check_rc=True, use_unsafe_shell=True)
def api(self, method, *args):
'''
Convenience RPC wrapper
'''
if not hasattr(self, 'server') or self.server is None:
- url = "https://xmlrpc.%s/rpc/api" % self.hostname
+ if self.hostname != 'rhn.redhat.com':
+ url = "https://%s/rpc/api" % self.hostname
+ else:
+ url = "https://xmlrpc.%s/rpc/api" % self.hostname
self.server = xmlrpclib.Server(url, verbose=0)
self.session = self.server.auth.login(self.username, self.password)
@@ -309,14 +247,14 @@ class Rhn(RegistrationBase):
Subscribe to requested yum repositories using 'rhn-channel' command
'''
rhn_channel_cmd = "rhn-channel --user='%s' --password='%s'" % (self.username, self.password)
- (stdout, stderr, rc) = run_command(rhn_channel_cmd + " --available-channels")
+ rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --available-channels", check_rc=True)
# Enable requested repoid's
for wanted_channel in channels:
# Each inserted repo regexp will be matched. If no match, no success.
        for available_channel in stdout.rstrip().split('\n'): # .rstrip() because of \n at the end -> empty string at the end
            if re.search(wanted_channel, available_channel):
- run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel)
+ rc, stdout, stderr = self.module.run_command(rhn_channel_cmd + " --add --channel=%s" % available_channel, check_rc=True)
def main():
@@ -341,6 +279,7 @@ def main():
rhn.configure(module.params['server_url'])
activationkey = module.params['activationkey']
channels = module.params['channels']
+ rhn.module = module
# Ensure system is registered
if state == 'present':
@@ -359,10 +298,10 @@ def main():
rhn.enable()
rhn.register(module.params['enable_eus'] == True, activationkey)
rhn.subscribe(channels)
- except CommandException, e:
+ except Exception, e:
module.fail_json(msg="Failed to register with '%s': %s" % (rhn.hostname, e))
- else:
- module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
+
+ module.exit_json(changed=True, msg="System successfully registered to '%s'." % rhn.hostname)
# Ensure system is *not* registered
if state == 'absent':
@@ -371,12 +310,10 @@ def main():
else:
try:
rhn.unregister()
- except CommandException, e:
+ except Exception, e:
module.fail_json(msg="Failed to unregister: %s" % e)
- else:
- module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
+
+ module.exit_json(changed=True, msg="System successfully unregistered from %s." % rhn.hostname)
-# import module snippets
-from ansible.module_utils.basic import *
main()
diff --git a/packaging/rpm_key b/packaging/rpm_key
index 82532477348..d60706b157d 100644
--- a/packaging/rpm_key
+++ b/packaging/rpm_key
@@ -42,6 +42,14 @@ options:
choices: [present, absent]
description:
      - Whether the key will be imported or removed from the rpm db.
+ validate_certs:
+ description:
+ - If C(no) and the C(key) is a url starting with https, SSL certificates will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
'''
EXAMPLES = '''
@@ -57,7 +65,6 @@ EXAMPLES = '''
import syslog
import os.path
import re
-import urllib2
import tempfile
# Attempt to download at most 8192 bytes.
@@ -116,8 +123,8 @@ class RpmKey:
def fetch_key(self, url, maxbytes=MAXBYTES):
"""Downloads a key from url, returns a valid path to a gpg key"""
try:
- fd = urllib2.urlopen(url)
- key = fd.read(maxbytes)
+ rsp, info = fetch_url(self.module, url)
+ key = rsp.read(maxbytes)
if not is_pubkey(key):
self.module.fail_json(msg="Not a public key: %s" % url)
tmpfd, tmpname = tempfile.mkstemp()
@@ -131,7 +138,9 @@ class RpmKey:
def normalize_keyid(self, keyid):
"""Ensure a keyid doesn't have a leading 0x, has leading or trailing whitespace, and make sure is lowercase"""
ret = keyid.strip().lower()
- if ret.startswith(('0x', '0X')):
+ if ret.startswith('0x'):
+ return ret[2:]
+ elif ret.startswith('0X'):
return ret[2:]
else:
return ret
@@ -141,9 +150,9 @@ class RpmKey:
stdout, stderr = self.execute_command([gpg, '--no-tty', '--batch', '--with-colons', '--fixed-list-mode', '--list-packets', keyfile])
for line in stdout.splitlines():
line = line.strip()
- if line.startswith('keyid:'):
+ if line.startswith(':signature packet:'):
# We want just the last 8 characters of the keyid
- keyid = line.split(':')[1].strip()[8:]
+ keyid = line.split()[-1].strip()[8:]
return keyid
self.json_fail(msg="Unexpected gpg output")
@@ -161,7 +170,7 @@ class RpmKey:
return stdout, stderr
def is_key_imported(self, keyid):
- stdout, stderr = self.execute_command([self.rpm, '-q', 'gpg-pubkey'])
+ stdout, stderr = self.execute_command([self.rpm, '-qa', 'gpg-pubkey'])
for line in stdout.splitlines():
line = line.strip()
if not line:
@@ -187,7 +196,8 @@ def main():
module = AnsibleModule(
argument_spec = dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
- key=dict(required=True, type='str')
+ key=dict(required=True, type='str'),
+ validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -198,4 +208,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
main()
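
fetch_url() (pulled in via ansible.module_utils.urls) replaces urllib2 here and picks the new validate_certs setting out of the module's own params. A hedged sketch of the fetch-and-cap pattern used in fetch_key(); the URL is hypothetical, and since the response can be None on failure the status check comes first:

    rsp, info = fetch_url(module, 'https://example.com/RPM-GPG-KEY')   # hypothetical URL
    if info['status'] != 200:
        module.fail_json(msg='failed to fetch key: %s' % info['msg'])
    key = rsp.read(MAXBYTES)    # honor the module's 8192-byte download cap
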
diff --git a/packaging/svr4pkg b/packaging/svr4pkg
index 485e7ebcbfe..4e790b46c52 100644
--- a/packaging/svr4pkg
+++ b/packaging/svr4pkg
@@ -57,6 +57,20 @@ options:
description:
- Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4)
required: false
+ zone:
+ description:
+ - Whether to install the package only in the current zone, or install it into all zones.
+ - The installation into all zones works only if you are working with the global zone.
+ required: false
+ default: "all"
+ choices: ["current", "all"]
+ version_added: "1.6"
+ category:
+ description:
+ - Install/Remove category instead of a single package.
+ required: false
+ choices: ["true", "false"]
+ version_added: "1.6"
'''
EXAMPLES = '''
@@ -64,22 +78,27 @@ EXAMPLES = '''
- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present
# Install a package directly from an http site
-- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present
+- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current
# Install a package with a response file
- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present
# Ensure that a package is not installed.
- svr4pkg: name=SUNWgnome-sound-recorder state=absent
+
+# Ensure that a category is not installed.
+- svr4pkg: name=FIREFOX state=absent category=true
'''
import os
import tempfile
-def package_installed(module, name):
+def package_installed(module, name, category):
cmd = [module.get_bin_path('pkginfo', True)]
cmd.append('-q')
+ if category:
+ cmd.append('-c')
cmd.append(name)
rc, out, err = module.run_command(' '.join(cmd))
if rc == 0:
@@ -116,13 +135,18 @@ def run_command(module, cmd):
cmd[0] = module.get_bin_path(progname, True)
return module.run_command(cmd)
-def package_install(module, name, src, proxy, response_file):
+def package_install(module, name, src, proxy, response_file, zone, category):
adminfile = create_admin_file()
- cmd = [ 'pkgadd', '-na', adminfile, '-d', src ]
+ cmd = [ 'pkgadd', '-n']
+ if zone == 'current':
+ cmd += [ '-G' ]
+ cmd += [ '-a', adminfile, '-d', src ]
if proxy is not None:
cmd += [ '-x', proxy ]
if response_file is not None:
cmd += [ '-r', response_file ]
+ if category:
+ cmd += [ '-Y' ]
cmd.append(name)
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
@@ -130,7 +154,10 @@ def package_install(module, name, src, proxy, response_file):
def package_uninstall(module, name, src, category):
adminfile = create_admin_file()
- cmd = [ 'pkgrm', '-na', adminfile, name]
+ if category:
+ cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ]
+ else:
+ cmd = [ 'pkgrm', '-na', adminfile, name]
(rc, out, err) = run_command(module, cmd)
os.unlink(adminfile)
return (rc, out, err)
@@ -142,7 +169,9 @@ def main():
state = dict(required = True, choices=['present', 'absent']),
src = dict(default = None),
proxy = dict(default = None),
- response_file = dict(default = None)
+ response_file = dict(default = None),
+ zone = dict(required=False, default = 'all', choices=['current','all']),
+ category = dict(default=False, type='bool')
),
supports_check_mode=True
)
@@ -151,6 +180,8 @@ def main():
src = module.params['src']
proxy = module.params['proxy']
response_file = module.params['response_file']
+ zone = module.params['zone']
+ category = module.params['category']
rc = None
out = ''
err = ''
@@ -162,20 +193,20 @@ def main():
if src is None:
module.fail_json(name=name,
msg="src is required when state=present")
- if not package_installed(module, name):
+ if not package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err) = package_install(module, name, src, proxy, response_file)
+ (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category)
# Stdout is normally empty but for some packages can be
# very long and is not often useful
if len(out) > 75:
out = out[:75] + '...'
elif state == 'absent':
- if package_installed(module, name):
+ if package_installed(module, name, category):
if module.check_mode:
module.exit_json(changed=True)
- (rc, out, err) = package_uninstall(module, name, src)
+ (rc, out, err) = package_uninstall(module, name, src, category)
out = out[:75]
if rc is None:
diff --git a/packaging/swdepot b/packaging/swdepot
index 6fd89088cc0..b41a860531f 100644
--- a/packaging/swdepot
+++ b/packaging/swdepot
@@ -19,6 +19,7 @@
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import re
+import pipes
DOCUMENTATION = '''
---
@@ -78,9 +79,9 @@ def query_package(module, name, depot=None):
cmd_list = '/usr/sbin/swlist -a revision -l product'
if depot:
- rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, depot, name, name))
+ rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
else:
- rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, name, name))
+ rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True)
if rc == 0:
version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1]
else:
diff --git a/packaging/urpmi b/packaging/urpmi
index b001ed94dee..be49dfd2648 100644
--- a/packaging/urpmi
+++ b/packaging/urpmi
@@ -91,7 +91,8 @@ def query_package(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
- rc = os.system("rpm -q %s" % (name))
+ cmd = "rpm -q %s" % (name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc == 0:
return True
else:
@@ -103,13 +104,14 @@ def query_package_provides(module, name):
# rpm -q returns 0 if the package is installed,
# 1 if it is not installed
- rc = os.system("rpm -q --provides %s >/dev/null" % (name))
+ cmd = "rpm -q --provides %s" % (name)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
return rc == 0
def update_package_db(module):
- rc = os.system("urpmi.update -a -q")
-
+ cmd = "urpmi.update -a -q"
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="could not update package db")
@@ -123,7 +125,8 @@ def remove_packages(module, packages):
if not query_package(module, package):
continue
- rc = os.system("%s --auto %s > /dev/null" % (URPME_PATH, package))
+ cmd = "%s --auto %s" % (URPME_PATH, package)
+ rc, stdout, stderr = module.run_command(cmd, check_rc=False)
if rc != 0:
module.fail_json(msg="failed to remove %s" % (package))
@@ -155,7 +158,7 @@ def install_packages(module, pkgspec, force=True, no_suggests=True):
else:
force_yes = ''
- cmd = ("%s --auto %s --quiet %s %s > /dev/null" % (URPMI_PATH, force_yes, no_suggests_yes, packages))
+ cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages))
rc, out, err = module.run_command(cmd)
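
One behavioral nuance of dropping os.system() throughout urpmi: os.system() returns the raw wait status (on POSIX the exit code sits in the high byte), whereas module.run_command() returns the command's actual exit code, along with the output that os.system() let spill straight to the terminal. For illustration:

    import os

    status = os.system('exit 3')     # raw wait status: 768 on POSIX (3 << 8)
    print os.WEXITSTATUS(status)     # 3, the exit code run_command() would report
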
diff --git a/packaging/yum b/packaging/yum
index 61bb836b43a..aded7abbb63 100644
--- a/packaging/yum
+++ b/packaging/yum
@@ -3,6 +3,7 @@
# (c) 2012, Red Hat, Inc
# Written by Seth Vidal
+# (c) 2014, Epic Games, Inc.
#
# This file is part of Ansible
#
@@ -108,7 +109,7 @@ EXAMPLES = '''
- name: remove the Apache package
yum: name=httpd state=removed
-- name: install the latest version of Apche from the testing repo
+- name: install the latest version of Apache from the testing repo
yum: name=httpd enablerepo=testing state=installed
- name: upgrade all packages
@@ -535,6 +536,7 @@ def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if found:
continue
+
# if not - then pass in the spec as what to install
# we could get here if nothing provides it but that's not
# the error we're catching here
diff --git a/source_control/bzr b/source_control/bzr
index bc2dfc3089f..996150a39af 100644
--- a/source_control/bzr
+++ b/source_control/bzr
@@ -75,16 +75,16 @@ class Bzr(object):
self.version = version
self.bzr_path = bzr_path
- def _command(self, args_list, **kwargs):
- (rc, out, err) = self.module.run_command(
- [self.bzr_path] + args_list, **kwargs)
+ def _command(self, args_list, cwd=None, **kwargs):
+ (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs)
return (rc, out, err)
def get_version(self):
'''samples the version of the bzr branch'''
- os.chdir(self.dest)
+
cmd = "%s revno" % self.bzr_path
- revno = os.popen(cmd).read().strip()
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ revno = stdout.strip()
return revno
def clone(self):
@@ -94,17 +94,18 @@ class Bzr(object):
os.makedirs(dest_dirname)
except:
pass
- os.chdir(dest_dirname)
if self.version.lower() != 'head':
args_list = ["branch", "-r", self.version, self.parent, self.dest]
else:
args_list = ["branch", self.parent, self.dest]
- return self._command(args_list, check_rc=True)
+ return self._command(args_list, check_rc=True, cwd=dest_dirname)
def has_local_mods(self):
- os.chdir(self.dest)
+
cmd = "%s status -S" % self.bzr_path
- lines = os.popen(cmd).read().splitlines()
+ rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest)
+ lines = stdout.splitlines()
+
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
@@ -114,30 +115,27 @@ class Bzr(object):
Discards any changes to tracked files in the working
tree since that commit.
'''
- os.chdir(self.dest)
if not force and self.has_local_mods():
self.module.fail_json(msg="Local modifications exist in branch (force=no).")
- return self._command(["revert"], check_rc=True)
+ return self._command(["revert"], check_rc=True, cwd=self.dest)
def fetch(self):
'''updates branch from remote sources'''
- os.chdir(self.dest)
if self.version.lower() != 'head':
- (rc, out, err) = self._command(["pull", "-r", self.version])
+ (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest)
else:
- (rc, out, err) = self._command(["pull"])
+ (rc, out, err) = self._command(["pull"], cwd=self.dest)
if rc != 0:
self.module.fail_json(msg="Failed to pull")
return (rc, out, err)
def switch_version(self):
'''once pulled, switch to a particular revno or revid'''
- os.chdir(self.dest)
if self.version.lower() != 'head':
args_list = ["revert", "-r", self.version]
else:
args_list = ["revert"]
- return self._command(args_list, check_rc=True)
+ return self._command(args_list, check_rc=True, cwd=self.dest)
# ===========================================
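
The bzr rewrite swaps process-global os.chdir() calls for a per-call cwd= argument, so no call can leave the module process sitting in an unexpected working directory. The pattern, sketched (module and dest assumed):

# before: mutates the working directory of the whole module process
os.chdir(dest)
revno = os.popen("bzr revno").read().strip()

# after: the directory applies only to the one child process
rc, stdout, stderr = module.run_command("bzr revno", cwd=dest)
revno = stdout.strip()
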
diff --git a/source_control/git b/source_control/git
index ca876c666b5..968b763b1a4 100644
--- a/source_control/git
+++ b/source_control/git
@@ -45,12 +45,13 @@ options:
branch name, or a tag name.
accept_hostkey:
required: false
- default: false
+ default: "no"
+ choices: [ "yes", "no" ]
version_added: "1.5"
description:
- - Add the hostkey for the repo url if not already added.
- If ssh_args contains "-o StrictHostKeyChecking=no", this
- parameter is ignored.
+ - if C(yes), adds the hostkey for the repo url if not already
+ added. If ssh_args contains "-o StrictHostKeyChecking=no",
+ this parameter is ignored.
ssh_opts:
required: false
default: None
@@ -118,11 +119,20 @@ options:
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
+
+ recursive:
+ required: false
+ default: "yes"
+ choices: [ "yes", "no" ]
+ version_added: "1.6"
+ description:
+ - if C(no), repository will be cloned without the --recursive
+ option, skipping sub-modules.
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
SSH will prompt user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
- the git module, with the following command: ssh-keyscan remote_host.com >> /etc/ssh/ssh_known_hosts."
+ the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
@@ -141,8 +151,37 @@ EXAMPLES = '''
import re
import tempfile
+def get_submodule_update_params(module, git_path, cwd):
+
+    #or: git submodule [--quiet] update [--init] [-N|--no-fetch]
+    #[-f|--force] [--rebase] [--reference <repository>] [--merge]
+    #[--recursive] [--] [<path>...]
+
+ params = []
+
+ # run a bad submodule command to get valid params
+ cmd = "%s submodule update --help" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
+ lines = stderr.split('\n')
+ update_line = None
+ for line in lines:
+ if 'git submodule [--quiet] update ' in line:
+ update_line = line
+ if update_line:
+ update_line = update_line.replace('[','')
+ update_line = update_line.replace(']','')
+ update_line = update_line.replace('|',' ')
+ parts = shlex.split(update_line)
+ for part in parts:
+ if part.startswith('--'):
+ part = part.replace('--', '')
+ params.append(part)
+
+ return params
+
def write_ssh_wrapper():
- fd, wrapper_path = tempfile.mkstemp()
+ module_dir = get_module_path()
+ fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
fh = os.fdopen(fd, 'w+b')
template = """#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
@@ -181,26 +220,29 @@ def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
if ssh_opts:
os.environ["GIT_SSH_OPTS"] = ssh_opts
-def get_version(git_path, dest, ref="HEAD"):
+def get_version(module, git_path, dest, ref="HEAD"):
''' samples the version of the git repo '''
- os.chdir(dest)
+
cmd = "%s rev-parse %s" % (git_path, ref)
- sha = os.popen(cmd).read().rstrip("\n")
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ sha = stdout.rstrip('\n')
return sha
-def clone(git_path, module, repo, dest, remote, depth, version, bare, reference):
+def clone(git_path, module, repo, dest, remote, depth, version, bare,
+ reference, recursive):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except:
pass
- os.chdir(dest_dirname)
cmd = [ git_path, 'clone' ]
if bare:
cmd.append('--bare')
else:
- cmd.extend([ '--origin', remote, '--recursive' ])
+ cmd.extend([ '--origin', remote ])
+ if recursive:
+ cmd.extend([ '--recursive' ])
if is_remote_branch(git_path, module, dest, repo, version) \
or is_remote_tag(git_path, module, dest, repo, version):
cmd.extend([ '--branch', version ])
@@ -209,19 +251,20 @@ def clone(git_path, module, repo, dest, remote, depth, version, bare, reference)
if reference:
cmd.extend([ '--reference', str(reference) ])
cmd.extend([ repo, dest ])
- module.run_command(cmd, check_rc=True)
+ module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if bare:
- os.chdir(dest)
if remote != 'origin':
- module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True)
+ module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
-def has_local_mods(git_path, dest, bare):
+def has_local_mods(module, git_path, dest, bare):
if bare:
return False
- os.chdir(dest)
- cmd = "%s status -s" % (git_path,)
- lines = os.popen(cmd).read().splitlines()
+
+ cmd = "%s status -s" % (git_path)
+ rc, stdout, stderr = module.run_command(cmd, cwd=dest)
+ lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
+
return len(lines) > 0
def reset(git_path, module, dest):
@@ -230,16 +273,16 @@ def reset(git_path, module, dest):
Discards any changes to tracked files in working
tree since that commit.
'''
- os.chdir(dest)
cmd = "%s reset --hard HEAD" % (git_path,)
- return module.run_command(cmd, check_rc=True)
+ return module.run_command(cmd, check_rc=True, cwd=dest)
def get_remote_head(git_path, module, dest, version, remote, bare):
cloning = False
+ cwd = None
if remote == module.params['repo']:
cloning = True
else:
- os.chdir(dest)
+ cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
@@ -255,7 +298,7 @@ def get_remote_head(git_path, module, dest, version, remote, bare):
# appears to be a sha1. return as-is since it appears
# cannot check for a specific sha1 on remote
return version
- (rc, out, err) = module.run_command(cmd, check_rc=True )
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version)
rev = out.split()[0]
@@ -263,17 +306,16 @@ def get_remote_head(git_path, module, dest, version, remote, bare):
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
- (rc, out, err) = module.run_command(cmd, check_rc=True)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if version in out:
return True
else:
return False
def get_branches(git_path, module, dest):
- os.chdir(dest)
branches = []
cmd = '%s branch -a' % (git_path,)
- (rc, out, err) = module.run_command(cmd)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out)
for line in out.split('\n'):
@@ -281,10 +323,9 @@ def get_branches(git_path, module, dest):
return branches
def get_tags(git_path, module, dest):
- os.chdir(dest)
tags = []
cmd = '%s tag' % (git_path,)
- (rc, out, err) = module.run_command(cmd)
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out)
for line in out.split('\n'):
@@ -293,7 +334,7 @@ def get_tags(git_path, module, dest):
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
- (rc, out, err) = module.run_command(cmd, check_rc=True)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if version in out:
return True
else:
@@ -352,18 +393,17 @@ def get_head_branch(git_path, module, dest, remote, bare=False):
def fetch(git_path, module, repo, dest, version, remote, bare):
''' updates repo from remote sources '''
- os.chdir(dest)
if bare:
- (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'])
+ (rc, out1, err1) = module.run_command([git_path, 'fetch', remote, '+refs/heads/*:refs/heads/*'], cwd=dest)
else:
- (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote))
+ (rc, out1, err1) = module.run_command("%s fetch %s" % (git_path, remote), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to download remote objects and refs")
if bare:
- (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'])
+ (rc, out2, err2) = module.run_command([git_path, 'fetch', remote, '+refs/tags/*:refs/tags/*'], cwd=dest)
else:
- (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote))
+ (rc, out2, err2) = module.run_command("%s fetch --tags %s" % (git_path, remote), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to download remote objects and refs")
(rc, out3, err3) = submodule_update(git_path, module, dest)
@@ -371,28 +411,33 @@ def fetch(git_path, module, repo, dest, version, remote, bare):
def submodule_update(git_path, module, dest):
''' init and update any submodules '''
- os.chdir(dest)
+
+ # get the valid submodule params
+ params = get_submodule_update_params(module, git_path, dest)
+
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [ git_path, 'submodule', 'sync' ]
- (rc, out, err) = module.run_command(cmd, check_rc=True)
- cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ]
- (rc, out, err) = module.run_command(cmd)
+ (rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
+ if 'remote' in params:
+            cmd = [ git_path, 'submodule', 'update', '--init', '--recursive', '--remote' ]
+ else:
+ cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ]
+ (rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
- module.fail_json(msg="Failed to init/update submodules")
+ module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def switch_version(git_path, module, dest, remote, version):
''' once pulled, switch to a particular SHA, tag, or branch '''
- os.chdir(dest)
cmd = ''
if version != 'HEAD':
if is_remote_branch(git_path, module, dest, remote, version):
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
- (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version))
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
@@ -400,11 +445,11 @@ def switch_version(git_path, module, dest, remote, version):
cmd = "%s checkout --force %s" % (git_path, version)
else:
branch = get_head_branch(git_path, module, dest, remote)
- (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch))
+ (rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch)
cmd = "%s reset --hard %s" % (git_path, remote)
- (rc, out1, err1) = module.run_command(cmd)
+ (rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version))
@@ -431,6 +476,7 @@ def main():
ssh_opts=dict(default=None, required=False),
executable=dict(default=None),
bare=dict(default='no', type='bool'),
+ recursive=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
@@ -464,6 +510,8 @@ def main():
else:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
+ recursive = module.params['recursive']
+
if bare:
gitconfig = os.path.join(dest, 'config')
else:
@@ -479,17 +527,18 @@ def main():
if module.check_mode:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
module.exit_json(changed=True, before=before, after=remote_head)
- clone(git_path, module, repo, dest, remote, depth, version, bare, reference)
+ clone(git_path, module, repo, dest, remote, depth, version, bare,
+ reference, recursive)
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
# requested.
- before = get_version(git_path, dest)
+ before = get_version(module, git_path, dest)
module.exit_json(changed=False, before=before, after=before)
else:
# else do a pull
- local_mods = has_local_mods(git_path, dest, bare)
- before = get_version(git_path, dest)
+ local_mods = has_local_mods(module, git_path, dest, bare)
+ before = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
@@ -519,7 +568,7 @@ def main():
switch_version(git_path, module, dest, remote, version)
# determine if we changed anything
- after = get_version(git_path, dest)
+ after = get_version(module, git_path, dest)
changed = False
if before != after or local_mods:
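
With the new recursive flag defaulting to yes, existing playbooks keep their submodule behavior, while a task that wants a plain clone can now opt out. An illustrative task (repo URL and path are placeholders):

- name: clone without initializing submodules
  git: repo=git://example.org/project.git dest=/srv/project recursive=no
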
diff --git a/source_control/github_hooks b/source_control/github_hooks
index 55eb8d3c8d3..6a8d1ced935 100644
--- a/source_control/github_hooks
+++ b/source_control/github_hooks
@@ -19,7 +19,6 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
-import urllib2
import base64
DOCUMENTATION = '''
@@ -51,6 +50,14 @@ options:
- This tells the githooks module what you want it to do.
required: true
choices: [ "create", "cleanall" ]
+ validate_certs:
+ description:
+ - If C(no), SSL certificates for the target repo will not be validated. This should only be used
+ on personally controlled sites using self-signed certificates.
+ required: false
+ default: 'yes'
+ choices: ['yes', 'no']
+
author: Phillip Gentry, CX Inc
'''
@@ -62,16 +69,19 @@ EXAMPLES = '''
- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }}
'''
-def list(hookurl, oauthkey, repo, user):
+def list(module, hookurl, oauthkey, repo, user):
url = "%s/hooks" % repo
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
- req = urllib2.Request(url)
- req.add_header("Authorization", "Basic %s" % auth)
- res = urllib2.urlopen(req)
- out = res.read()
- return False, out
-
-def clean504(hookurl, oauthkey, repo, user):
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, headers=headers)
+ if info['status'] != 200:
+ return False, ''
+ else:
+ return False, response.read()
+
+def clean504(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+    current_hooks = list(module, hookurl, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
@@ -79,11 +89,11 @@ def clean504(hookurl, oauthkey, repo, user):
if hook['last_response']['code'] == 504:
# print "Last response was an ERROR for hook:"
# print hook['id']
- delete(hookurl, oauthkey, repo, user, hook['id'])
+ delete(module, hookurl, oauthkey, repo, user, hook['id'])
return 0, current_hooks
-def cleanall(hookurl, oauthkey, repo, user):
+def cleanall(module, hookurl, oauthkey, repo, user):
-    current_hooks = list(hookurl, oauthkey, repo, user)[1]
+    current_hooks = list(module, hookurl, oauthkey, repo, user)[1]
decoded = json.loads(current_hooks)
@@ -91,11 +101,11 @@ def cleanall(hookurl, oauthkey, repo, user):
if hook['last_response']['code'] != 200:
# print "Last response was an ERROR for hook:"
# print hook['id']
- delete(hookurl, oauthkey, repo, user, hook['id'])
+ delete(module, hookurl, oauthkey, repo, user, hook['id'])
return 0, current_hooks
-def create(hookurl, oauthkey, repo, user):
+def create(module, hookurl, oauthkey, repo, user):
url = "%s/hooks" % repo
values = {
"active": True,
@@ -107,29 +117,23 @@ def create(hookurl, oauthkey, repo, user):
}
data = json.dumps(values)
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
- out='[]'
- try :
- req = urllib2.Request(url)
- req.add_data(data)
- req.add_header("Authorization", "Basic %s" % auth)
- res = urllib2.urlopen(req)
- out = res.read()
- return 0, out
- except urllib2.HTTPError, e :
- if e.code == 422 :
- return 0, out
-
-def delete(hookurl, oauthkey, repo, user, hookid):
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+ response, info = fetch_url(module, url, data=data, headers=headers)
+ if info['status'] != 200:
+ return 0, '[]'
+ else:
+ return 0, response.read()
+
+def delete(module, hookurl, oauthkey, repo, user, hookid):
url = "%s/hooks/%s" % (repo, hookid)
auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '')
- req = urllib2.Request(url)
- req.get_method = lambda: 'DELETE'
- req.add_header("Authorization", "Basic %s" % auth)
- # req.add_header('Content-Type', 'application/xml')
- # req.add_header('Accept', 'application/xml')
- res = urllib2.urlopen(req)
- out = res.read()
- return out
+ headers = {
+ 'Authorization': 'Basic %s' % auth,
+ }
+    response, info = fetch_url(module, url, headers=headers, method='DELETE')
+ return response.read()
def main():
module = AnsibleModule(
@@ -139,6 +143,7 @@ def main():
oauthkey=dict(required=True),
repo=dict(required=True),
user=dict(required=True),
+ validate_certs=dict(default='yes', type='bool'),
)
)
@@ -149,16 +154,16 @@ def main():
user = module.params['user']
if action == "list":
- (rc, out) = list(hookurl, oauthkey, repo, user)
+ (rc, out) = list(module, hookurl, oauthkey, repo, user)
if action == "clean504":
- (rc, out) = clean504(hookurl, oauthkey, repo, user)
+ (rc, out) = clean504(module, hookurl, oauthkey, repo, user)
if action == "cleanall":
- (rc, out) = cleanall(hookurl, oauthkey, repo, user)
+ (rc, out) = cleanall(module, hookurl, oauthkey, repo, user)
if action == "create":
- (rc, out) = create(hookurl, oauthkey, repo, user)
+ (rc, out) = create(module, hookurl, oauthkey, repo, user)
if rc != 0:
module.fail_json(msg="failed", result=out)
@@ -168,4 +173,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+
main()
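
The move from urllib2 to fetch_url is what makes the new validate_certs option effective: fetch_url reads validate_certs from the module parameters and applies it to every request. The calling convention, roughly (module, url and headers assumed):

response, info = fetch_url(module, url, headers=headers)
if info['status'] != 200:
    # on failure, info carries 'status' and 'msg'; response may be None
    module.fail_json(msg="request failed: %s" % info.get('msg', ''))
body = response.read()
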
diff --git a/source_control/subversion b/source_control/subversion
index 497052af005..29d62240af3 100644
--- a/source_control/subversion
+++ b/source_control/subversion
@@ -27,7 +27,7 @@ description:
version_added: "0.7"
author: Dane Summers, njharman@gmail.com
notes:
- - Requres I(svn) to be installed on the client.
+ - Requires I(svn) to be installed on the client.
requirements: []
options:
repo:
@@ -70,11 +70,20 @@ options:
description:
- Path to svn executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
+ export:
+ required: false
+ default: False
+ version_added: "1.6"
+ description:
+ - If True, do export instead of checkout/update.
'''
EXAMPLES = '''
# Checkout subversion repository to specified folder.
- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/checkout
+
+# Export subversion directory to folder
+- subversion: repo=svn+ssh://an.example.org/path/to/repo dest=/src/export export=True
'''
import re
@@ -110,6 +119,10 @@ class Subversion(object):
def checkout(self):
'''Creates new svn working directory if it does not already exist.'''
self._exec(["checkout", "-r", self.revision, self.repo, self.dest])
+
+ def export(self, force=False):
+ '''Export svn repo to directory'''
+ self._exec(["export", "-r", self.revision, self.repo, self.dest])
def switch(self):
'''Change working directory's repo.'''
@@ -163,6 +176,7 @@ def main():
username=dict(required=False),
password=dict(required=False),
executable=dict(default=None),
+        export=dict(default=False, required=False, type='bool'),
),
supports_check_mode=True
)
@@ -174,6 +188,7 @@ def main():
username = module.params['username']
password = module.params['password']
svn_path = module.params['executable'] or module.get_bin_path('svn', True)
+ export = module.params['export']
os.environ['LANG'] = 'C'
svn = Subversion(module, dest, repo, revision, username, password, svn_path)
@@ -183,7 +198,10 @@ def main():
local_mods = False
if module.check_mode:
module.exit_json(changed=True)
- svn.checkout()
+ if not export:
+ svn.checkout()
+ else:
+ svn.export()
elif os.path.exists("%s/.svn" % (dest, )):
# Order matters. Need to get local mods before switch to avoid false
# positives. Need to switch before revert to ensure we are reverting to
diff --git a/system/alternatives b/system/alternatives
new file mode 100755
index 00000000000..503f9745f12
--- /dev/null
+++ b/system/alternatives
@@ -0,0 +1,137 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+"""
+Ansible module to manage symbolic link alternatives.
+(c) 2014, Gabe Mulley
+
+This file is part of Ansible
+
+Ansible is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+Ansible is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+DOCUMENTATION = '''
+---
+module: alternatives
+short_description: Manages alternative programs for common commands
+description:
+  - Manages symbolic links using the 'update-alternatives' tool provided on Debian-like systems.
+ - Useful when multiple programs are installed but provide similar functionality (e.g. different editors).
+version_added: "1.6"
+options:
+ name:
+ description:
+ - The generic name of the link.
+ required: true
+ path:
+ description:
+ - The path to the real executable that the link should point to.
+ required: true
+ link:
+ description:
+ - The path to the symbolic link that should point to the real executable.
+ required: false
+requirements: [ update-alternatives ]
+'''
+
+EXAMPLES = '''
+- name: correct java version selected
+ alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+
+- name: alternatives link created
+ alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible
+'''
+
+import subprocess
+
+UPDATE_ALTERNATIVES = '/usr/sbin/update-alternatives'
+DEFAULT_LINK_PRIORITY = 50
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ path = dict(required=True),
+ link = dict(required=False),
+ )
+ )
+
+ params = module.params
+ name = params['name']
+ path = params['path']
+ link = params['link']
+
+ current_path = None
+ all_alternatives = []
+
+ (rc, query_output, query_error) = module.run_command(
+ [UPDATE_ALTERNATIVES, '--query', name]
+ )
+
+ # Gather the current setting and all alternatives from the query output.
+ # Query output should look something like this:
+
+ # Name: java
+ # Link: /usr/bin/java
+ # Slaves:
+ # java.1.gz /usr/share/man/man1/java.1.gz
+ # Status: manual
+ # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java
+
+ # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java
+ # Priority: 1061
+ # Slaves:
+ # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz
+
+ # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java
+ # Priority: 1071
+ # Slaves:
+ # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz
+
+ if rc == 0:
+ for line in query_output.splitlines():
+ split_line = line.split(':')
+ if len(split_line) == 2:
+ key = split_line[0]
+ value = split_line[1].strip()
+ if key == 'Value':
+ current_path = value
+ elif key == 'Alternative':
+ all_alternatives.append(value)
+
+ if current_path != path:
+ try:
+ # install the requested path if necessary
+ if path not in all_alternatives:
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)],
+ check_rc=True
+ )
+
+ # select the requested path
+ module.run_command(
+ [UPDATE_ALTERNATIVES, '--set', name, path],
+ check_rc=True
+ )
+
+ module.exit_json(changed=True)
+ except subprocess.CalledProcessError as cpe:
+        module.fail_json(msg=str(cpe))
+ else:
+ module.exit_json(changed=False)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
diff --git a/system/at b/system/at
index ffac9d1d535..c63527563fd 100644
--- a/system/at
+++ b/system/at
@@ -21,17 +21,12 @@
DOCUMENTATION = '''
---
module: at
-short_description: Schedule the execution of a command or scripts via the at command.
+short_description: Schedule the execution of a command or script file via the at command.
description:
- - Use this module to schedule a command or script to run once in the future.
- - All jobs are executed in the a queue.
-version_added: "0.0"
+ - Use this module to schedule a command or script file to run once in the future.
+ - All jobs are executed in the 'a' queue.
+version_added: "1.5"
options:
- user:
- description:
- - The user to execute the at command as.
- required: false
- default: null
command:
description:
- A command to be executed in the future.
@@ -39,25 +34,29 @@ options:
default: null
script_file:
description:
- - An existing script to be executed in the future.
+ - An existing script file to be executed in the future.
required: false
default: null
- unit_count:
+ count:
description:
- - The count of units in the future to execute the command or script.
+ - The count of units in the future to execute the command or script file.
required: true
- unit_type:
+ units:
description:
- - The type of units in the future to execute the command or script.
+ - The type of units in the future to execute the command or script file.
required: true
choices: ["minutes", "hours", "days", "weeks"]
- action:
+ state:
description:
- - The action to take for the job defaulting to add. Unique will verify that there is only one entry in the queue.
- - Delete will remove all existing queued jobs.
- required: true
- choices: ["add", "delete", "unique"]
- default: add
+      - The state dictates if the command or script file should be evaluated as present (added) or absent (deleted).
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+ unique:
+ description:
+      - If a matching job is present, a new job will not be added.
+ required: false
+ default: false
requirements:
- at
author: Richard Isaacson
@@ -65,33 +64,45 @@ author: Richard Isaacson
EXAMPLES = '''
# Schedule a command to execute in 20 minutes as root.
-- at: command="ls -d / > /dev/null" unit_count=20 unit_type="minutes"
-
-# Schedule a script to execute in 1 hour as the neo user.
-- at: script_file="/some/script.sh" user="neo" unit_count=1 unit_type="hours"
+- at: command="ls -d / > /dev/null" count=20 units="minutes"
# Match a command to an existing job and delete the job.
-- at: command="ls -d / > /dev/null" action="delete"
+- at: command="ls -d / > /dev/null" state="absent"
# Schedule a command to execute in 20 minutes making sure it is unique in the queue.
-- at: command="ls -d / > /dev/null" action="unique" unit_count=20 unit_type="minutes"
+- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes"
'''
import os
import tempfile
-def matching_jobs(module, at_cmd, script_file, user=None):
+
+def add_job(module, result, at_cmd, count, units, command, script_file):
+ at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file)
+ rc, out, err = module.run_command(at_command, check_rc=True)
+ if command:
+ os.unlink(script_file)
+ result['changed'] = True
+
+
+def delete_job(module, result, at_cmd, command, script_file):
+ for matching_job in get_matching_jobs(module, at_cmd, script_file):
+ at_command = "%s -d %s" % (at_cmd, matching_job)
+ rc, out, err = module.run_command(at_command, check_rc=True)
+ result['changed'] = True
+ if command:
+ os.unlink(script_file)
+ module.exit_json(**result)
+
+
+def get_matching_jobs(module, at_cmd, script_file):
matching_jobs = []
atq_cmd = module.get_bin_path('atq', True)
# Get list of job numbers for the user.
- atq_command = "%s" % (atq_cmd)
- if user:
- atq_command = "su '%s' -c '%s'" % (user, atq_command)
- rc, out, err = module.run_command(atq_command)
- if rc != 0:
- module.fail_json(msg=err)
+    rc, out, err = module.run_command(atq_cmd, check_rc=True)
current_jobs = out.splitlines()
if len(current_jobs) == 0:
return matching_jobs
@@ -104,100 +115,83 @@ def matching_jobs(module, at_cmd, script_file, user=None):
for current_job in current_jobs:
split_current_job = current_job.split()
at_command = "%s -c %s" % (at_cmd, split_current_job[0])
- if user:
- at_command = "su '%s' -c '%s'" % (user, at_command)
- rc, out, err = module.run_command(at_command)
- if rc != 0:
- module.fail_json(msg=err)
+ rc, out, err = module.run_command(at_command, check_rc=True)
if script_file_string in out:
matching_jobs.append(split_current_job[0])
# Return the list.
return matching_jobs
-#================================================
+
+def create_tempfile(command):
+ filed, script_file = tempfile.mkstemp(prefix='at')
+ fileh = os.fdopen(filed, 'w')
+ fileh.write(command)
+ fileh.close()
+ return script_file
+
def main():
module = AnsibleModule(
argument_spec = dict(
- user=dict(required=False),
- command=dict(required=False),
- script_file=dict(required=False),
- unit_count=dict(required=False,
- type='int'),
- unit_type=dict(required=False,
- default=None,
- choices=["minutes", "hours", "days", "weeks"],
- type="str"),
- action=dict(required=False,
- default="add",
- choices=["add", "delete", "unique"],
- type="str")
+ command=dict(required=False,
+ type='str'),
+ script_file=dict(required=False,
+ type='str'),
+ count=dict(required=False,
+ type='int'),
+ units=dict(required=False,
+ default=None,
+ choices=['minutes', 'hours', 'days', 'weeks'],
+ type='str'),
+ state=dict(required=False,
+ default='present',
+ choices=['present', 'absent'],
+ type='str'),
+ unique=dict(required=False,
+ default=False,
+ type='bool')
),
- supports_check_mode = False,
+ mutually_exclusive=[['command', 'script_file']],
+ required_one_of=[['command', 'script_file']],
+ supports_check_mode=False
)
at_cmd = module.get_bin_path('at', True)
- user = module.params['user']
command = module.params['command']
script_file = module.params['script_file']
- unit_count = module.params['unit_count']
- unit_type = module.params['unit_type']
- action = module.params['action']
+ count = module.params['count']
+ units = module.params['units']
+ state = module.params['state']
+ unique = module.params['unique']
- if ((action == 'add') and (not unit_count or not unit_type)):
- module.fail_json(msg="add action requires unit_count and unit_type")
+ if (state == 'present') and (not count or not units):
+ module.fail_json(msg="present state requires count and units")
- if (not command) and (not script_file):
- module.fail_json(msg="command or script_file not specified")
-
- if command and script_file:
- module.fail_json(msg="command and script_file are mutually exclusive")
-
- result = {}
- result['action'] = action
- result['changed'] = False
+ result = {'state': state, 'changed': False}
# If command transform it into a script_file
if command:
- filed, script_file = tempfile.mkstemp(prefix='at')
- fileh = os.fdopen(filed, 'w')
- fileh.write(command)
- fileh.close()
-
- # if delete then return
- if action == 'delete':
- for matching_job in matching_jobs(module, at_cmd, script_file, user):
- at_command = "%s -d %s" % (at_cmd, matching_job)
- if user:
- at_command = "su '%s' -c '%s'" % (user, at_ccommand)
- rc, out, err = module.run_command(at_command)
- if rc != 0:
- module.fail_json(msg=err)
- result['changed'] = True
- module.exit_json(**result)
+ script_file = create_tempfile(command)
+
+ # if absent remove existing and return
+ if state == 'absent':
+ delete_job(module, result, at_cmd, command, script_file)
# if unique if existing return unchanged
- if action == 'unique':
- if len(matching_jobs(module, at_cmd, script_file, user)) != 0:
+ if unique:
+ if len(get_matching_jobs(module, at_cmd, script_file)) != 0:
+ if command:
+ os.unlink(script_file)
module.exit_json(**result)
result['script_file'] = script_file
- result['unit_count'] = unit_count
- result['unit_type'] = unit_type
-
- at_command = "%s now + %s %s -f %s" % (at_cmd, unit_count, unit_type, script_file)
- if user:
- # We expect that if this is an installed the permissions are already correct for the user to execute it.
- at_command = "su '%s' -c '%s'" % (user, at_command)
- rc, out, err = module.run_command(at_command)
- if rc != 0:
- module.fail_json(msg=err)
- if command:
- os.unlink(script_file)
- result['changed'] = True
+ result['count'] = count
+ result['units'] = units
+
+ add_job(module, result, at_cmd, count, units, command, script_file)
module.exit_json(**result)
diff --git a/system/authorized_key b/system/authorized_key
index 1a7c8b97b0e..c40edb1f162 100644
--- a/system/authorized_key
+++ b/system/authorized_key
@@ -48,7 +48,12 @@ options:
version_added: "1.2"
manage_dir:
description:
- - Whether this module should manage the directory of the authorized_keys file. Make sure to set C(manage_dir=no) if you are using an alternate directory for authorized_keys set with C(path), since you could lock yourself out of SSH access. See the example below.
+ - Whether this module should manage the directory of the authorized key file. If
+ set, the module will create the directory, as well as set the owner and permissions
+ of an existing directory. Be sure to
+ set C(manage_dir=no) if you are using an alternate directory for
+ authorized_keys, as set with C(path), since you could lock yourself out of
+ SSH access. See the example below.
required: false
choices: [ "yes", "no" ]
default: "yes"
@@ -165,7 +170,7 @@ def keyfile(module, user, write=False, path=None, manage_dir=True):
uid = user_entry.pw_uid
gid = user_entry.pw_gid
- if manage_dir in BOOLEANS_TRUE:
+ if manage_dir:
if not os.path.exists(sshdir):
os.mkdir(sshdir, 0700)
if module.selinux_enabled():
@@ -199,33 +204,19 @@ def parseoptions(module, options):
'''
options_dict = keydict() #ordered dict
if options:
- token_exp = [
- # matches separator
- (r',+', False),
- # matches option with value, e.g. from="x,y"
- (r'([a-z0-9-]+)="((?:[^"\\]|\\.)*)"', True),
- # matches single option, e.g. no-agent-forwarding
- (r'[a-z0-9-]+', True)
- ]
-
- pos = 0
- while pos < len(options):
- match = None
- for pattern, is_valid_option in token_exp:
- regex = re.compile(pattern, re.IGNORECASE)
- match = regex.match(options, pos)
- if match:
- text = match.group(0)
- if is_valid_option:
- if len(match.groups()) == 2:
- options_dict[match.group(1)] = match.group(2)
- else:
- options_dict[text] = None
- break
- if not match:
- module.fail_json(msg="invalid option string: %s" % options)
- else:
- pos = match.end(0)
+ try:
+ # the following regex will split on commas while
+ # ignoring those commas that fall within quotes
+ regex = re.compile(r'''((?:[^,"']|"[^"]*"|'[^']*')+)''')
+ parts = regex.split(options)[1:-1]
+ for part in parts:
+ if "=" in part:
+ (key, value) = part.split("=", 1)
+ options_dict[key] = value
+ elif part != ",":
+ options_dict[part] = None
+ except:
+ module.fail_json(msg="invalid option string: %s" % options)
return options_dict
@@ -254,7 +245,7 @@ def parsekey(module, raw_key):
# split key safely
lex = shlex.shlex(raw_key)
- lex.quotes = ["'", '"']
+ lex.quotes = []
lex.commenters = '' #keep comment hashes
lex.whitespace_split = True
key_parts = list(lex)
@@ -315,7 +306,7 @@ def writekeys(module, filename, keys):
option_strings = []
for option_key in options.keys():
if options[option_key]:
- option_strings.append("%s=\"%s\"" % (option_key, options[option_key]))
+ option_strings.append("%s=%s" % (option_key, options[option_key]))
else:
option_strings.append("%s" % option_key)
diff --git a/system/capabilities b/system/capabilities
new file mode 100644
index 00000000000..f4a9f62c0d0
--- /dev/null
+++ b/system/capabilities
@@ -0,0 +1,187 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Nate Coraor
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = '''
+---
+module: capabilities
+short_description: Manage Linux capabilities
+description:
+    - This module manipulates file privileges using the Linux capabilities(7) system.
+version_added: "1.6"
+options:
+ path:
+ description:
+ - Specifies the path to the file to be managed.
+ required: true
+ default: null
+ capability:
+ description:
+ - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
+ required: true
+ default: null
+ aliases: [ 'cap' ]
+ state:
+ description:
+ - Whether the entry should be present or absent in the file's capabilities.
+ choices: [ "present", "absent" ]
+ default: present
+notes:
+ - The capabilities system will automatically transform operators and flags
+    into the effective set, so, for example, cap_foo=ep will probably become
+    cap_foo+ep. This module does not attempt to determine the final operator
+ and flags to compare, so you will want to ensure that your capabilities
+ argument matches the final capabilities.
+requirements: []
+author: Nate Coraor
+'''
+
+EXAMPLES = '''
+# Set cap_sys_chroot+ep on /foo
+- capabilities: path=/foo capability=cap_sys_chroot+ep state=present
+
+# Remove cap_net_bind_service from /bar
+- capabilities: path=/bar capability=cap_net_bind_service state=absent
+'''
+
+
+OPS = ( '=', '-', '+' )
+
+# ==============================================================
+
+import os
+import tempfile
+import re
+
+class CapabilitiesModule(object):
+
+ platform = 'Linux'
+ distribution = None
+
+ def __init__(self, module):
+ self.module = module
+ self.path = module.params['path'].strip()
+ self.capability = module.params['capability'].strip().lower()
+ self.state = module.params['state']
+ self.getcap_cmd = module.get_bin_path('getcap', required=True)
+ self.setcap_cmd = module.get_bin_path('setcap', required=True)
+ self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present')
+
+ self.run()
+
+ def run(self):
+
+ current = self.getcap(self.path)
+ caps = [ cap[0] for cap in current ]
+
+ if self.state == 'present' and self.capability_tup not in current:
+ # need to add capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list if it's already set (but op/flags differ)
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ # add new cap with correct op/flags
+ current.append( self.capability_tup )
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ elif self.state == 'absent' and self.capability_tup[0] in caps:
+ # need to remove capability
+ if self.module.check_mode:
+ self.module.exit_json(changed=True, msg='capabilities changed')
+ else:
+ # remove from current cap list and then set current list
+ current = filter(lambda x: x[0] != self.capability_tup[0], current)
+ self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
+ self.module.exit_json(changed=False, state=self.state)
+
+ def getcap(self, path):
+ rval = []
+ cmd = "%s -v %s" % (self.getcap_cmd, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ # If file xattrs are set but no caps are set the output will be:
+ # '/foo ='
+ # If file xattrs are unset the output will be:
+ # '/foo'
+        # If the file does not exist the output will be (with rc == 0...):
+ # '/foo (No such file or directory)'
+ if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
+ self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
+ if stdout.strip() != path:
+ caps = stdout.split(' =')[1].strip().split()
+ for cap in caps:
+ cap = cap.lower()
+ # getcap condenses capabilities with the same op/flags into a
+ # comma-separated list, so we have to parse that
+ if ',' in cap:
+ cap_group = cap.split(',')
+ cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
+ for subcap in cap_group:
+ rval.append( ( subcap, op, flags ) )
+ else:
+ rval.append(self._parse_cap(cap))
+ return rval
+
+ def setcap(self, path, caps):
+ caps = ' '.join([ ''.join(cap) for cap in caps ])
+ cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
+ rc, stdout, stderr = self.module.run_command(cmd)
+ if rc != 0:
+ self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
+ else:
+ return stdout
+
+ def _parse_cap(self, cap, op_required=True):
+ opind = -1
+ try:
+ i = 0
+ while opind == -1:
+ opind = cap.find(OPS[i])
+ i += 1
+ except:
+ if op_required:
+ self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
+ else:
+ return (cap, None, None)
+ op = cap[opind]
+ cap, flags = cap.split(op)
+ return (cap, op, flags)
+
+# ==============================================================
+# main
+
+def main():
+
+ # defining module
+ module = AnsibleModule(
+ argument_spec = dict(
+ path = dict(aliases=['key'], required=True),
+ capability = dict(aliases=['cap'], required=True),
+ state = dict(default='present', choices=['present', 'absent']),
+ ),
+ supports_check_mode=True
+ )
+
+ CapabilitiesModule(module)
+
+ sys.exit(0)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
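
The capabilities module round-trips through getcap/setcap, normalizing each capability into a (name, operator, flags) tuple for comparison. Roughly (output format illustrative):

# getcap -v /foo          ->  "/foo = cap_sys_chroot+ep"
# parsed by getcap()      ->  [('cap_sys_chroot', '+', 'ep')]
# re-applied by setcap()  ->  setcap 'cap_sys_chroot+ep' /foo
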
diff --git a/system/cron b/system/cron
index 39727b4c769..32e7e872f06 100644
--- a/system/cron
+++ b/system/cron
@@ -44,7 +44,6 @@ options:
name:
description:
- Description of a crontab entry.
- required: false
default: null
user:
description:
@@ -145,6 +144,7 @@ import os
import re
import tempfile
import platform
+import pipes
CRONCMD = "/usr/bin/crontab"
@@ -190,7 +190,8 @@ class CronTab(object):
except:
raise CronTabError("Unexpected error:", sys.exc_info()[0])
else:
- (rc, out, err) = self.module.run_command(self._read_user_execute())
+ # using safely quoted shell for now, but this really should be two non-shell calls instead. FIXME
+ (rc, out, err) = self.module.run_command(self._read_user_execute(), use_unsafe_shell=True)
if rc != 0 and rc != 1: # 1 can mean that there are no jobs.
raise CronTabError("Unable to read crontab")
@@ -235,8 +236,8 @@ class CronTab(object):
# Add the entire crontab back to the user crontab
if not self.cron_file:
- # os.system(self._write_execute(path))
- (rc, out, err) = self.module.run_command(self._write_execute(path))
+ # quoting shell args for now but really this should be two non-shell calls. FIXME
+ (rc, out, err) = self.module.run_command(self._write_execute(path), use_unsafe_shell=True)
os.unlink(path)
if rc != 0:
@@ -350,9 +351,11 @@ class CronTab(object):
user = ''
if self.user:
if platform.system() == 'SunOS':
- return "su '%s' -c '%s -l'" % (self.user, CRONCMD)
+ return "su %s -c '%s -l'" % (pipes.quote(self.user), pipes.quote(CRONCMD))
+ elif platform.system() == 'AIX':
+ return "%s -l %s" % (pipes.quote(CRONCMD), pipes.quote(self.user))
else:
- user = '-u %s' % self.user
+ user = '-u %s' % pipes.quote(self.user)
return "%s %s %s" % (CRONCMD , user, '-l')
def _write_execute(self, path):
@@ -361,11 +364,11 @@ class CronTab(object):
"""
user = ''
if self.user:
- if platform.system() == 'SunOS':
- return "chown %s %s ; su '%s' -c '%s %s'" % (self.user, path, self.user, CRONCMD, path)
+ if platform.system() in [ 'SunOS', 'AIX' ]:
+ return "chown %s %s ; su '%s' -c '%s %s'" % (pipes.quote(self.user), pipes.quote(path), pipes.quote(self.user), CRONCMD, pipes.quote(path))
else:
- user = '-u %s' % self.user
- return "%s %s %s" % (CRONCMD , user, path)
+ user = '-u %s' % pipes.quote(self.user)
+ return "%s %s %s" % (CRONCMD , user, pipes.quote(path))
diff --git a/system/debconf b/system/debconf
index 5b47d6b2b18..5cb0ba1e8fc 100644
--- a/system/debconf
+++ b/system/debconf
@@ -34,6 +34,7 @@ notes:
- A number of questions have to be answered (depending on the package).
Use 'debconf-show ' on any Debian or derivative with the package
installed to see questions/settings available.
+requirements: [ debconf, debconf-utils ]
options:
name:
description:
@@ -75,7 +76,7 @@ EXAMPLES = '''
debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8
# set to generate locales:
-debconf: name=locales question='locales/locales_to_be_generated value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8'
+debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8'
# Accept oracle license
debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select'
@@ -84,6 +85,8 @@ debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-
debconf: name='tzdata'
'''
+import pipes
+
def get_selections(module, pkg):
cmd = [module.get_bin_path('debconf-show', True), pkg]
rc, out, err = module.run_command(' '.join(cmd))
@@ -94,7 +97,7 @@ def get_selections(module, pkg):
selections = {}
for line in out.splitlines():
- (key, value) = line.split(':')
+ (key, value) = line.split(':', 1)
selections[ key.strip('*').strip() ] = value.strip()
return selections
@@ -105,11 +108,11 @@ def set_selection(module, pkg, question, vtype, value, unseen):
data = ' '.join([ question, vtype, value ])
setsel = module.get_bin_path('debconf-set-selections', True)
- cmd = ["echo '%s %s' |" % (pkg, data), setsel]
+ cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel]
if unseen:
cmd.append('-u')
- return module.run_command(' '.join(cmd))
+ return module.run_command(' '.join(cmd), use_unsafe_shell=True)
def main():
@@ -125,10 +128,10 @@ def main():
supports_check_mode=True,
)
- #TODO: enable passing array of optionas and/or debconf file from get-selections dump
+ #TODO: enable passing array of options and/or debconf file from get-selections dump
pkg = module.params["name"]
question = module.params["question"]
- vtype = module.params["vtype"]
+ vtype = module.params["vtype"]
value = module.params["value"]
unseen = module.params["unseen"]
@@ -140,7 +143,7 @@ def main():
if question is not None:
if vtype is None or value is None:
- module.fail_json(msg="when supliying a question you must supply a valide vtype and value")
+ module.fail_json(msg="when supplying a question you must supply a valid vtype and value")
if not question in prev or prev[question] != value:
changed = True
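
Quoting the package name and the whole question/vtype/value triple as single shell words keeps the echo pipeline intact even when the value contains spaces. For the oracle-java7 example above, the generated command can be reproduced like this (a sketch, not module code):

import pipes

data = ' '.join(['shared/accepted-oracle-license-v1-1', 'select', 'true'])
cmd = "echo %s %s | debconf-set-selections" % (pipes.quote('oracle-java7-installer'), pipes.quote(data))
# cmd == "echo oracle-java7-installer 'shared/accepted-oracle-license-v1-1 select true' | debconf-set-selections"
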
diff --git a/system/filesystem b/system/filesystem
index 698c71d4534..46e798f6e81 100644
--- a/system/filesystem
+++ b/system/filesystem
@@ -79,7 +79,7 @@ def main():
cmd = module.get_bin_path('blkid', required=True)
- rc,raw_fs,err = module.run_command("%s -o value -s TYPE %s" % (cmd, dev))
+ rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
fs = raw_fs.strip()
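
blkid answers from its cache file by default, so a device reformatted behind its back could be reported with a stale type; pointing the cache at /dev/null forces a fresh probe. In isolation (module and dev assumed):

cmd = module.get_bin_path('blkid', required=True)
# -c /dev/null: bypass the cache; -o value -s TYPE: print only the fs type
rc, raw_fs, err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev))
fs = raw_fs.strip()
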
diff --git a/system/firewalld b/system/firewalld
index 62c90d0656c..22db165aad3 100644
--- a/system/firewalld
+++ b/system/firewalld
@@ -85,8 +85,13 @@ try:
from firewall.client import FirewallClient
fw = FirewallClient()
+ if not fw.connected:
+ raise Exception('failed to connect to the firewalld daemon')
except ImportError:
- print "fail=True msg='firewalld required for this module'"
+ print "failed=True msg='firewalld required for this module'"
+ sys.exit(1)
+except Exception, e:
+ print "failed=True msg='%s'" % str(e)
sys.exit(1)
################
diff --git a/system/hostname b/system/hostname
index 781bdcd08aa..c6d1f819451 100644
--- a/system/hostname
+++ b/system/hostname
@@ -285,8 +285,8 @@ class FedoraStrategy(GenericStrategy):
(rc, out, err))
def get_permanent_hostname(self):
- cmd = 'hostnamectl status | awk \'/^ *Static hostname:/{printf("%s", $3)}\''
- rc, out, err = self.module.run_command(cmd)
+ cmd = 'hostnamectl --static status'
+ rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
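
hostnamectl can print the static hostname directly, which makes the awk post-processing unnecessary: '--static' limits the output to a single line that only needs stripping. Equivalent sketch (self.module assumed; no shell features remain in the command):

rc, out, err = self.module.run_command('hostnamectl --static status')
permanent_hostname = out.strip()
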
diff --git a/system/locale_gen b/system/locale_gen
new file mode 100644
index 00000000000..6225ce236dc
--- /dev/null
+++ b/system/locale_gen
@@ -0,0 +1,151 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import os.path
+from subprocess import Popen, PIPE, call
+
+DOCUMENTATION = '''
+---
+module: locale_gen
+short_description: Creates or removes locales.
+description:
+ - Manages locales by editing /etc/locale.gen and invoking locale-gen.
+version_added: "1.6"
+options:
+ name:
+ description:
+ - Name and encoding of the locale, such as "en_GB.UTF-8".
+ required: true
+ default: null
+ aliases: []
+ state:
+ description:
+ - Whether the locale shall be present.
+ required: false
+ choices: ["present", "absent"]
+ default: "present"
+'''
+
+EXAMPLES = '''
+# Ensure a locale exists.
+- locale_gen: name=de_CH.UTF-8 state=present
+'''
+
+# ===========================================
+# location module specific support methods.
+#
+
+def is_present(name):
+ """Checks if the given locale is currently installed."""
+ output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
+ return any(fix_case(name) == fix_case(line) for line in output.splitlines())
+
+def fix_case(name):
+ """locale -a might return the encoding in either lower or upper case.
+ Passing through this function makes them uniform for comparisons."""
+ return name.replace(".utf8", ".UTF-8")
+
+def replace_line(existing_line, new_line):
+ """Replaces lines in /etc/locale.gen"""
+ with open("/etc/locale.gen", "r") as f:
+ lines = [line.replace(existing_line, new_line) for line in f]
+ with open("/etc/locale.gen", "w") as f:
+ f.write("".join(lines))
+
+def apply_change(targetState, name, encoding):
+ """Create or remove locale.
+
+ Keyword arguments:
+    targetState -- Desired state, either present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ encoding -- Encoding such as UTF-8.
+ """
+ if targetState=="present":
+ # Create locale.
+ replace_line("# "+name+" "+encoding, name+" "+encoding)
+ else:
+ # Delete locale.
+ replace_line(name+" "+encoding, "# "+name+" "+encoding)
+
+ localeGenExitValue = call("locale-gen")
+ if localeGenExitValue!=0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
+
+def apply_change_ubuntu(targetState, name, encoding):
+ """Create or remove locale.
+
+ Keyword arguments:
+ targetState -- Desired state, eiter present or absent.
+ name -- Name including encoding such as de_CH.UTF-8.
+ encoding -- Encoding such as UTF-8.
+ """
+ if targetState=="present":
+ # Create locale.
+ # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local
+ localeGenExitValue = call(["locale-gen", name])
+ else:
+ # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales.
+ with open("/var/lib/locales/supported.d/local", "r") as f:
+ content = f.readlines()
+ with open("/var/lib/locales/supported.d/local", "w") as f:
+ for line in content:
+ if line!=(name+" "+encoding+"\n"):
+ f.write(line)
+ # Purge locales and regenerate.
+ # Please provide a patch if you know how to avoid regenerating the locales to keep!
+ localeGenExitValue = call(["locale-gen", "--purge"])
+
+ if localeGenExitValue!=0:
+        raise EnvironmentError(localeGenExitValue, "locale-gen failed to execute, it returned "+str(localeGenExitValue))
+
+# ==============================================================
+# main
+
+def main():
+
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(choices=['present','absent'], required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+ if not "." in name:
+ module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?")
+ state = module.params['state']
+
+ if not os.path.exists("/etc/locale.gen"):
+ if os.path.exists("/var/lib/locales/supported.d/local"):
+ # Ubuntu created its own system to manage locales.
+ ubuntuMode = True
+ else:
+ module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?")
+ else:
+ # We found the common way to manage locales.
+ ubuntuMode = False
+
+ prev_state = "present" if is_present(name) else "absent"
+ changed = (prev_state!=state)
+
+ if module.check_mode:
+ module.exit_json(changed=changed)
+ else:
+ encoding = name.split(".")[1]
+ if changed:
+ try:
+ if ubuntuMode==False:
+ apply_change(state, name, encoding)
+ else:
+ apply_change_ubuntu(state, name, encoding)
+ except EnvironmentError as e:
+ module.fail_json(msg=e.strerror, exitValue=e.errno)
+
+ module.exit_json(name=name, changed=changed, msg="OK")
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
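
On the non-Ubuntu path the module works by toggling the comment marker on the matching /etc/locale.gen line and re-running locale-gen. In effect (locale name illustrative):

# before: "# de_CH.UTF-8 UTF-8"   -- locale known but not generated
# after:  "de_CH.UTF-8 UTF-8"     -- locale will be generated
replace_line("# de_CH.UTF-8 UTF-8", "de_CH.UTF-8 UTF-8")
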
diff --git a/system/lvg b/system/lvg
index 4e24b25a5c9..906e13d6469 100644
--- a/system/lvg
+++ b/system/lvg
@@ -41,6 +41,12 @@ options:
- The size of the physical extent in megabytes. Must be a power of 2.
default: 4
required: false
+ vg_options:
+ description:
+ - Additional options to pass to C(vgcreate) when creating the volume group.
+ default: null
+ required: false
+ version_added: "1.6"
state:
choices: [ "present", "absent" ]
default: present
@@ -99,6 +105,7 @@ def main():
vg=dict(required=True),
pvs=dict(type='list'),
pesize=dict(type='int', default=4),
+            vg_options=dict(default=''),
state=dict(choices=["absent", "present"], default='present'),
force=dict(type='bool', default='no'),
),
@@ -109,6 +116,7 @@ def main():
state = module.params['state']
force = module.boolean(module.params['force'])
pesize = module.params['pesize']
+ vgoptions = module.params.get('vg_options', '').split()
if module.params['pvs']:
dev_string = ' '.join(module.params['pvs'])
@@ -162,13 +170,13 @@ def main():
### create PV
pvcreate_cmd = module.get_bin_path('pvcreate', True)
for current_dev in dev_list:
- rc,_,err = module.run_command("%s %s"%(pvcreate_cmd,current_dev))
+ rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev))
if rc == 0:
changed = True
else:
- module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
+ module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err)
vgcreate_cmd = module.get_bin_path('vgcreate')
- rc,_,err = module.run_command("%s -s %s %s %s"%(vgcreate_cmd, pesize, vg, dev_string))
+ rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg] + dev_list)
if rc == 0:
changed = True
else:
@@ -210,7 +218,7 @@ def main():
module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err)
### add PV to our VG
vgextend_cmd = module.get_bin_path('vgextend', True)
- rc,_,err = module.run_command("%s %s %s"%(vgextend_cmd, vg, devs_to_add_string))
+ rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string))
if rc == 0:
changed = True
else:
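A note on the switch above from an interpolated string to a list: when run_command receives a list, no shell is involved and each element arrives as exactly one argv entry, which is why vg_options is split() before being spliced in (and why the device list should be passed element by element rather than as one space-joined string). A standalone sketch of the difference using plain subprocess rather than Ansible's wrapper (all names illustrative):

    import subprocess

    vgoptions = "--addtag batch --autobackup y".split()
    argv = ["vgcreate"] + vgoptions + ["-s", "4", "vg_data", "/dev/sdb1"]
    print(argv)
    # Each list element is one argv entry; nothing is re-parsed by a shell,
    # so values containing spaces or metacharacters cannot be mangled.
    # subprocess.call(argv)
    # The old string form went through shell parsing instead:
    # subprocess.call("vgcreate %s -s 4 vg_data /dev/sdb1" % " ".join(vgoptions), shell=True)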
diff --git a/system/modprobe b/system/modprobe
index 82ca86b9bd5..73e2c827f41 100644
--- a/system/modprobe
+++ b/system/modprobe
@@ -34,11 +34,19 @@ options:
choices: [ present, absent ]
description:
- Whether the module should be present or absent.
+ params:
+ required: false
+ default: ""
+ version_added: "1.6"
+ description:
+ - Module parameters.
'''
EXAMPLES = '''
# Add the 802.1q module
- modprobe: name=8021q state=present
+# Add the dummy module
+- modprobe: name=dummy state=present params="numdummies=2"
'''
def main():
@@ -46,6 +54,7 @@ def main():
argument_spec={
'name': {'required': True},
'state': {'default': 'present', 'choices': ['present', 'absent']},
+ 'params': {'default': ''},
},
supports_check_mode=True,
)
@@ -54,14 +63,16 @@ def main():
'failed': False,
'name': module.params['name'],
'state': module.params['state'],
+ 'params': module.params['params'],
}
# Check if module is present
try:
modules = open('/proc/modules')
present = False
+ module_name = args['name'].replace('-', '_') + ' '
for line in modules:
- if line.startswith(args['name'] + ' '):
+ if line.startswith(module_name):
present = True
break
modules.close()
@@ -81,7 +92,7 @@ def main():
# Add/remove module as needed
if args['state'] == 'present':
if not present:
- rc, _, err = module.run_command(['modprobe', args['name']])
+ rc, _, err = module.run_command(['modprobe', args['name']] + args['params'].split())
if rc != 0:
module.fail_json(msg=err, **args)
args['changed'] = True
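The replace('-', '_') added to the presence check above matters because the kernel normalizes module names: /proc/modules always reports the underscore form, while users commonly write dashes (nf-conntrack vs nf_conntrack). A self-contained version of the same check (Linux only; module names illustrative):

    def is_loaded(name):
        # Kernel reports underscores; trailing space avoids prefix matches.
        token = name.replace('-', '_') + ' '
        with open('/proc/modules') as f:
            return any(line.startswith(token) for line in f)

    # is_loaded('nf-conntrack') matches an 'nf_conntrack ...' line, and a
    # name that is a prefix of another module's name can no longer match it.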
diff --git a/system/open_iscsi b/system/open_iscsi
index 2e57727cf59..3fd2b1a5a21 100644
--- a/system/open_iscsi
+++ b/system/open_iscsi
@@ -138,7 +138,7 @@ def iscsi_get_cached_nodes(module, portal=None):
# older versions of iscsiadm don't have nice return codes
# for newer versions see iscsiadm(8); also usr/iscsiadm.c for details
# err can contain [N|n]o records...
- elif rc == 21 or (rc == 255 and err.find("o records found") != -1):
+ elif rc == 21 or (rc == 255 and "o records found" in err):
nodes = []
else:
module.fail_json(cmd=cmd, rc=rc, msg=err)
diff --git a/system/service b/system/service
index 2e26a47b636..a694d8d92b8 100644
--- a/system/service
+++ b/system/service
@@ -37,8 +37,8 @@ options:
description:
- C(started)/C(stopped) are idempotent actions that will not run
commands unless necessary. C(restarted) will always bounce the
- service. C(reloaded) will always reload. At least one of state
- and enabled are required.
+ service. C(reloaded) will always reload. B(At least one of state
+ and enabled are required.)
sleep:
required: false
version_added: "1.3"
@@ -59,8 +59,8 @@ options:
required: false
choices: [ "yes", "no" ]
description:
- - Whether the service should start on boot. At least one of state and
- enabled are required.
+ - Whether the service should start on boot. B(At least one of state and
+ enabled are required.)
runlevel:
required: false
@@ -207,7 +207,9 @@ class Service(object):
os._exit(0)
# Start the command
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
+ if isinstance(cmd, basestring):
+ cmd = shlex.split(cmd)
+ p = subprocess.Popen(cmd, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=lambda: os.close(pipe[1]))
stdout = ""
stderr = ""
fds = [p.stdout, p.stderr]
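For readers unfamiliar with the idiom introduced above: shlex.split tokenizes a command string using POSIX shell quoting rules, so the resulting list can be handed to Popen with shell=False and quoted arguments still arrive intact. A minimal sketch:

    import shlex
    import subprocess

    argv = shlex.split("echo 'hello world'")   # ['echo', 'hello world']
    # With shell=False nothing is globbed and no metacharacter is
    # re-interpreted; the quoted argument stays a single argv entry.
    p = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = p.communicate()
    print(out)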
@@ -410,11 +412,13 @@ class LinuxService(Service):
# adjust the service name to account for template service unit files
index = name.find('@')
if index != -1:
- name = name[:index+1]
+ template_name = name[:index+1]
+ else:
+ template_name = name
self.__systemd_unit = None
for line in out.splitlines():
- if line.startswith(name):
+ if line.startswith(template_name):
self.__systemd_unit = name
return True
return False
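The template_name handling above exists because systemd lists template units without an instance (e.g. getty@.service) while callers ask about an instantiated name (getty@tty1.service); truncating just past the '@' lets the prefix match find the template, while the recorded unit keeps the full instance name. A toy version of the lookup (unit names illustrative):

    listed = ["sshd.service", "getty@.service"]

    def resolves(name):
        index = name.find('@')
        template_name = name[:index + 1] if index != -1 else name
        return any(line.startswith(template_name) for line in listed)

    # resolves("getty@tty1.service") -> True, via the 'getty@' prefix
    # resolves("crond.service")      -> False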
@@ -473,7 +477,34 @@ class LinuxService(Service):
if location.get('initctl', None):
self.svc_initctl = location['initctl']
+ def get_systemd_status_dict(self):
+ (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit,))
+ if rc != 0:
+ self.module.fail_json(msg='failure %d running systemctl show for %r: %s' % (rc, self.__systemd_unit, err))
+ return dict(line.split('=', 1) for line in out.splitlines())
+
+ def get_systemd_service_status(self):
+ d = self.get_systemd_status_dict()
+ if d.get('ActiveState') == 'active':
+ # run-once services (for which a single successful exit indicates
+ # that they are running as designed) should not be restarted here.
+ # Thus, we are not checking d['SubState'].
+ self.running = True
+ self.crashed = False
+ elif d.get('ActiveState') == 'failed':
+ self.running = False
+ self.crashed = True
+ elif d.get('ActiveState') is None:
+ self.module.fail_json(msg='No ActiveState value in systemctl show output for %r' % (self.__systemd_unit,))
+ else:
+ self.running = False
+ self.crashed = False
+ return self.running
+
def get_service_status(self):
+ if self.svc_cmd and self.svc_cmd.endswith('systemctl'):
+ return self.get_systemd_service_status()
+
self.action = "status"
rc, status_stdout, status_stderr = self.service_control()
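get_systemd_status_dict leans on the flat KEY=VALUE format of `systemctl show`; splitting each line on the first '=' turns the whole dump into a dict with no systemd-specific parsing. A sketch with canned output (the properties shown are a small, illustrative subset):

    out = (
        "Id=nginx.service\n"
        "ActiveState=active\n"
        "SubState=running\n"
        "UnitFileState=enabled\n"
        "ExecStart={ path=/usr/sbin/nginx ; argv[]=/usr/sbin/nginx }\n"
    )
    d = dict(line.split('=', 1) for line in out.splitlines())
    # maxsplit=1 keeps values that themselves contain '=' (ExecStart) intact
    assert d['ActiveState'] == 'active'
    assert d.get('UnitFileState') == 'enabled'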
@@ -481,9 +512,9 @@ class LinuxService(Service):
if self.svc_initctl and self.running is None:
# check the job status by upstart response
initctl_rc, initctl_status_stdout, initctl_status_stderr = self.execute_command("%s status %s" % (self.svc_initctl, self.name))
- if initctl_status_stdout.find("stop/waiting") != -1:
+ if "stop/waiting" in initctl_status_stdout:
self.running = False
- elif initctl_status_stdout.find("start/running") != -1:
+ elif "start/running" in initctl_status_stdout:
self.running = True
if self.svc_cmd and self.svc_cmd.endswith("rc-service") and self.running is None:
@@ -523,7 +554,7 @@ class LinuxService(Service):
# if the job status is still not known check it by special conditions
if self.running is None:
- if self.name == 'iptables' and status_stdout.find("ACCEPT") != -1:
+ if self.name == 'iptables' and "ACCEPT" in status_stdout:
# iptables status command output is lame
# TODO: lookup if we can use a return code for this instead?
self.running = True
@@ -534,7 +565,7 @@ class LinuxService(Service):
def service_enable(self):
if self.enable_cmd is None:
- self.module.fail_json(msg='service name not recognized')
+ self.module.fail_json(msg='unknown init system, cannot enable service')
# FIXME: we use chkconfig or systemctl
# to decide whether to run the command here but need something
@@ -577,7 +608,7 @@ class LinuxService(Service):
self.execute_command("%s --add %s" % (self.enable_cmd, self.name))
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if not self.name in out:
- self.module.fail_json(msg="unknown service name")
+ self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
state = out.split()[-1]
if self.enable and ( "3:on" in out and "5:on" in out ):
return
@@ -585,9 +616,7 @@ class LinuxService(Service):
return
if self.enable_cmd.endswith("systemctl"):
- (rc, out, err) = self.execute_command("%s show %s" % (self.enable_cmd, self.__systemd_unit))
-
- d = dict(line.split('=', 1) for line in out.splitlines())
+ d = self.get_systemd_status_dict()
if "UnitFileState" in d:
if self.enable and d["UnitFileState"] == "enabled":
return
@@ -629,16 +658,16 @@ class LinuxService(Service):
if line.startswith('rename'):
self.changed = True
break
- elif self.enable and line.find('do not exist') != -1:
+ elif self.enable and 'do not exist' in line:
self.changed = True
break
- elif not self.enable and line.find('already exist') != -1:
+ elif not self.enable and 'already exist' in line:
self.changed = True
break
# Debian compatibility
for line in err.splitlines():
- if self.enable and line.find('no runlevel symlinks to modify') != -1:
+ if self.enable and 'no runlevel symlinks to modify' in line:
self.changed = True
break
@@ -658,7 +687,8 @@ class LinuxService(Service):
return self.execute_command("%s %s enable" % (self.enable_cmd, self.name))
else:
- return self.execute_command("%s -f %s remove" % (self.enable_cmd, self.name))
+ return self.execute_command("%s %s disable" % (self.enable_cmd,
+ self.name))
# we change argument depending on real binary used:
# - update-rc.d and systemctl wants enable/disable
@@ -979,10 +1009,10 @@ class SunOSService(Service):
# enabled true
# enabled false
for line in stdout.split("\n"):
- if line.find("enabled") == 0:
- if line.find("true") != -1:
+ if line.startswith("enabled"):
+ if "true" in line:
enabled = True
- if line.find("temporary") != -1:
+ if "temporary" in line:
temporary = True
startup_enabled = (enabled and not temporary) or (not enabled and temporary)
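The startup_enabled expression above is an exclusive-or: a 'temporary' state means the service was toggled with svcadm -t, so the boot-time setting is the opposite of what is currently reported. Spelled out (equivalent to enabled != temporary):

    for enabled in (True, False):
        for temporary in (True, False):
            startup_enabled = (enabled and not temporary) or (not enabled and temporary)
            print("%-5s %-5s -> %s" % (enabled, temporary, startup_enabled))
    # True  True  -> False   (enabled only until reboot)
    # True  False -> True    (enabled permanently)
    # False True  -> True    (disabled only until reboot)
    # False False -> False   (disabled permanently)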
@@ -1174,7 +1204,7 @@ def main():
(rc, out, err) = service.modify_service_state()
if rc != 0:
- if err and err.find("is already") != -1:
+ if err and "is already" in err:
# upstart got confused, one such possibility is MySQL on Ubuntu 12.04
# where status may report it has no start/stop links and we could
# not get accurate status
diff --git a/system/setup b/system/setup
index f140991dc27..cc3a5855f1e 100644
--- a/system/setup
+++ b/system/setup
@@ -18,22 +18,6 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-import os
-import array
-import fcntl
-import fnmatch
-import glob
-import platform
-import re
-import socket
-import struct
-import datetime
-import getpass
-import subprocess
-import ConfigParser
-import StringIO
-
-
DOCUMENTATION = '''
---
module: setup
@@ -87,2229 +71,22 @@ ansible all -m setup -a 'filter=facter_*'
ansible all -m setup -a 'filter=ansible_eth[0-2]'
"""
-try:
- import selinux
- HAVE_SELINUX=True
-except ImportError:
- HAVE_SELINUX=False
-
-try:
- import json
-except ImportError:
- import simplejson as json
-
-class Facts(object):
- """
- This class should only attempt to populate those facts that
- are mostly generic to all systems. This includes platform facts,
- service facts (e.g. ssh keys or selinux), and distribution facts.
- Anything that requires extensive code or may have more than one
- possible implementation to establish facts for a given topic should
- subclass Facts.
- """
-
- _I386RE = re.compile(r'i[3456]86')
- # For the most part, we assume that platform.dist() will tell the truth.
- # This is the fallback to handle unknowns or exceptions
- OSDIST_DICT = { '/etc/redhat-release': 'RedHat',
- '/etc/vmware-release': 'VMwareESX',
- '/etc/openwrt_release': 'OpenWrt',
- '/etc/system-release': 'OtherLinux',
- '/etc/alpine-release': 'Alpine',
- '/etc/release': 'Solaris',
- '/etc/arch-release': 'Archlinux',
- '/etc/SuSE-release': 'SuSE',
- '/etc/gentoo-release': 'Gentoo',
- '/etc/os-release': 'Debian' }
- SELINUX_MODE_DICT = { 1: 'enforcing', 0: 'permissive', -1: 'disabled' }
-
- # A list of dicts. If there is a platform with more than one
- # package manager, put the preferred one last. If there is an
- # ansible module, use that as the value for the 'name' key.
- PKG_MGRS = [ { 'path' : '/usr/bin/yum', 'name' : 'yum' },
- { 'path' : '/usr/bin/apt-get', 'name' : 'apt' },
- { 'path' : '/usr/bin/zypper', 'name' : 'zypper' },
- { 'path' : '/usr/sbin/urpmi', 'name' : 'urpmi' },
- { 'path' : '/usr/bin/pacman', 'name' : 'pacman' },
- { 'path' : '/bin/opkg', 'name' : 'opkg' },
- { 'path' : '/opt/local/bin/pkgin', 'name' : 'pkgin' },
- { 'path' : '/opt/local/bin/port', 'name' : 'macports' },
- { 'path' : '/sbin/apk', 'name' : 'apk' },
- { 'path' : '/usr/sbin/pkg', 'name' : 'pkgng' },
- { 'path' : '/usr/sbin/swlist', 'name' : 'SD-UX' },
- { 'path' : '/usr/bin/emerge', 'name' : 'portage' },
- ]
-
- def __init__(self):
- self.facts = {}
- self.get_platform_facts()
- self.get_distribution_facts()
- self.get_cmdline()
- self.get_public_ssh_host_keys()
- self.get_selinux_facts()
- self.get_pkg_mgr_facts()
- self.get_lsb_facts()
- self.get_date_time_facts()
- self.get_user_facts()
- self.get_local_facts()
- self.get_env_facts()
-
- def populate(self):
- return self.facts
-
- # Platform
- # platform.system() can be Linux, Darwin, Java, or Windows
- def get_platform_facts(self):
- self.facts['system'] = platform.system()
- self.facts['kernel'] = platform.release()
- self.facts['machine'] = platform.machine()
- self.facts['python_version'] = platform.python_version()
- self.facts['fqdn'] = socket.getfqdn()
- self.facts['hostname'] = platform.node().split('.')[0]
- self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
- arch_bits = platform.architecture()[0]
- self.facts['userspace_bits'] = arch_bits.replace('bit', '')
- if self.facts['machine'] == 'x86_64':
- self.facts['architecture'] = self.facts['machine']
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- elif Facts._I386RE.search(self.facts['machine']):
- self.facts['architecture'] = 'i386'
- if self.facts['userspace_bits'] == '64':
- self.facts['userspace_architecture'] = 'x86_64'
- elif self.facts['userspace_bits'] == '32':
- self.facts['userspace_architecture'] = 'i386'
- else:
- self.facts['architecture'] = self.facts['machine']
- if self.facts['system'] == 'Linux':
- self.get_distribution_facts()
- elif self.facts['system'] == 'AIX':
- rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
- data = out.split('\n')
- self.facts['architecture'] = data[0]
-
-
- def get_local_facts(self):
-
- fact_path = module.params.get('fact_path', None)
- if not fact_path or not os.path.exists(fact_path):
- return
-
- local = {}
- for fn in sorted(glob.glob(fact_path + '/*.fact')):
- # where it will sit under local facts
- fact_base = os.path.basename(fn).replace('.fact','')
- if os.access(fn, os.X_OK):
- # run it
- # try to read it as json first
- # if that fails read it with ConfigParser
- # if that fails, skip it
- rc, out, err = module.run_command(fn)
- else:
- out = open(fn).read()
-
- # load raw json
- fact = 'loading %s' % fact_base
- try:
- fact = json.loads(out)
- except ValueError, e:
- # load raw ini
- cp = ConfigParser.ConfigParser()
- try:
- cp.readfp(StringIO.StringIO(out))
- except ConfigParser.Error, e:
- fact="error loading fact - please check content"
- else:
- fact = {}
- #print cp.sections()
- for sect in cp.sections():
- if sect not in fact:
- fact[sect] = {}
- for opt in cp.options(sect):
- val = cp.get(sect, opt)
- fact[sect][opt]=val
-
- local[fact_base] = fact
- if not local:
- return
- self.facts['local'] = local
-
- # platform.dist() is deprecated in 2.6
- # in 2.6 and newer, you should use platform.linux_distribution()
- def get_distribution_facts(self):
-
- # A list with OS Family members
- OS_FAMILY = dict(
- RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
- SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
- OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
- XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse',
- SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo', Funtoo = 'Gentoo',
- Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
- Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
- SmartOS = 'Solaris', AIX = 'AIX', Alpine = 'Alpine', MacOSX = 'Darwin',
- FreeBSD = 'FreeBSD', HPUX = 'HP-UX'
- )
-
- if self.facts['system'] == 'AIX':
- self.facts['distribution'] = 'AIX'
- rc, out, err = module.run_command("/usr/bin/oslevel")
- data = out.split('.')
- self.facts['distribution_version'] = data[0]
- self.facts['distribution_release'] = data[1]
- elif self.facts['system'] == 'HP-UX':
- self.facts['distribution'] = 'HP-UX'
- rc, out, err = module.run_command("/usr/sbin/swlist |egrep 'HPUX.*OE.*[AB].[0-9]+\.[0-9]+'")
- data = re.search('HPUX.*OE.*([AB].[0-9]+\.[0-9]+)\.([0-9]+).*', out)
- if data:
- self.facts['distribution_version'] = data.groups()[0]
- self.facts['distribution_release'] = data.groups()[1]
- elif self.facts['system'] == 'Darwin':
- self.facts['distribution'] = 'MacOSX'
- rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
- data = out.split()[-1]
- self.facts['distribution_version'] = data
- elif self.facts['system'] == 'FreeBSD':
- self.facts['distribution'] = 'FreeBSD'
- self.facts['distribution_release'] = platform.release()
- self.facts['distribution_version'] = platform.version()
- elif self.facts['system'] == 'OpenBSD':
- self.facts['distribution'] = 'OpenBSD'
- self.facts['distribution_release'] = platform.release()
- rc, out, err = module.run_command("/sbin/sysctl -n kern.version")
- match = re.match('OpenBSD\s[0-9]+.[0-9]+-(\S+)\s.*', out)
- if match:
- self.facts['distribution_version'] = match.groups()[0]
- else:
- self.facts['distribution_version'] = 'release'
- else:
- dist = platform.dist()
- self.facts['distribution'] = dist[0].capitalize() or 'NA'
- self.facts['distribution_version'] = dist[1] or 'NA'
- self.facts['distribution_release'] = dist[2] or 'NA'
- # Try to handle the exceptions now ...
- for (path, name) in Facts.OSDIST_DICT.items():
- if os.path.exists(path):
- if self.facts['distribution'] == 'Fedora':
- pass
- elif name == 'RedHat':
- data = get_file_content(path)
- if 'Red Hat' in data:
- self.facts['distribution'] = name
- else:
- self.facts['distribution'] = data.split()[0]
- elif name == 'OtherLinux':
- data = get_file_content(path)
- if 'Amazon' in data:
- self.facts['distribution'] = 'Amazon'
- self.facts['distribution_version'] = data.split()[-1]
- elif name == 'OpenWrt':
- data = get_file_content(path)
- if 'OpenWrt' in data:
- self.facts['distribution'] = name
- version = re.search('DISTRIB_RELEASE="(.*)"', data)
- if version:
- self.facts['distribution_version'] = version.groups()[0]
- release = re.search('DISTRIB_CODENAME="(.*)"', data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- elif name == 'Alpine':
- data = get_file_content(path)
- self.facts['distribution'] = 'Alpine'
- self.facts['distribution_version'] = data
- elif name == 'Solaris':
- data = get_file_content(path).split('\n')[0]
- ora_prefix = ''
- if 'Oracle Solaris' in data:
- data = data.replace('Oracle ','')
- ora_prefix = 'Oracle '
- self.facts['distribution'] = data.split()[0]
- self.facts['distribution_version'] = data.split()[1]
- self.facts['distribution_release'] = ora_prefix + data
- elif name == 'SuSE':
- data = get_file_content(path).splitlines()
- self.facts['distribution_release'] = data[2].split('=')[1].strip()
- elif name == 'Debian':
- data = get_file_content(path).split('\n')[0]
- release = re.search("PRETTY_NAME.+ \(?([^ ]+?)\)?\"", data)
- if release:
- self.facts['distribution_release'] = release.groups()[0]
- else:
- self.facts['distribution'] = name
-
- self.facts['os_family'] = self.facts['distribution']
- if self.facts['distribution'] in OS_FAMILY:
- self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
-
- def get_cmdline(self):
- data = get_file_content('/proc/cmdline')
- if data:
- self.facts['cmdline'] = {}
- for piece in shlex.split(data):
- item = piece.split('=', 1)
- if len(item) == 1:
- self.facts['cmdline'][item[0]] = True
- else:
- self.facts['cmdline'][item[0]] = item[1]
-
- def get_public_ssh_host_keys(self):
- dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh/ssh_host_ecdsa_key.pub'
-
- if self.facts['system'] == 'Darwin':
- dsa_filename = '/etc/ssh_host_dsa_key.pub'
- rsa_filename = '/etc/ssh_host_rsa_key.pub'
- ecdsa_filename = '/etc/ssh_host_ecdsa_key.pub'
- dsa = get_file_content(dsa_filename)
- rsa = get_file_content(rsa_filename)
- ecdsa = get_file_content(ecdsa_filename)
- if dsa is None:
- dsa = 'NA'
- else:
- self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
- if rsa is None:
- rsa = 'NA'
- else:
- self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
- if ecdsa is None:
- ecdsa = 'NA'
- else:
- self.facts['ssh_host_key_ecdsa_public'] = ecdsa.split()[1]
-
- def get_pkg_mgr_facts(self):
- self.facts['pkg_mgr'] = 'unknown'
- for pkg in Facts.PKG_MGRS:
- if os.path.exists(pkg['path']):
- self.facts['pkg_mgr'] = pkg['name']
- if self.facts['system'] == 'OpenBSD':
- self.facts['pkg_mgr'] = 'openbsd_pkg'
-
- def get_lsb_facts(self):
- lsb_path = module.get_bin_path('lsb_release')
- if lsb_path:
- rc, out, err = module.run_command([lsb_path, "-a"])
- if rc == 0:
- self.facts['lsb'] = {}
- for line in out.split('\n'):
- if len(line) < 1:
- continue
- value = line.split(':', 1)[1].strip()
- if 'LSB Version:' in line:
- self.facts['lsb']['release'] = value
- elif 'Distributor ID:' in line:
- self.facts['lsb']['id'] = value
- elif 'Description:' in line:
- self.facts['lsb']['description'] = value
- elif 'Release:' in line:
- self.facts['lsb']['release'] = value
- elif 'Codename:' in line:
- self.facts['lsb']['codename'] = value
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
- elif lsb_path is None and os.path.exists('/etc/lsb-release'):
- self.facts['lsb'] = {}
- f = open('/etc/lsb-release', 'r')
- try:
- for line in f.readlines():
- value = line.split('=',1)[1].strip()
- if 'DISTRIB_ID' in line:
- self.facts['lsb']['id'] = value
- elif 'DISTRIB_RELEASE' in line:
- self.facts['lsb']['release'] = value
- elif 'DISTRIB_DESCRIPTION' in line:
- self.facts['lsb']['description'] = value
- elif 'DISTRIB_CODENAME' in line:
- self.facts['lsb']['codename'] = value
- finally:
- f.close()
- else:
- return self.facts
-
- if 'lsb' in self.facts and 'release' in self.facts['lsb']:
- self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
-
-
- def get_selinux_facts(self):
- if not HAVE_SELINUX:
- self.facts['selinux'] = False
- return
- self.facts['selinux'] = {}
- if not selinux.is_selinux_enabled():
- self.facts['selinux']['status'] = 'disabled'
- else:
- self.facts['selinux']['status'] = 'enabled'
- try:
- self.facts['selinux']['policyvers'] = selinux.security_policyvers()
- except OSError, e:
- self.facts['selinux']['policyvers'] = 'unknown'
- try:
- (rc, configmode) = selinux.selinux_getenforcemode()
- if rc == 0:
- self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
- else:
- self.facts['selinux']['config_mode'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['config_mode'] = 'unknown'
- try:
- mode = selinux.security_getenforce()
- self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
- except OSError, e:
- self.facts['selinux']['mode'] = 'unknown'
- try:
- (rc, policytype) = selinux.selinux_getpolicytype()
- if rc == 0:
- self.facts['selinux']['type'] = policytype
- else:
- self.facts['selinux']['type'] = 'unknown'
- except OSError, e:
- self.facts['selinux']['type'] = 'unknown'
-
-
- def get_date_time_facts(self):
- self.facts['date_time'] = {}
-
- now = datetime.datetime.now()
- self.facts['date_time']['year'] = now.strftime('%Y')
- self.facts['date_time']['month'] = now.strftime('%m')
- self.facts['date_time']['day'] = now.strftime('%d')
- self.facts['date_time']['hour'] = now.strftime('%H')
- self.facts['date_time']['minute'] = now.strftime('%M')
- self.facts['date_time']['second'] = now.strftime('%S')
- self.facts['date_time']['epoch'] = now.strftime('%s')
- if self.facts['date_time']['epoch'] == '' or self.facts['date_time']['epoch'][0] == '%':
- self.facts['date_time']['epoch'] = str(int(time.time()))
- self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
- self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
- self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
- self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
- self.facts['date_time']['tz'] = time.strftime("%Z")
- self.facts['date_time']['tz_offset'] = time.strftime("%z")
-
-
- # User
- def get_user_facts(self):
- self.facts['user_id'] = getpass.getuser()
-
- def get_env_facts(self):
- self.facts['env'] = {}
- for k,v in os.environ.iteritems():
- self.facts['env'][k] = v
-
-class Hardware(Facts):
- """
- This is a generic Hardware subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this, it
- should define:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Hardware.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxHardware(Hardware):
- """
- Linux-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
-
- In addition, it also defines number of DMI facts and device facts.
- """
-
- platform = 'Linux'
- MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- self.get_mount_facts()
- return self.facts
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
- for line in open("/proc/meminfo").readlines():
- data = line.split(":", 1)
- key = data[0]
- if key in LinuxHardware.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- def get_cpu_facts(self):
- i = 0
- physid = 0
- coreid = 0
- sockets = {}
- cores = {}
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in open("/proc/cpuinfo").readlines():
- data = line.split(":", 1)
- key = data[0].strip()
- # model name is for Intel arch, Processor (mind the uppercase P)
- # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'core id':
- coreid = data[1].strip()
- if coreid not in sockets:
- cores[coreid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- elif key == 'siblings':
- cores[coreid] = int(data[1].strip())
- self.facts['processor_count'] = sockets and len(sockets) or i
- self.facts['processor_cores'] = sockets.values() and sockets.values()[0] or 1
- self.facts['processor_threads_per_core'] = ((cores.values() and
- cores.values()[0] or 1) / self.facts['processor_cores'])
- self.facts['processor_vcpus'] = (self.facts['processor_threads_per_core'] *
- self.facts['processor_count'] * self.facts['processor_cores'])
-
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Try /sys first for dmi related facts.
- If that is not available, fall back to dmidecode executable '''
-
- if os.path.exists('/sys/devices/virtual/dmi/id/product_name'):
- # Use kernel DMI info, if available
-
- # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
- FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
- "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
- "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
- "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
- "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
- "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
- "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
- "CompactPCI", "AdvancedTCA", "Blade" ]
-
- DMI_DICT = {
- 'bios_date': '/sys/devices/virtual/dmi/id/bios_date',
- 'bios_version': '/sys/devices/virtual/dmi/id/bios_version',
- 'form_factor': '/sys/devices/virtual/dmi/id/chassis_type',
- 'product_name': '/sys/devices/virtual/dmi/id/product_name',
- 'product_serial': '/sys/devices/virtual/dmi/id/product_serial',
- 'product_uuid': '/sys/devices/virtual/dmi/id/product_uuid',
- 'product_version': '/sys/devices/virtual/dmi/id/product_version',
- 'system_vendor': '/sys/devices/virtual/dmi/id/sys_vendor'
- }
-
- for (key,path) in DMI_DICT.items():
- data = get_file_content(path)
- if data is not None:
- if key == 'form_factor':
- try:
- self.facts['form_factor'] = FORM_FACTOR[int(data)]
- except IndexError, e:
- self.facts['form_factor'] = 'unknown (%s)' % data
- else:
- self.facts[key] = data
- else:
- self.facts[key] = 'NA'
-
- else:
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = {
- 'bios_date': 'bios-release-date',
- 'bios_version': 'bios-version',
- 'form_factor': 'chassis-type',
- 'product_name': 'system-product-name',
- 'product_serial': 'system-serial-number',
- 'product_uuid': 'system-uuid',
- 'product_version': 'system-version',
- 'system_vendor': 'system-manufacturer'
- }
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- thisvalue = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(thisvalue)
- except UnicodeDecodeError:
- thisvalue = "NA"
-
- self.facts[k] = thisvalue
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
- def get_mount_facts(self):
- self.facts['mounts'] = []
- mtab = get_file_content('/etc/mtab', '')
- for line in mtab.split('\n'):
- if line.startswith('/'):
- fields = line.rstrip('\n').split()
- if(fields[2] != 'none'):
- size_total = None
- size_available = None
- try:
- statvfs_result = os.statvfs(fields[1])
- size_total = statvfs_result.f_bsize * statvfs_result.f_blocks
- size_available = statvfs_result.f_bsize * (statvfs_result.f_bavail)
- except OSError, e:
- continue
-
- self.facts['mounts'].append(
- {'mount': fields[1],
- 'device':fields[0],
- 'fstype': fields[2],
- 'options': fields[3],
- # statvfs data
- 'size_total': size_total,
- 'size_available': size_available,
- })
-
- def get_device_facts(self):
- self.facts['devices'] = {}
- lspci = module.get_bin_path('lspci')
- if lspci:
- rc, pcidata, err = module.run_command([lspci, '-D'])
- else:
- pcidata = None
-
- try:
- block_devs = os.listdir("/sys/block")
- except OSError:
- return
-
- for block in block_devs:
- virtual = 1
- sysfs_no_links = 0
- try:
- path = os.readlink(os.path.join("/sys/block/", block))
- except OSError, e:
- if e.errno == errno.EINVAL:
- path = block
- sysfs_no_links = 1
- else:
- continue
- if "virtual" in path:
- continue
- sysdir = os.path.join("/sys/block", path)
- if sysfs_no_links == 1:
- for folder in os.listdir(sysdir):
- if "device" in folder:
- virtual = 0
- break
- if virtual:
- continue
- d = {}
- diskname = os.path.basename(sysdir)
- for key in ['vendor', 'model']:
- d[key] = get_file_content(sysdir + "/device/" + key)
-
- for key,test in [ ('removable','/removable'), \
- ('support_discard','/queue/discard_granularity'),
- ]:
- d[key] = get_file_content(sysdir + test)
-
- d['partitions'] = {}
- for folder in os.listdir(sysdir):
- m = re.search("(" + diskname + "\d+)", folder)
- if m:
- part = {}
- partname = m.group(1)
- part_sysdir = sysdir + "/" + partname
-
- part['start'] = get_file_content(part_sysdir + "/start",0)
- part['sectors'] = get_file_content(part_sysdir + "/size",0)
- part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
- part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
- d['partitions'][partname] = part
-
- d['rotational'] = get_file_content(sysdir + "/queue/rotational")
- d['scheduler_mode'] = ""
- scheduler = get_file_content(sysdir + "/queue/scheduler")
- if scheduler is not None:
- m = re.match(".*?(\[(.*)\])", scheduler)
- if m:
- d['scheduler_mode'] = m.group(2)
-
- d['sectors'] = get_file_content(sysdir + "/size")
- if not d['sectors']:
- d['sectors'] = 0
- d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size")
- if not d['sectorsize']:
- d['sectorsize'] = 512
- d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
-
- d['host'] = ""
-
- # domains are numbered (0 to ffff), bus (0 to ff), slot (0 to 1f), and function (0 to 7).
- m = re.match(".+/([a-f0-9]{4}:[a-f0-9]{2}:[0|1][a-f0-9]\.[0-7])/", sysdir)
- if m and pcidata:
- pciid = m.group(1)
- did = re.escape(pciid)
- m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
- d['host'] = m.group(1)
-
- d['holders'] = []
- if os.path.isdir(sysdir + "/holders"):
- for folder in os.listdir(sysdir + "/holders"):
- if not folder.startswith("dm-"):
- continue
- name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
- if name:
- d['holders'].append(name)
- else:
- d['holders'].append(folder)
-
- self.facts['devices'][diskname] = d
-
-
-class SunOSHardware(Hardware):
- """
- In addition to the generic memory and cpu facts, this also sets
- swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
- """
- platform = 'SunOS'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- return self.facts
-
- def get_cpu_facts(self):
- physid = 0
- sockets = {}
- rc, out, err = module.run_command("/usr/bin/kstat cpu_info")
- self.facts['processor'] = []
- for line in out.split('\n'):
- if len(line) < 1:
- continue
- data = line.split(None, 1)
- key = data[0].strip()
- # "brand" works on Solaris 10 & 11. "implementation" for Solaris 9.
- if key == 'module:':
- brand = ''
- elif key == 'brand':
- brand = data[1].strip()
- elif key == 'clock_MHz':
- clock_mhz = data[1].strip()
- elif key == 'implementation':
- processor = brand or data[1].strip()
- # Add clock speed to description for SPARC CPU
- if self.facts['machine'] != 'i86pc':
- processor += " @ " + clock_mhz + "MHz"
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(processor)
- elif key == 'chip_id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- else:
- sockets[physid] += 1
- # Counting cores on Solaris can be complicated.
- # https://blogs.oracle.com/mandalika/entry/solaris_show_me_the_cpu
- # Treat 'processor_count' as physical sockets and 'processor_cores' as
- # virtual CPUs visible to Solaris. Not a true count of cores for modern SPARC as
- # these processors have: sockets -> cores -> threads/virtual CPU.
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_cores'] = 'NA'
- self.facts['processor_count'] = len(self.facts['processor'])
-
- def get_memory_facts(self):
- rc, out, err = module.run_command(["/usr/sbin/prtconf"])
- for line in out.split('\n'):
- if 'Memory size' in line:
- self.facts['memtotal_mb'] = line.split()[2]
- rc, out, err = module.run_command("/usr/sbin/swap -s")
- allocated = long(out.split()[1][:-1])
- reserved = long(out.split()[5][:-1])
- used = long(out.split()[8][:-1])
- free = long(out.split()[10][:-1])
- self.facts['swapfree_mb'] = free / 1024
- self.facts['swaptotal_mb'] = (free + used) / 1024
- self.facts['swap_allocated_mb'] = allocated / 1024
- self.facts['swap_reserved_mb'] = reserved / 1024
-
-class OpenBSDHardware(Hardware):
- """
- OpenBSD-specific subclass of Hardware. Defines memory, CPU and device facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - processor_speed
- - devices
- """
- platform = 'OpenBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_memory_facts()
- self.get_processor_facts()
- self.get_device_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/sbin/sysctl", "hw"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- (key, value) = line.split('=')
- sysctl[key] = value.strip()
- return sysctl
-
- def get_memory_facts(self):
- # Get free memory. vmstat output looks like:
- # procs memory page disks traps cpu
- # r b w avm fre flt re pi po fr sr wd0 fd0 int sys cs us sy id
- # 0 0 0 47512 28160 51 0 0 0 0 0 1 0 116 89 17 0 1 99
- rc, out, err = module.run_command("/usr/bin/vmstat")
- if rc == 0:
- self.facts['memfree_mb'] = long(out.splitlines()[-1].split()[4]) / 1024
- self.facts['memtotal_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
-
- # Get swapctl info. swapctl output looks like:
- # total: 69268 1K-blocks allocated, 0 used, 69268 available
- # And for older OpenBSD:
- # total: 69268k bytes allocated = 0k used, 69268k available
- rc, out, err = module.run_command("/sbin/swapctl -sk")
- if rc == 0:
- data = out.split()
- self.facts['swapfree_mb'] = long(data[-2].translate(None, "kmg")) / 1024
- self.facts['swaptotal_mb'] = long(data[1].translate(None, "kmg")) / 1024
-
- def get_processor_facts(self):
- processor = []
- dmesg_boot = get_file_content(OpenBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- i = 0
- for line in dmesg_boot.splitlines():
- if line.split(' ', 1)[0] == 'cpu%i:' % i:
- processor.append(line.split(' ', 1)[1])
- i = i + 1
- processor_count = i
- self.facts['processor'] = processor
- self.facts['processor_count'] = processor_count
- # I found no way to figure out the number of Cores per CPU in OpenBSD
- self.facts['processor_cores'] = 'NA'
-
- def get_device_facts(self):
- devices = []
- devices.extend(self.sysctl['hw.disknames'].split(','))
- self.facts['devices'] = devices
-
-class FreeBSDHardware(Hardware):
- """
- FreeBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'FreeBSD'
- DMESG_BOOT = '/var/run/dmesg.boot'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- self.get_device_facts()
- self.get_mount_facts()
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
- rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
- self.facts['processor_count'] = out.strip()
-
- dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
- if not dmesg_boot:
- rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
- for line in dmesg_boot.split('\n'):
- if 'CPU:' in line:
- cpu = re.sub(r'CPU:\s+', r"", line)
- self.facts['processor'].append(cpu.strip())
- if 'Logical CPUs per core' in line:
- self.facts['processor_cores'] = line.split()[4]
-
-
- def get_memory_facts(self):
- rc, out, err = module.run_command("/sbin/sysctl vm.stats")
- for line in out.split('\n'):
- data = line.split()
- if 'vm.stats.vm.v_page_size' in line:
- pagesize = long(data[1])
- if 'vm.stats.vm.v_page_count' in line:
- pagecount = long(data[1])
- if 'vm.stats.vm.v_free_count' in line:
- freecount = long(data[1])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
- lines = out.split('\n')
- if len(lines[-1]) == 0:
- lines.pop()
- data = lines[-1].split()
- self.facts['swaptotal_mb'] = data[1]
- self.facts['swapfree_mb'] = data[3]
-
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
- def get_device_facts(self):
- sysdir = '/dev'
- self.facts['devices'] = {}
- drives = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
- slices = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
- if os.path.isdir(sysdir):
- dirlist = sorted(os.listdir(sysdir))
- for device in dirlist:
- d = drives.match(device)
- if d:
- self.facts['devices'][d.group(1)] = []
- s = slices.match(device)
- if s:
- self.facts['devices'][d.group(1)].append(s.group(1))
-
- def get_dmi_facts(self):
- ''' learn dmi facts from system
-
- Use dmidecode executable if available'''
-
- # Fall back to using dmidecode, if available
- dmi_bin = module.get_bin_path('dmidecode')
- DMI_DICT = dict(
- bios_date='bios-release-date',
- bios_version='bios-version',
- form_factor='chassis-type',
- product_name='system-product-name',
- product_serial='system-serial-number',
- product_uuid='system-uuid',
- product_version='system-version',
- system_vendor='system-manufacturer'
- )
- for (k, v) in DMI_DICT.items():
- if dmi_bin is not None:
- (rc, out, err) = module.run_command('%s -s %s' % (dmi_bin, v))
- if rc == 0:
- # Strip out commented lines (specific dmidecode output)
- self.facts[k] = ''.join([ line for line in out.split('\n') if not line.startswith('#') ])
- try:
- json.dumps(self.facts[k])
- except UnicodeDecodeError:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
- else:
- self.facts[k] = 'NA'
-
-
-class NetBSDHardware(Hardware):
- """
- NetBSD-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- - devices
- """
- platform = 'NetBSD'
- MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_mount_facts()
- return self.facts
-
- def get_cpu_facts(self):
-
- i = 0
- physid = 0
- sockets = {}
- if not os.access("/proc/cpuinfo", os.R_OK):
- return
- self.facts['processor'] = []
- for line in open("/proc/cpuinfo").readlines():
- data = line.split(":", 1)
- key = data[0].strip()
- # model name is for Intel arch, Processor (mind the uppercase P)
- # works for some ARM devices, like the Sheevaplug.
- if key == 'model name' or key == 'Processor':
- if 'processor' not in self.facts:
- self.facts['processor'] = []
- self.facts['processor'].append(data[1].strip())
- i += 1
- elif key == 'physical id':
- physid = data[1].strip()
- if physid not in sockets:
- sockets[physid] = 1
- elif key == 'cpu cores':
- sockets[physid] = int(data[1].strip())
- if len(sockets) > 0:
- self.facts['processor_count'] = len(sockets)
- self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
- else:
- self.facts['processor_count'] = i
- self.facts['processor_cores'] = 'NA'
-
- def get_memory_facts(self):
- if not os.access("/proc/meminfo", os.R_OK):
- return
- for line in open("/proc/meminfo").readlines():
- data = line.split(":", 1)
- key = data[0]
- if key in NetBSDHardware.MEMORY_FACTS:
- val = data[1].strip().split(' ')[0]
- self.facts["%s_mb" % key.lower()] = long(val) / 1024
-
- def get_mount_facts(self):
- self.facts['mounts'] = []
- fstab = get_file_content('/etc/fstab')
- if fstab:
- for line in fstab.split('\n'):
- if line.startswith('#') or line.strip() == '':
- continue
- fields = re.sub(r'\s+',' ',line.rstrip('\n')).split()
- self.facts['mounts'].append({'mount': fields[1] , 'device': fields[0], 'fstype' : fields[2], 'options': fields[3]})
-
-class AIX(Hardware):
- """
- AIX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor (a list)
- - processor_cores
- - processor_count
- """
- platform = 'AIX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_dmi_facts()
- return self.facts
-
- def get_cpu_facts(self):
- self.facts['processor'] = []
-
-
- rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
- if out:
- i = 0
- for line in out.split('\n'):
-
- if 'Available' in line:
- if i == 0:
- data = line.split(' ')
- cpudev = data[0]
-
- i += 1
- self.facts['processor_count'] = int(i)
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a type")
-
- data = out.split(' ')
- self.facts['processor'] = data[1]
-
- rc, out, err = module.run_command("/usr/sbin/lsattr -El " + cpudev + " -a smt_threads")
-
- data = out.split(' ')
- self.facts['processor_cores'] = int(data[1])
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat -v")
- for line in out.split('\n'):
- data = line.split()
- if 'memory pages' in line:
- pagecount = long(data[0])
- if 'free pages' in line:
- freecount = long(data[0])
- self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
- self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
- # Get swapinfo. swapinfo output looks like:
- # Device 1M-blocks Used Avail Capacity
- # /dev/ada0p3 314368 0 314368 0%
- #
- rc, out, err = module.run_command("/usr/sbin/lsps -s")
- if out:
- lines = out.split('\n')
- data = lines[1].split()
- swaptotal_mb = long(data[0].rstrip('MB'))
- percused = int(data[1].rstrip('%'))
- self.facts['swaptotal_mb'] = swaptotal_mb
- self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)
-
- def get_dmi_facts(self):
- rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
- data = out.split()
- self.facts['firmware_version'] = data[1].strip('IBM,')
-
-class HPUX(Hardware):
- """
- HP-UX-specific subclass of Hardware. Defines memory and CPU facts:
- - memfree_mb
- - memtotal_mb
- - swapfree_mb
- - swaptotal_mb
- - processor
- - processor_cores
- - processor_count
- - model
- - firmware
- """
-
- platform = 'HP-UX'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.get_cpu_facts()
- self.get_memory_facts()
- self.get_hw_facts()
- return self.facts
-
- def get_cpu_facts(self):
- if self.facts['architecture'] == '9000/800':
- rc, out, err = module.run_command("ioscan -FkCprocessor|wc -l")
- self.facts['processor_count'] = int(out.strip())
- #Working with machinfo mess
- elif self.facts['architecture'] == 'ia64':
- if self.facts['distribution_version'] == "B.11.23":
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep 'Number of CPUs'")
- self.facts['processor_count'] = int(out.strip().split('=')[1])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep 'processor family'")
- self.facts['processor'] = re.search('.*(Intel.*)', out).groups()[0].strip()
- rc, out, err = module.run_command("ioscan -FkCprocessor|wc -l")
- self.facts['processor_cores'] = int(out.strip())
- if self.facts['distribution_version'] == "B.11.31":
- #if machinfo return cores strings release B.11.31 > 1204
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep core|wc -l")
- if out.strip()== '0':
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel")
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
- #If hyperthreading is active divide cores by 2
- rc, out, err = module.run_command("/usr/sbin/psrset |grep LCPU")
- data = re.sub(' +',' ',out).strip().split(' ')
- if len(data) == 1:
- hyperthreading = 'OFF'
- else:
- hyperthreading = data[1]
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep logical")
- data = out.strip().split(" ")
- if hyperthreading == 'ON':
- self.facts['processor_cores'] = int(data[0])/2
- else:
- if len(data) == 1:
- self.facts['processor_cores'] = self.facts['processor_count']
- else:
- self.facts['processor_cores'] = int(data[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel |cut -d' ' -f4-")
- self.facts['processor'] = out.strip()
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |egrep 'socket[s]?$' | tail -1")
- self.facts['processor_count'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -e '[0-9] core' |tail -1")
- self.facts['processor_cores'] = int(out.strip().split(" ")[0])
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Intel")
- self.facts['processor'] = out.strip()
-
- def get_memory_facts(self):
- pagesize = 4096
- rc, out, err = module.run_command("/usr/bin/vmstat|tail -1")
- data = int(re.sub(' +',' ',out).split(' ')[5].strip())
- self.facts['memfree_mb'] = pagesize * data / 1024 / 1024
- if self.facts['architecture'] == '9000/800':
- rc, out, err = module.run_command("grep Physical /var/adm/syslog/syslog.log")
- data = re.search('.*Physical: ([0-9]*) Kbytes.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data) / 1024
- else:
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep Memory")
- data = re.search('Memory[\ :=]*([0-9]*).*MB.*',out).groups()[0].strip()
- self.facts['memtotal_mb'] = int(data)
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f -q")
- self.facts['swaptotal_mb'] = int(out.strip())
- rc, out, err = module.run_command("/usr/sbin/swapinfo -m -d -f |egrep '^dev|^fs'")
- swap = 0
- for line in out.strip().split('\n'):
- swap += int(re.sub(' +',' ',line).split(' ')[3].strip())
- self.facts['swapfree_mb'] = swap
-
- def get_hw_facts(self):
- rc, out, err = module.run_command("model")
- self.facts['model'] = out.strip()
- if self.facts['architecture'] == 'ia64':
- rc, out, err = module.run_command("/usr/contrib/bin/machinfo |grep -i 'Firmware revision' |grep -v BMC")
- self.facts['firmware_version'] = out.split(':')[1].strip()
-
-
-class Darwin(Hardware):
- """
- Darwin-specific subclass of Hardware. Defines memory and CPU facts:
- - processor
- - processor_cores
- - memtotal_mb
- - memfree_mb
- - model
- - osversion
- - osrevision
- """
- platform = 'Darwin'
-
- def __init__(self):
- Hardware.__init__(self)
-
- def populate(self):
- self.sysctl = self.get_sysctl()
- self.get_mac_facts()
- self.get_cpu_facts()
- self.get_memory_facts()
- return self.facts
-
- def get_sysctl(self):
- rc, out, err = module.run_command(["/usr/sbin/sysctl", "hw", "machdep", "kern"])
- if rc != 0:
- return dict()
- sysctl = dict()
- for line in out.splitlines():
- if line.rstrip("\n"):
- (key, value) = re.split(' = |: ', line, maxsplit=1)
- sysctl[key] = value.strip()
- return sysctl
-
- def get_system_profile(self):
- rc, out, err = module.run_command(["/usr/sbin/system_profiler", "SPHardwareDataType"])
- if rc != 0:
- return dict()
- system_profile = dict()
- for line in out.splitlines():
- if ': ' in line:
- (key, value) = line.split(': ', 1)
- system_profile[key.strip()] = ' '.join(value.strip().split())
- return system_profile
-
- def get_mac_facts(self):
- self.facts['model'] = self.sysctl['hw.model']
- self.facts['osversion'] = self.sysctl['kern.osversion']
- self.facts['osrevision'] = self.sysctl['kern.osrevision']
-
- def get_cpu_facts(self):
- if 'machdep.cpu.brand_string' in self.sysctl: # Intel
- self.facts['processor'] = self.sysctl['machdep.cpu.brand_string']
- self.facts['processor_cores'] = self.sysctl['machdep.cpu.core_count']
- else: # PowerPC
- system_profile = self.get_system_profile()
- self.facts['processor'] = '%s @ %s' % (system_profile['Processor Name'], system_profile['Processor Speed'])
- self.facts['processor_cores'] = self.sysctl['hw.physicalcpu']
-
- def get_memory_facts(self):
- self.facts['memtotal_mb'] = long(self.sysctl['hw.memsize']) / 1024 / 1024
- self.facts['memfree_mb'] = long(self.sysctl['hw.usermem']) / 1024 / 1024
-
-class Network(Facts):
- """
- This is a generic Network subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you must define:
- - interfaces (a list of interface names)
- - interface_ dictionary of ipv4, ipv6, and mac address information.
-
- All subclasses MUST define platform.
- """
- platform = 'Generic'
-
- IPV6_SCOPE = { '0' : 'global',
- '10' : 'host',
- '20' : 'link',
- '40' : 'admin',
- '50' : 'site',
- '80' : 'organization' }
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Network.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
-class LinuxNetwork(Network):
- """
- This is a Linux-specific subclass of Network. It defines
- - interfaces (a list of interface names)
- - interface_ dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- - ipv4_address and ipv6_address: the first non-local address for each family.
- """
- platform = 'Linux'
-
- def __init__(self):
- Network.__init__(self)
-
- def populate(self):
- ip_path = module.get_bin_path('ip')
- if ip_path is None:
- return self.facts
- default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
- interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
- self.facts['interfaces'] = interfaces.keys()
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
- return self.facts
-
- def get_default_interfaces(self, ip_path):
- # Use the commands:
- # ip -4 route get 8.8.8.8 -> Google public DNS
- # ip -6 route get 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
- command = dict(
- v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
- v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
- )
- interface = dict(v4 = {}, v6 = {})
- for v in 'v4', 'v6':
- if v == 'v6' and self.facts['os_family'] == 'RedHat' \
- and self.facts['distribution_version'].startswith('4.'):
- continue
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- words = out.split('\n')[0].split()
- # A valid output starts with the queried address on the first line
- if len(words) > 0 and words[0] == command[v][-1]:
- for i in range(len(words) - 1):
- if words[i] == 'dev':
- interface[v]['interface'] = words[i+1]
- elif words[i] == 'src':
- interface[v]['address'] = words[i+1]
- elif words[i] == 'via' and words[i+1] != command[v][-1]:
- interface[v]['gateway'] = words[i+1]
- return interface['v4'], interface['v6']
-
- def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
- interfaces = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
-
- for path in glob.glob('/sys/class/net/*'):
- if not os.path.isdir(path):
- continue
- device = os.path.basename(path)
- interfaces[device] = { 'device': device }
- if os.path.exists(os.path.join(path, 'address')):
- macaddress = open(os.path.join(path, 'address')).read().strip()
- if macaddress and macaddress != '00:00:00:00:00:00':
- interfaces[device]['macaddress'] = macaddress
- if os.path.exists(os.path.join(path, 'mtu')):
- interfaces[device]['mtu'] = int(open(os.path.join(path, 'mtu')).read().strip())
- if os.path.exists(os.path.join(path, 'operstate')):
- interfaces[device]['active'] = open(os.path.join(path, 'operstate')).read().strip() != 'down'
-# if os.path.exists(os.path.join(path, 'carrier')):
-# interfaces[device]['link'] = open(os.path.join(path, 'carrier')).read().strip() == '1'
- if os.path.exists(os.path.join(path, 'device','driver', 'module')):
- interfaces[device]['module'] = os.path.basename(os.path.realpath(os.path.join(path, 'device', 'driver', 'module')))
- if os.path.exists(os.path.join(path, 'type')):
- type = open(os.path.join(path, 'type')).read().strip()
- if type == '1':
- interfaces[device]['type'] = 'ether'
- elif type == '512':
- interfaces[device]['type'] = 'ppp'
- elif type == '772':
- interfaces[device]['type'] = 'loopback'
- if os.path.exists(os.path.join(path, 'bridge')):
- interfaces[device]['type'] = 'bridge'
- interfaces[device]['interfaces'] = [ os.path.basename(b) for b in glob.glob(os.path.join(path, 'brif', '*')) ]
- if os.path.exists(os.path.join(path, 'bridge', 'bridge_id')):
- interfaces[device]['id'] = open(os.path.join(path, 'bridge', 'bridge_id')).read().strip()
- if os.path.exists(os.path.join(path, 'bridge', 'stp_state')):
- interfaces[device]['stp'] = open(os.path.join(path, 'bridge', 'stp_state')).read().strip() == '1'
- if os.path.exists(os.path.join(path, 'bonding')):
- interfaces[device]['type'] = 'bonding'
- interfaces[device]['slaves'] = open(os.path.join(path, 'bonding', 'slaves')).read().split()
- interfaces[device]['mode'] = open(os.path.join(path, 'bonding', 'mode')).read().split()[0]
- interfaces[device]['miimon'] = open(os.path.join(path, 'bonding', 'miimon')).read().split()[0]
- interfaces[device]['lacp_rate'] = open(os.path.join(path, 'bonding', 'lacp_rate')).read().split()[0]
- primary = open(os.path.join(path, 'bonding', 'primary')).read()
- if primary:
- interfaces[device]['primary'] = primary
- path = os.path.join(path, 'bonding', 'all_slaves_active')
- if os.path.exists(path):
- interfaces[device]['all_slaves_active'] = open(path).read() == '1'
-
-            # Check whether an interface is in promiscuous mode
- if os.path.exists(os.path.join(path,'flags')):
- promisc_mode = False
- # The second byte indicates whether the interface is in promiscuous mode.
- # 1 = promisc
- # 0 = no promisc
- data = int(open(os.path.join(path, 'flags')).read().strip(),16)
- promisc_mode = (data & 0x0100 > 0)
- interfaces[device]['promisc'] = promisc_mode
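
The 0x0100 mask is the IFF_PROMISC bit from the kernel's interface flag word. The same test in isolation, with made-up flags values:

IFF_PROMISC = 0x0100   # promiscuous-mode bit in /sys/class/net/<dev>/flags

def is_promisc(flags_text):
    # flags_text is the raw hex string from the sysfs file, e.g. '0x1103'
    return bool(int(flags_text, 16) & IFF_PROMISC)

assert is_promisc('0x1103')        # promiscuous bit set
assert not is_promisc('0x1003')    # promiscuous bit clear
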
-
- def parse_ip_output(output, secondary=False):
- for line in output.split('\n'):
- if not line:
- continue
- words = line.split()
- if words[0] == 'inet':
- if '/' in words[1]:
- address, netmask_length = words[1].split('/')
- else:
- # pointopoint interfaces do not have a prefix
- address = words[1]
- netmask_length = "32"
- address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
- netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
- netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
- network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- iface = words[-1]
- if iface != device:
- interfaces[iface] = {}
- if not secondary or "ipv4" not in interfaces[iface]:
- interfaces[iface]['ipv4'] = {'address': address,
- 'netmask': netmask,
- 'network': network}
- else:
- if "ipv4_secondaries" not in interfaces[iface]:
- interfaces[iface]["ipv4_secondaries"] = []
- interfaces[iface]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # add this secondary IP to the main device
- if secondary:
- if "ipv4_secondaries" not in interfaces[device]:
- interfaces[device]["ipv4_secondaries"] = []
- interfaces[device]["ipv4_secondaries"].append({
- 'address': address,
- 'netmask': netmask,
- 'network': network,
- })
-
- # If this is the default address, update default_ipv4
- if 'address' in default_ipv4 and default_ipv4['address'] == address:
- default_ipv4['netmask'] = netmask
- default_ipv4['network'] = network
- default_ipv4['macaddress'] = macaddress
- default_ipv4['mtu'] = interfaces[device]['mtu']
- default_ipv4['type'] = interfaces[device].get("type", "unknown")
- default_ipv4['alias'] = words[-1]
- if not address.startswith('127.'):
- ips['all_ipv4_addresses'].append(address)
- elif words[0] == 'inet6':
- address, prefix = words[1].split('/')
- scope = words[3]
- if 'ipv6' not in interfaces[device]:
- interfaces[device]['ipv6'] = []
- interfaces[device]['ipv6'].append({
- 'address' : address,
- 'prefix' : prefix,
- 'scope' : scope
- })
- # If this is the default address, update default_ipv6
- if 'address' in default_ipv6 and default_ipv6['address'] == address:
- default_ipv6['prefix'] = prefix
- default_ipv6['scope'] = scope
- default_ipv6['macaddress'] = macaddress
- default_ipv6['mtu'] = interfaces[device]['mtu']
- default_ipv6['type'] = interfaces[device].get("type", "unknown")
- if not address == '::1':
- ips['all_ipv6_addresses'].append(address)
-
- ip_path = module.get_bin_path("ip")
- primary_data = subprocess.Popen(
- [ip_path, 'addr', 'show', 'primary', device],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
- secondary_data = subprocess.Popen(
- [ip_path, 'addr', 'show', 'secondary', device],
- stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0]
- parse_ip_output(primary_data)
- parse_ip_output(secondary_data, secondary=True)
-
-        # replace ':' with '_' in interface names, since colons are hard to use in templates
- new_interfaces = {}
- for i in interfaces:
- if ':' in i:
- new_interfaces[i.replace(':','_')] = interfaces[i]
- else:
- new_interfaces[i] = interfaces[i]
- return new_interfaces, ips
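
The netmask and network values computed in parse_ip_output() come from plain integer arithmetic: (1<<32) - (1<<32 >> prefix) builds a mask with `prefix` leading one-bits. A worked example with an illustrative address:

import socket
import struct

address, prefix = '192.168.1.10', 24

address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
netmask_bin = (1 << 32) - (1 << 32 >> prefix)        # 24 -> 0xffffff00

netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
print(netmask, network)   # 255.255.255.0 192.168.1.0
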
-
-class GenericBsdIfconfigNetwork(Network):
- """
- This is a generic BSD subclass of Network using the ifconfig command.
- It defines
- - interfaces (a list of interface names)
- - interface_ dictionary of ipv4, ipv6, and mac address information.
- - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
- It currently does not define
- - default_ipv4 and default_ipv6
- - type, mtu and network on interfaces
- """
- platform = 'Generic_BSD_Ifconfig'
-
- def __init__(self):
- Network.__init__(self)
-
- def populate(self):
-
- ifconfig_path = module.get_bin_path('ifconfig')
-
- if ifconfig_path is None:
- return self.facts
- route_path = module.get_bin_path('route')
-
- if route_path is None:
- return self.facts
-
- default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
- interfaces, ips = self.get_interfaces_info(ifconfig_path)
- self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
- self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
- self.facts['interfaces'] = interfaces.keys()
-
- for iface in interfaces:
- self.facts[iface] = interfaces[iface]
-
- self.facts['default_ipv4'] = default_ipv4
- self.facts['default_ipv6'] = default_ipv6
- self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
- self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
-
- return self.facts
-
- def get_default_interfaces(self, route_path):
-
- # Use the commands:
- # route -n get 8.8.8.8 -> Google public DNS
- # route -n get -inet6 2404:6800:400a:800::1012 -> ipv6.google.com
- # to find out the default outgoing interface, address, and gateway
-
- command = dict(
- v4 = [route_path, '-n', 'get', '8.8.8.8'],
- v6 = [route_path, '-n', 'get', '-inet6', '2404:6800:400a:800::1012']
- )
-
- interface = dict(v4 = {}, v6 = {})
-
- for v in 'v4', 'v6':
-
- if v == 'v6' and not socket.has_ipv6:
- continue
- rc, out, err = module.run_command(command[v])
- if not out:
- # v6 routing may result in
- # RTNETLINK answers: Invalid argument
- continue
- lines = out.split('\n')
- for line in lines:
- words = line.split()
- # Collect output from route command
- if len(words) > 1:
- if words[0] == 'interface:':
- interface[v]['interface'] = words[1]
- if words[0] == 'gateway:':
- interface[v]['gateway'] = words[1]
-
- return interface['v4'], interface['v6']
-
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- # FreeBSD, DragonflyBSD, NetBSD, OpenBSD and OS X all implicitly add '-a'
- # when running the command 'ifconfig'.
- # Solaris must explicitly run the command 'ifconfig -a'.
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- return interfaces, ips
-
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['mtu'] = words[3]
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
- def parse_options_line(self, words, current_if, ips):
- # Mac has options like this...
- current_if['options'] = self.get_options(words[0])
-
- def parse_nd6_line(self, words, current_if, ips):
-        # FreeBSD has options like this...
- current_if['options'] = self.get_options(words[1])
-
- def parse_ether_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = words[1]
- if len(words) > 2:
- current_if['media_select'] = words[2]
- if len(words) > 3:
- current_if['media_type'] = words[3][1:]
- if len(words) > 4:
- current_if['media_options'] = self.get_options(words[4])
-
- def parse_status_line(self, words, current_if, ips):
- current_if['status'] = words[1]
-
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['lladdr'] = words[1]
-
- def parse_inet_line(self, words, current_if, ips):
- address = {'address': words[1]}
- # deal with hex netmask
- if re.match('([0-9a-f]){8}', words[3]) and len(words[3]) == 8:
- words[3] = '0x' + words[3]
- if words[3].startswith('0x'):
- address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
- else:
- # otherwise assume this is a dotted quad
- address['netmask'] = words[3]
- # calculate the network
- address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
- netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
- address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
- # broadcast may be given or we need to calculate
- if len(words) > 5:
- address['broadcast'] = words[5]
- else:
- address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
- # add to our list of addresses
- if not words[1].startswith('127.'):
- ips['all_ipv4_addresses'].append(address['address'])
- current_if['ipv4'].append(address)
-
- def parse_inet6_line(self, words, current_if, ips):
- address = {'address': words[1]}
- if (len(words) >= 4) and (words[2] == 'prefixlen'):
- address['prefix'] = words[3]
- if (len(words) >= 6) and (words[4] == 'scopeid'):
- address['scope'] = words[5]
- localhost6 = ['::1', '::1/128', 'fe80::1%lo0']
- if address['address'] not in localhost6:
- ips['all_ipv6_addresses'].append(address['address'])
- current_if['ipv6'].append(address)
-
- def parse_unknown_line(self, words, current_if, ips):
- # we are going to ignore unknown lines here - this may be
- # a bad idea - but you can override it in your subclass
- pass
-
- def get_options(self, option_string):
- start = option_string.find('<') + 1
- end = option_string.rfind('>')
- if (start > 0) and (end > 0) and (end > start + 1):
- option_csv = option_string[start:end]
- return option_csv.split(',')
- else:
- return []
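
get_options() is a plain substring slice between '<' and '>'. The method is duplicated here as a self-contained demo against a typical ifconfig flags token:

def get_options(option_string):
    # copy of the method above, for a standalone demonstration
    start = option_string.find('<') + 1
    end = option_string.rfind('>')
    if (start > 0) and (end > 0) and (end > start + 1):
        return option_string[start:end].split(',')
    return []

print(get_options('flags=8863<UP,BROADCAST,SMART,RUNNING,SIMPLEX,MULTICAST>'))
# -> ['UP', 'BROADCAST', 'SMART', 'RUNNING', 'SIMPLEX', 'MULTICAST']
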
-
- def merge_default_interface(self, defaults, interfaces, ip_type):
- if not 'interface' in defaults.keys():
- return
- if not defaults['interface'] in interfaces:
- return
- ifinfo = interfaces[defaults['interface']]
- # copy all the interface values across except addresses
- for item in ifinfo.keys():
- if item != 'ipv4' and item != 'ipv6':
- defaults[item] = ifinfo[item]
- if len(ifinfo[ip_type]) > 0:
- for item in ifinfo[ip_type][0].keys():
- defaults[item] = ifinfo[ip_type][0][item]
-
-class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the Mac OS X/Darwin Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged
- """
- platform = 'Darwin'
-
-    # media line is different from the default FreeBSD one
- def parse_media_line(self, words, current_if, ips):
- # not sure if this is useful - we also drop information
- current_if['media'] = 'Unknown' # Mac does not give us this
- current_if['media_select'] = words[1]
- if len(words) > 2:
- current_if['media_type'] = words[2][1:]
- if len(words) > 3:
- current_if['media_options'] = self.get_options(words[3])
-
-
-class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the FreeBSD Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'FreeBSD'
-
-class AIXNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the AIX Network Class.
- It uses the GenericBsdIfconfigNetwork unchanged.
- """
- platform = 'AIX'
-
- # AIX 'ifconfig -a' does not have three words in the interface line
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- # only this condition differs from GenericBsdIfconfigNetwork
- if re.match('^\w*\d*:', line):
- current_if = self.parse_interface_line(words)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- return interfaces, ips
-
-    # AIX 'ifconfig -a' does not report the MTU, so remove current_if['mtu'] here
- def parse_interface_line(self, words):
- device = words[0][0:-1]
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- current_if['flags'] = self.get_options(words[1])
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
-class OpenBSDNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the OpenBSD Network Class.
- It uses the GenericBsdIfconfigNetwork.
- """
- platform = 'OpenBSD'
-
- # Return macaddress instead of lladdr
- def parse_lladdr_line(self, words, current_if, ips):
- current_if['macaddress'] = words[1]
-
-class SunOSNetwork(GenericBsdIfconfigNetwork, Network):
- """
- This is the SunOS Network Class.
- It uses the GenericBsdIfconfigNetwork.
-
- Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
- so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
- """
- platform = 'SunOS'
-
- # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
- # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
- # 'parse_interface_line()' checks for previously seen interfaces before defining
- # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
- def get_interfaces_info(self, ifconfig_path):
- interfaces = {}
- current_if = {}
- ips = dict(
- all_ipv4_addresses = [],
- all_ipv6_addresses = [],
- )
- rc, out, err = module.run_command([ifconfig_path, '-a'])
-
- for line in out.split('\n'):
-
- if line:
- words = line.split()
-
- if re.match('^\S', line) and len(words) > 3:
- current_if = self.parse_interface_line(words, current_if, interfaces)
- interfaces[ current_if['device'] ] = current_if
- elif words[0].startswith('options='):
- self.parse_options_line(words, current_if, ips)
- elif words[0] == 'nd6':
- self.parse_nd6_line(words, current_if, ips)
- elif words[0] == 'ether':
- self.parse_ether_line(words, current_if, ips)
- elif words[0] == 'media:':
- self.parse_media_line(words, current_if, ips)
- elif words[0] == 'status:':
- self.parse_status_line(words, current_if, ips)
- elif words[0] == 'lladdr':
- self.parse_lladdr_line(words, current_if, ips)
- elif words[0] == 'inet':
- self.parse_inet_line(words, current_if, ips)
- elif words[0] == 'inet6':
- self.parse_inet6_line(words, current_if, ips)
- else:
- self.parse_unknown_line(words, current_if, ips)
-
- # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
-    # ipv4/ipv6 lists, which is ugly and hard to read.
- # This quick hack merges the dictionaries. Purely cosmetic.
- for iface in interfaces:
- for v in 'ipv4', 'ipv6':
- combined_facts = {}
- for facts in interfaces[iface][v]:
- combined_facts.update(facts)
- if len(combined_facts.keys()) > 0:
- interfaces[iface][v] = [combined_facts]
-
- return interfaces, ips
-
- def parse_interface_line(self, words, current_if, interfaces):
- device = words[0][0:-1]
- if device not in interfaces.keys():
- current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
- else:
- current_if = interfaces[device]
- flags = self.get_options(words[1])
- if 'IPv4' in flags:
- v = 'ipv4'
- if 'IPv6' in flags:
- v = 'ipv6'
- current_if[v].append({'flags': flags, 'mtu': words[3]})
- current_if['macaddress'] = 'unknown' # will be overwritten later
- return current_if
-
- # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
- # Add leading zero to each octet where needed.
- def parse_ether_line(self, words, current_if, ips):
- macaddress = ''
- for octet in words[1].split(':'):
- octet = ('0' + octet)[-2:None]
- macaddress += (octet + ':')
- current_if['macaddress'] = macaddress[0:-1]
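
The ('0' + octet)[-2:] slice left-pads one-character octets and passes two-character octets through unchanged. The whole normalization as a short helper:

def normalize_mac(mac):
    # '0:1:2:d:e:f' -> '00:01:02:0d:0e:0f'; two-char octets are unaffected
    return ':'.join(('0' + octet)[-2:] for octet in mac.split(':'))

assert normalize_mac('0:1:2:d:e:f') == '00:01:02:0d:0e:0f'
assert normalize_mac('de:ad:be:ef:00:01') == 'de:ad:be:ef:00:01'
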
-
-class Virtual(Facts):
- """
- This is a generic Virtual subclass of Facts. This should be further
- subclassed to implement per platform. If you subclass this,
- you should define:
- - virtualization_type
- - virtualization_role
-      - container (e.g. Solaris zones, FreeBSD jails, Linux containers)
-
- All subclasses MUST define platform.
- """
-
- def __new__(cls, *arguments, **keyword):
- subclass = cls
- for sc in Virtual.__subclasses__():
- if sc.platform == platform.system():
- subclass = sc
- return super(cls, subclass).__new__(subclass, *arguments, **keyword)
-
- def __init__(self):
- Facts.__init__(self)
-
- def populate(self):
- return self.facts
-
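
Virtual uses the same __new__ dispatch trick as the other fact classes in this file: the base constructor swaps in whichever subclass declares a matching platform. A minimal sketch of the pattern (not the exact code above):

import platform

class Base(object):
    platform = None

    def __new__(cls, *args, **kwargs):
        subclass = cls
        for sc in cls.__subclasses__():
            if sc.platform == platform.system():
                subclass = sc
        return super(Base, cls).__new__(subclass)

class LinuxBase(Base):
    platform = 'Linux'

# On a Linux host, Base() transparently constructs a LinuxBase instance:
# >>> type(Base()).__name__  ->  'LinuxBase'
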
-class LinuxVirtual(Virtual):
- """
- This is a Linux-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'Linux'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- # For more information, check: http://people.redhat.com/~rjones/virt-what/
- def get_virtual_facts(self):
- if os.path.exists("/proc/xen"):
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- try:
- for line in open('/proc/xen/capabilities'):
- if "control_d" in line:
- self.facts['virtualization_role'] = 'host'
- except IOError:
- pass
- return
-
- if os.path.exists('/proc/vz'):
- self.facts['virtualization_type'] = 'openvz'
- if os.path.exists('/proc/bc'):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/1/cgroup'):
- for line in open('/proc/1/cgroup').readlines():
- if re.search('/lxc/', line):
- self.facts['virtualization_type'] = 'lxc'
- self.facts['virtualization_role'] = 'guest'
- return
-
- product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')
-
- if product_name in ['KVM', 'Bochs']:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'RHEV Hypervisor':
- self.facts['virtualization_type'] = 'RHEV'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if product_name == 'VMware Virtual Platform':
- self.facts['virtualization_type'] = 'VMware'
- self.facts['virtualization_role'] = 'guest'
- return
-
- bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')
-
- if bios_vendor == 'Xen':
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if bios_vendor == 'innotek GmbH':
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- return
-
- sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')
-
-        # FIXME: This also matches Hyper-V
- if sys_vendor == 'Microsoft Corporation':
- self.facts['virtualization_type'] = 'VirtualPC'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if sys_vendor == 'Parallels Software International Inc.':
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/self/status'):
- for line in open('/proc/self/status').readlines():
- if re.match('^VxID: \d+', line):
- self.facts['virtualization_type'] = 'linux_vserver'
- if re.match('^VxID: 0', line):
- self.facts['virtualization_role'] = 'host'
- else:
- self.facts['virtualization_role'] = 'guest'
- return
-
- if os.path.exists('/proc/cpuinfo'):
- for line in open('/proc/cpuinfo').readlines():
- if re.match('^model name.*QEMU Virtual CPU', line):
- self.facts['virtualization_type'] = 'kvm'
- elif re.match('^vendor_id.*User Mode Linux', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^model name.*UML', line):
- self.facts['virtualization_type'] = 'uml'
- elif re.match('^vendor_id.*PowerVM Lx86', line):
- self.facts['virtualization_type'] = 'powervm_lx86'
- elif re.match('^vendor_id.*IBM/S390', line):
- self.facts['virtualization_type'] = 'ibm_systemz'
- else:
- continue
- self.facts['virtualization_role'] = 'guest'
- return
-
- # Beware that we can have both kvm and virtualbox running on a single system
- if os.path.exists("/proc/modules") and os.access('/proc/modules', os.R_OK):
- modules = []
- for line in open("/proc/modules").readlines():
- data = line.split(" ", 1)
- modules.append(data[0])
-
- if 'kvm' in modules:
- self.facts['virtualization_type'] = 'kvm'
- self.facts['virtualization_role'] = 'host'
- return
-
- if 'vboxdrv' in modules:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'host'
- return
-
-class HPUXVirtual(Virtual):
- """
- This is a HP-UX specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- """
- platform = 'HP-UX'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- if os.path.exists('/usr/sbin/vecheck'):
- rc, out, err = module.run_command("/usr/sbin/vecheck")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP vPar'
- if os.path.exists('/opt/hpvm/bin/hpvminfo'):
- rc, out, err = module.run_command("/opt/hpvm/bin/hpvminfo")
- if rc == 0 and re.match('.*Running.*HPVM vPar.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM vPar'
- elif rc == 0 and re.match('.*Running.*HPVM guest.*', out):
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HPVM IVM'
- elif rc == 0 and re.match('.*Running.*HPVM host.*', out):
- self.facts['virtualization_type'] = 'host'
- self.facts['virtualization_role'] = 'HPVM'
- if os.path.exists('/usr/sbin/parstatus'):
- rc, out, err = module.run_command("/usr/sbin/parstatus")
- if rc == 0:
- self.facts['virtualization_type'] = 'guest'
- self.facts['virtualization_role'] = 'HP nPar'
-
-
-class SunOSVirtual(Virtual):
- """
- This is a SunOS-specific subclass of Virtual. It defines
- - virtualization_type
- - virtualization_role
- - container
- """
- platform = 'SunOS'
-
- def __init__(self):
- Virtual.__init__(self)
-
- def populate(self):
- self.get_virtual_facts()
- return self.facts
-
- def get_virtual_facts(self):
- rc, out, err = module.run_command("/usr/sbin/prtdiag")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'Parallels' in line:
- self.facts['virtualization_type'] = 'parallels'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
- if 'HVM domU' in line:
- self.facts['virtualization_type'] = 'xen'
- self.facts['virtualization_role'] = 'guest'
- # Check if it's a zone
- if os.path.exists("/usr/bin/zonename"):
- rc, out, err = module.run_command("/usr/bin/zonename")
- if out.rstrip() != "global":
- self.facts['container'] = 'zone'
- # Check if it's a branded zone (i.e. Solaris 8/9 zone)
- if os.path.isdir('/.SUNWnative'):
- self.facts['container'] = 'zone'
- # If it's a zone check if we can detect if our global zone is itself virtualized.
- # Relies on the "guest tools" (e.g. vmware tools) to be installed
- if 'container' in self.facts and self.facts['container'] == 'zone':
- rc, out, err = module.run_command("/usr/sbin/modinfo")
- for line in out.split('\n'):
- if 'VMware' in line:
- self.facts['virtualization_type'] = 'vmware'
- self.facts['virtualization_role'] = 'guest'
- if 'VirtualBox' in line:
- self.facts['virtualization_type'] = 'virtualbox'
- self.facts['virtualization_role'] = 'guest'
-
-def get_file_content(path, default=None):
- data = default
- if os.path.exists(path) and os.access(path, os.R_OK):
- data = open(path).read().strip()
- if len(data) == 0:
- data = default
- return data
-
-def ansible_facts():
- facts = {}
- facts.update(Facts().populate())
- facts.update(Hardware().populate())
- facts.update(Network().populate())
- facts.update(Virtual().populate())
- return facts
-
-# ===========================================
def run_setup(module):
- setup_options = {}
- facts = ansible_facts()
+ setup_options = dict(module_setup=True)
+ facts = ansible_facts(module)
for (k, v) in facts.items():
setup_options["ansible_%s" % k.replace('-', '_')] = v
# Look for the path to the facter and ohai binary and set
# the variable to that path.
-
facter_path = module.get_bin_path('facter')
ohai_path = module.get_bin_path('ohai')
# if facter is installed, and we can use --json because
# ruby-json is ALSO installed, include facter data in the JSON
-
if facter_path is not None:
rc, out, err = module.run_command(facter_path + " --json")
facter = True
@@ -2322,7 +99,6 @@ def run_setup(module):
setup_options["facter_%s" % k] = v
# ditto for ohai
-
if ohai_path is not None:
rc, out, err = module.run_command(ohai_path)
ohai = True
@@ -2359,5 +135,9 @@ def main():
module.exit_json(**data)
# import module snippets
+
from ansible.module_utils.basic import *
+
+from ansible.module_utils.facts import *
+
main()
diff --git a/system/sysctl b/system/sysctl
index 97e5bc5e6c1..ab1da5e0959 100644
--- a/system/sysctl
+++ b/system/sysctl
@@ -144,9 +144,13 @@ class SysctlModule(object):
if self.file_values[thisname] is None and self.args['state'] == "present":
self.changed = True
self.write_file = True
+ elif self.file_values[thisname] is None and self.args['state'] == "absent":
+ self.changed = False
elif self.file_values[thisname] != self.args['value']:
self.changed = True
self.write_file = True
+
+ # use the sysctl command or not?
if self.args['sysctl_set']:
if self.proc_value is None:
self.changed = True
@@ -235,7 +239,16 @@ class SysctlModule(object):
# Get the token value from the sysctl file
def read_sysctl_file(self):
- lines = open(self.sysctl_file, "r").readlines()
+
+ lines = []
+ if os.path.isfile(self.sysctl_file):
+ try:
+ f = open(self.sysctl_file, "r")
+ lines = f.readlines()
+ f.close()
+ except IOError, e:
+ self.module.fail_json(msg="Failed to open %s: %s" % (self.sysctl_file, str(e)))
+
for line in lines:
line = line.strip()
self.file_lines.append(line)
diff --git a/system/ufw b/system/ufw
new file mode 100644
index 00000000000..8496997b279
--- /dev/null
+++ b/system/ufw
@@ -0,0 +1,261 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Ahti Kitsik
+# (c) 2014, Jarno Keskikangas
+# (c) 2013, Aleksey Ovcharenko
+# (c) 2013, James Martin
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: ufw
+short_description: Manage firewall with UFW
+description:
+ - Manage firewall with UFW.
+version_added: 1.6
+author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik
+notes:
+ - See C(man ufw) for more examples.
+requirements:
+ - C(ufw) package
+options:
+ state:
+ description:
+ - C(enabled) reloads firewall and enables firewall on boot.
+ - C(disabled) unloads firewall and disables firewall on boot.
+ - C(reloaded) reloads firewall.
+ - C(reset) disables and resets firewall to installation defaults.
+ required: false
+ choices: ['enabled', 'disabled', 'reloaded', 'reset']
+ policy:
+ description:
+ - Change the default policy for incoming or outgoing traffic.
+ required: false
+    aliases: ['default']
+ choices: ['allow', 'deny', 'reject']
+ direction:
+ description:
+ - Select direction for a rule or default policy command.
+ required: false
+ choices: ['in', 'out', 'incoming', 'outgoing']
+ logging:
+ description:
+ - Toggles logging. Logged packets use the LOG_KERN syslog facility.
+ choices: ['on', 'off', 'low', 'medium', 'high', 'full']
+ required: false
+ insert:
+ description:
+      - Insert the corresponding rule as rule number NUM.
+ required: false
+ rule:
+ description:
+      - Add a firewall rule.
+ required: false
+ choices: ['allow', 'deny', 'reject', 'limit']
+ log:
+ description:
+      - Log new connections matched to this rule.
+ required: false
+ choices: ['yes', 'no']
+ from_ip:
+ description:
+ - Source IP address.
+ required: false
+ aliases: ['from', 'src']
+ default: 'any'
+ from_port:
+ description:
+ - Source port.
+ required: false
+ to_ip:
+ description:
+ - Destination IP address.
+ required: false
+ aliases: ['to', 'dest']
+ default: 'any'
+ to_port:
+ description:
+ - Destination port.
+ required: false
+ aliases: ['port']
+ proto:
+ description:
+ - TCP/IP protocol.
+ choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']
+ required: false
+ name:
+ description:
+      - Use a profile located in C(/etc/ufw/applications.d).
+ required: false
+ aliases: ['app']
+ delete:
+ description:
+ - Delete rule.
+ required: false
+ choices: ['yes', 'no']
+'''
+
+EXAMPLES = '''
+# Allow everything and enable UFW
+ufw: state=enabled policy=allow
+
+# Set logging
+ufw: logging=on
+
+# Sometimes it is desirable to let the sender know when traffic is
+# being denied, rather than simply ignoring it. In these cases, use
+# reject instead of deny. In addition, log rejected connections:
+ufw: rule=reject port=auth log=yes
+
+# ufw supports connection rate limiting, which is useful for protecting
+# against brute-force login attacks. ufw will deny connections if an IP
+# address has attempted to initiate 6 or more connections in the last
+# 30 seconds. See http://www.debian-administration.org/articles/187
+# for details. Typical usage is:
+ufw: rule=limit port=ssh proto=tcp
+
+# Allow OpenSSH
+ufw: rule=allow name=OpenSSH
+
+# Delete OpenSSH rule
+ufw: rule=allow name=OpenSSH delete=yes
+
+# Deny all access to port 53:
+ufw: rule=deny port=53
+
+# Allow all access to tcp port 80:
+ufw: rule=allow port=80 proto=tcp
+
+# Allow all access from RFC1918 networks to this host:
+ufw: rule=allow src={{ item }}
+with_items:
+- 10.0.0.0/8
+- 172.16.0.0/12
+- 192.168.0.0/16
+
+# Deny access to udp port 514 from host 1.2.3.4:
+ufw: rule=deny proto=udp src=1.2.3.4 port=514
+
+# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
+ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469
+
+# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host.
+# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
+ufw: rule=deny proto=tcp src=2001:db8::/32 port=25
+'''
+
+from operator import itemgetter
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']),
+ default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']),
+ logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']),
+ direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']),
+ delete = dict(default=False, type='bool'),
+ insert = dict(default=None),
+ rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']),
+ interface = dict(default=None, aliases=['if']),
+ log = dict(default=False, type='bool'),
+ from_ip = dict(default='any', aliases=['src', 'from']),
+ from_port = dict(default=None),
+ to_ip = dict(default='any', aliases=['dest', 'to']),
+ to_port = dict(default=None, aliases=['port']),
+ proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']),
+ app = dict(default=None, aliases=['name'])
+ ),
+ supports_check_mode = True,
+ mutually_exclusive = [['app', 'proto', 'logging']]
+ )
+
+ cmds = []
+
+ def execute(cmd):
+ cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))
+
+ cmds.append(cmd)
+ (rc, out, err) = module.run_command(cmd)
+
+ if rc != 0:
+ module.fail_json(msg=err or out)
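
execute() treats cmd as a list of [condition, text] pairs (one-element [text] entries are unconditional): filter(itemgetter(0), ...) keeps entries whose first element is truthy, and map(itemgetter(-1), ...) takes each survivor's last element. The same expression in isolation, with illustrative entries:

from operator import itemgetter

cmd = [['ufw'], [False, '--dry-run'], [True, 'delete'], [None, 'insert 3'], ['allow']]
print(' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))))
# -> ufw delete allow
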
+
+ params = module.params
+
+    # Ensure at least one of the command arguments is given
+ command_keys = ['state', 'default', 'rule', 'logging']
+ commands = dict((key, params[key]) for key in command_keys if params[key])
+
+ if len(commands) < 1:
+        module.fail_json(msg="none of the command arguments %s given" % command_keys)
+
+ # Ensure ufw is available
+ ufw_bin = module.get_bin_path('ufw', True)
+
+ # Save the pre state and rules in order to recognize changes
+ (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose')
+ (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")
+
+ # Execute commands
+ for (command, value) in commands.iteritems():
+ cmd = [[ufw_bin], [module.check_mode, '--dry-run']]
+
+ if command == 'state':
+ states = { 'enabled': 'enable', 'disabled': 'disable',
+ 'reloaded': 'reload', 'reset': 'reset' }
+ execute(cmd + [['-f'], [states[value]]])
+
+ elif command == 'logging':
+ execute(cmd + [[command], [value]])
+
+ elif command == 'default':
+ execute(cmd + [[command], [value], [params['direction']]])
+
+ elif command == 'rule':
+ # Rules are constructed according to the long format
+ #
+ # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
+ # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
+ # [proto protocol] [app application]
+ cmd.append([module.boolean(params['delete']), 'delete'])
+ cmd.append([params['insert'], "insert %s" % params['insert']])
+ cmd.append([value])
+ cmd.append([module.boolean(params['log']), 'log'])
+
+ for (key, template) in [('direction', "%s" ), ('interface', "on %s" ),
+ ('from_ip', "from %s" ), ('from_port', "port %s" ),
+ ('to_ip', "to %s" ), ('to_port', "port %s" ),
+ ('proto', "proto %s"), ('app', "app '%s'")]:
+
+ value = params[key]
+ cmd.append([value, template % (value)])
+
+ execute(cmd)
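
As a concrete illustration, the rule from the EXAMPLES block above (rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469) collapses to the single long-format call `ufw allow in on eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 proto udp`, with `--dry-run` injected after `ufw` when running in check mode.
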
+
+ # Get the new state
+ (_, post_state, _) = module.run_command(ufw_bin + ' status verbose')
+ (_, post_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules")
+ changed = (pre_state != post_state) or (pre_rules != post_rules)
+
+ return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())
+
+# import module snippets
+from ansible.module_utils.basic import *
+
+main()
diff --git a/system/user b/system/user
index a6d3a0ec32d..8c649c0607c 100644
--- a/system/user
+++ b/system/user
@@ -61,6 +61,8 @@ options:
except the primary group.
append:
required: false
+ default: "no"
+ choices: [ "yes", "no" ]
description:
- If C(yes), will only add groups, not set them to just the list
in I(groups).
@@ -181,6 +183,9 @@ EXAMPLES = '''
# Add the user 'johnd' with a specific uid and a primary group of 'admin'
- user: name=johnd comment="John Doe" uid=1040
+# Add the user 'james' with a bash shell, appending the group 'admins' and 'developers' to the user's groups
+- user: name=james shell=/bin/bash groups=admins,developers append=yes
+
# Remove the user 'johnd'
- user: name=johnd state=absent remove=yes
@@ -1186,6 +1191,7 @@ class SunOS(User):
lines.append(line)
continue
fields[1] = self.password
+ fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
@@ -1272,6 +1278,7 @@ class SunOS(User):
lines.append(line)
continue
fields[1] = self.password
+ fields[2] = str(int(time.time() / 86400))
line = ':'.join(fields)
lines.append('%s\n' % line)
open(self.SHADOWFILE, 'w+').writelines(lines)
diff --git a/utilities/accelerate b/utilities/accelerate
index a6e84e32376..5a8c96c64a9 100644
--- a/utilities/accelerate
+++ b/utilities/accelerate
@@ -53,6 +53,14 @@ options:
if this parameter is set to true.
required: false
default: false
+ multi_key:
+ description:
+ - When enabled, the daemon will open a local socket file which can be used by future daemon executions to
+ upload a new key to the already running daemon, so that multiple users can connect using different keys.
+        Uploading a new key still requires an ssh connection as the uid under which the daemon is running.
+ required: false
+      default: "no"
+ version_added: "1.6"
notes:
- See the advanced playbooks chapter for more about using accelerated mode.
requirements: [ "python-keyczar" ]
@@ -71,6 +79,7 @@ EXAMPLES = '''
'''
import base64
+import errno
import getpass
import json
import os
@@ -88,10 +97,13 @@ import traceback
import SocketServer
from datetime import datetime
-from threading import Thread
+from threading import Thread, Lock
+
+# import module snippets
+# we must import this here at the top so we can use get_module_path()
+from ansible.module_utils.basic import *
syslog.openlog('ansible-%s' % os.path.basename(__file__))
-PIDFILE = os.path.expanduser("~/.accelerate.pid")
# the chunk size to read and send, assuming mtu 1500 and
# leaving room for base64 (+33%) encoding and header (100 bytes)
@@ -107,6 +119,9 @@ def log(msg, cap=0):
if DEBUG_LEVEL >= cap:
syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg)
+def v(msg):
+ log(msg, cap=1)
+
def vv(msg):
log(msg, cap=2)
@@ -116,16 +131,6 @@ def vvv(msg):
def vvvv(msg):
log(msg, cap=4)
-if os.path.exists(PIDFILE):
- try:
- data = int(open(PIDFILE).read())
- try:
- os.kill(data, signal.SIGKILL)
- except OSError:
- pass
- except ValueError:
- pass
- os.unlink(PIDFILE)
HAS_KEYCZAR = False
try:
@@ -134,10 +139,26 @@ try:
except ImportError:
pass
+SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket")
+
+def get_pid_location(module):
+ """
+ Try to find a pid directory in the common locations, falling
+ back to the user's home directory if no others exist
+ """
+ for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]:
+ try:
+ if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK):
+ return os.path.join(dir, '.accelerate.pid')
+ except:
+ pass
+ module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file")
+
+
# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move
# this into utils.module_common and probably should anyway
-def daemonize_self(module, password, port, minutes):
+def daemonize_self(module, password, port, minutes, pid_file):
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
try:
pid = os.fork()
@@ -158,11 +179,11 @@ def daemonize_self(module, password, port, minutes):
try:
pid = os.fork()
if pid > 0:
- log("daemon pid %s, writing %s" % (pid, PIDFILE))
- pid_file = open(PIDFILE, "w")
+ log("daemon pid %s, writing %s" % (pid, pid_file))
+ pid_file = open(pid_file, "w")
pid_file.write("%s" % pid)
pid_file.close()
- vvv("pidfile written")
+ vvv("pid file written")
sys.exit(0)
except OSError, e:
log("fork #2 failed: %d (%s)" % (e.errno, e.strerror))
@@ -174,8 +195,85 @@ def daemonize_self(module, password, port, minutes):
os.dup2(dev_null.fileno(), sys.stderr.fileno())
log("daemonizing successful")
-class ThreadWithReturnValue(Thread):
+class LocalSocketThread(Thread):
+ server = None
+ terminated = False
+
+ def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
+ self.server = kwargs.get('server')
+ Thread.__init__(self, group, target, name, args, kwargs, Verbose)
+
+ def run(self):
+ try:
+ if os.path.exists(SOCKET_FILE):
+ os.remove(SOCKET_FILE)
+ else:
+ dir = os.path.dirname(SOCKET_FILE)
+ if os.path.exists(dir):
+ if not os.path.isdir(dir):
+ log("The socket file path (%s) exists, but is not a directory. No local connections will be available" % dir)
+ return
+ else:
+ # make sure the directory is accessible only to this
+ # user, as socket files derive their permissions from
+ # the directory that contains them
+ os.chmod(dir, 0700)
+ elif not os.path.exists(dir):
+ os.makedirs(dir, 0700)
+ except OSError:
+ pass
+ self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ self.s.bind(SOCKET_FILE)
+ self.s.listen(5)
+ while not self.terminated:
+ try:
+ conn, addr = self.s.accept()
+ vv("received local connection")
+ data = ""
+ while "\n" not in data:
+ data += conn.recv(2048)
+ try:
+ new_key = AesKey.Read(data.strip())
+ found = False
+ for key in self.server.key_list:
+ try:
+ new_key.Decrypt(key.Encrypt("foo"))
+ found = True
+ break
+ except:
+ pass
+ if not found:
+ vv("adding new key to the key list")
+ self.server.key_list.append(new_key)
+ conn.sendall("OK\n")
+ else:
+ vv("key already exists in the key list, ignoring")
+ conn.sendall("EXISTS\n")
+
+ # update the last event time so the server doesn't
+                        # shutdown sooner than expected for new clients
+ try:
+ self.server.last_event_lock.acquire()
+ self.server.last_event = datetime.now()
+ finally:
+ self.server.last_event_lock.release()
+ except Exception, e:
+ vv("key loaded locally was invalid, ignoring (%s)" % e)
+ conn.sendall("BADKEY\n")
+ finally:
+ try:
+ conn.close()
+ except:
+ pass
+ except:
+ pass
+
+ def terminate(self):
+ self.terminated = True
+ self.s.shutdown(socket.SHUT_RDWR)
+ self.s.close()
+class ThreadWithReturnValue(Thread):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None):
Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self._return = None
@@ -190,24 +288,41 @@ class ThreadWithReturnValue(Thread):
return self._return
class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
- def __init__(self, server_address, RequestHandlerClass, module, password, timeout):
+ key_list = []
+ last_event = datetime.now()
+ last_event_lock = Lock()
+ def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False):
self.module = module
- self.key = AesKey.Read(password)
+ self.key_list.append(AesKey.Read(password))
self.allow_reuse_address = True
self.timeout = timeout
- SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
-class ThreadedTCPV6Server(SocketServer.ThreadingTCPServer):
- def __init__(self, server_address, RequestHandlerClass, module, password, timeout):
- self.module = module
- self.address_family = socket.AF_INET6
- self.key = AesKey.Read(password)
- self.allow_reuse_address = True
- self.timeout = timeout
+ if use_ipv6:
+ self.address_family = socket.AF_INET6
+
+ if self.module.params.get('multi_key', False):
+ vv("starting thread to handle local connections for multiple keys")
+ self.local_thread = LocalSocketThread(kwargs=dict(server=self))
+ self.local_thread.start()
+
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
+ def shutdown(self):
+ self.local_thread.terminate()
+ self.running = False
+ SocketServer.ThreadingTCPServer.shutdown(self)
+
class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
+ # the key to use for this connection
+ active_key = None
+
def send_data(self, data):
+ try:
+ self.server.last_event_lock.acquire()
+ self.server.last_event = datetime.now()
+ finally:
+ self.server.last_event_lock.release()
+
packed_len = struct.pack('!Q', len(data))
return self.request.sendall(packed_len + data)
@@ -216,23 +331,40 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
data = ""
vvvv("in recv_data(), waiting for the header")
while len(data) < header_len:
- d = self.request.recv(header_len - len(data))
- if not d:
- vvv("received nothing, bailing out")
+ try:
+ d = self.request.recv(header_len - len(data))
+ if not d:
+ vvv("received nothing, bailing out")
+ return None
+ data += d
+ except:
+ # probably got a connection reset
+ vvvv("exception received while waiting for recv(), returning None")
return None
- data += d
vvvv("in recv_data(), got the header, unpacking")
data_len = struct.unpack('!Q',data[:header_len])[0]
data = data[header_len:]
vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
while len(data) < data_len:
- d = self.request.recv(data_len - len(data))
- if not d:
- vvv("received nothing, bailing out")
+ try:
+ d = self.request.recv(data_len - len(data))
+ if not d:
+ vvv("received nothing, bailing out")
+ return None
+ data += d
+ vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
+ except:
+ # probably got a connection reset
+ vvvv("exception received while waiting for recv(), returning None")
return None
- data += d
- vvvv("data received so far (expecting %d): %d" % (data_len,len(data)))
vvvv("received all of the data, returning")
+
+ try:
+ self.server.last_event_lock.acquire()
+ self.server.last_event = datetime.now()
+ finally:
+ self.server.last_event_lock.release()
+
return data
def handle(self):
@@ -243,18 +375,26 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
if not data:
vvvv("received nothing back from recv_data(), breaking out")
break
- try:
- vvvv("got data, decrypting")
- data = self.server.key.Decrypt(data)
- vvvv("decryption done")
- except:
- vv("bad decrypt, skipping...")
- data2 = json.dumps(dict(rc=1))
- data2 = self.server.key.Encrypt(data2)
- self.send_data(data2)
- return
+ vvvv("got data, decrypting")
+ if not self.active_key:
+ for key in self.server.key_list:
+ try:
+ data = key.Decrypt(data)
+ self.active_key = key
+ break
+ except:
+ pass
+ else:
+ vv("bad decrypt, exiting the connection handler")
+ return
+ else:
+ try:
+ data = self.active_key.Decrypt(data)
+ except:
+ vv("bad decrypt, exiting the connection handler")
+ return
- vvvv("loading json from the data")
+ vvvv("decryption done, loading json from the data")
data = json.loads(data)
mode = data['mode']
@@ -270,7 +410,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
last_pong = datetime.now()
vvvv("command still running, sending keepalive packet")
data2 = json.dumps(dict(pong=True))
- data2 = self.server.key.Encrypt(data2)
+ data2 = self.active_key.Encrypt(data2)
self.send_data(data2)
time.sleep(0.1)
response = twrv._return
@@ -286,8 +426,9 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
response = self.validate_user(data)
vvvv("response result is %s" % str(response))
- data2 = json.dumps(response)
- data2 = self.server.key.Encrypt(data2)
+ json_response = json.dumps(response)
+ vvvv("dumped json is %s" % json_response)
+ data2 = self.active_key.Encrypt(json_response)
vvvv("sending the response back to the controller")
self.send_data(data2)
vvvv("done sending the response")
@@ -299,9 +440,10 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
tb = traceback.format_exc()
log("encountered an unhandled exception in the handle() function")
log("error was:\n%s" % tb)
- data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function"))
- data2 = self.server.key.Encrypt(data2)
- self.send_data(data2)
+ if self.active_key:
+ data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function"))
+ data2 = self.active_key.Encrypt(data2)
+ self.send_data(data2)
def validate_user(self, data):
if 'username' not in data:
@@ -329,11 +471,15 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
return dict(failed=True, msg='internal error: cmd is required')
if 'tmp_path' not in data:
return dict(failed=True, msg='internal error: tmp_path is required')
- if 'executable' not in data:
- return dict(failed=True, msg='internal error: executable is required')
vvvv("executing: %s" % data['cmd'])
- rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=data['executable'], close_fds=True)
+
+ use_unsafe_shell = False
+ executable = data.get('executable')
+ if executable:
+ use_unsafe_shell = True
+
+ rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell)
if stdout is None:
stdout = ''
if stderr is None:
@@ -358,7 +504,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
last = True
data = dict(data=base64.b64encode(data), last=last)
data = json.dumps(data)
- data = self.server.key.Encrypt(data)
+ data = self.active_key.Encrypt(data)
if self.send_data(data):
return dict(failed=True, stderr="failed to send data")
@@ -367,7 +513,7 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
if not response:
log("failed to get a response, aborting")
return dict(failed=True, stderr="Failed to get a response from %s" % self.host)
- response = self.server.key.Decrypt(response)
+ response = self.active_key.Decrypt(response)
response = json.loads(response)
if response.get('failed',False):
@@ -390,8 +536,14 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
final_path = None
if 'user' in data and data.get('user') != getpass.getuser():
- vv("the target user doesn't match this user, we'll move the file into place via sudo")
- (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=os.path.expanduser('~/.ansible/tmp/'))
+ vvv("the target user doesn't match this user, we'll move the file into place via sudo")
+ tmp_path = os.path.expanduser('~/.ansible/tmp/')
+ if not os.path.exists(tmp_path):
+ try:
+ os.makedirs(tmp_path, 0700)
+ except:
+ return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path)
+ (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path)
out_fd = os.fdopen(fd, 'w', 0)
final_path = data['out_path']
else:
@@ -405,14 +557,14 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
bytes += len(out)
out_fd.write(out)
response = json.dumps(dict())
- response = self.server.key.Encrypt(response)
+ response = self.active_key.Encrypt(response)
self.send_data(response)
if data['last']:
break
data = self.recv_data()
if not data:
raise ""
- data = self.server.key.Decrypt(data)
+ data = self.active_key.Decrypt(data)
data = json.loads(data)
except:
out_fd.close()
@@ -428,27 +580,45 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
self.server.module.atomic_move(out_path, final_path)
return dict()
-def daemonize(module, password, port, timeout, minutes, ipv6):
+def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file):
try:
- daemonize_self(module, password, port, minutes)
+ daemonize_self(module, password, port, minutes, pid_file)
- def catcher(signum, _):
- module.exit_json(msg='timer expired')
+ def timer_handler(signum, _):
+ try:
+ server.last_event_lock.acquire()
+ td = datetime.now() - server.last_event
+ # older python timedelta objects don't have total_seconds(),
+ # so we use the formula from the docs to calculate it
+ total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
+ if total_seconds >= minutes * 60:
+ log("server has been idle longer than the timeout, shutting down")
+ server.running = False
+ server.shutdown()
+ else:
+ # reschedule the check
+ vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60))
+ signal.alarm(30)
+ except:
+ pass
+ finally:
+ server.last_event_lock.release()
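
The hand-rolled arithmetic mirrors timedelta.total_seconds(), which only appeared in Python 2.7. A quick sanity check of the formula:

from datetime import timedelta

td = timedelta(days=1, seconds=30)
manual = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
assert manual == td.total_seconds() == 86430
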
- signal.signal(signal.SIGALRM, catcher)
- signal.setitimer(signal.ITIMER_REAL, 60 * minutes)
+ signal.signal(signal.SIGALRM, timer_handler)
+ signal.alarm(30)
tries = 5
while tries > 0:
try:
- if ipv6:
- server = ThreadedTCPV6Server(("::", port), ThreadedTCPRequestHandler, module, password, timeout)
+ if use_ipv6:
+ address = ("::", port)
else:
- server = ThreadedTCPServer(("0.0.0.0", port), ThreadedTCPRequestHandler, module, password, timeout)
+ address = ("0.0.0.0", port)
+ server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6)
server.allow_reuse_address = True
break
- except:
- vv("Failed to create the TCP server (tries left = %d)" % tries)
+ except Exception, e:
+ vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e))
tries -= 1
time.sleep(0.2)
@@ -456,8 +626,20 @@ def daemonize(module, password, port, timeout, minutes, ipv6):
vv("Maximum number of attempts to create the TCP server reached, bailing out")
raise Exception("max # of attempts to serve reached")
- vv("serving!")
- server.serve_forever(poll_interval=0.1)
+ # run the server in a separate thread to make signal handling work
+ server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1))
+ server_thread.start()
+ server.running = True
+
+ v("serving!")
+ while server.running:
+ time.sleep(1)
+
+ # wait for the thread to exit fully
+ server_thread.join()
+
+ v("server thread terminated, exiting!")
+ sys.exit(0)
except Exception, e:
tb = traceback.format_exc()
log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb))
@@ -469,6 +651,7 @@ def main():
argument_spec = dict(
port=dict(required=False, default=5099),
ipv6=dict(required=False, default=False, type='bool'),
+ multi_key=dict(required=False, default=False, type='bool'),
timeout=dict(required=False, default=300),
password=dict(required=True),
minutes=dict(required=False, default=30),
@@ -483,14 +666,62 @@ def main():
minutes = int(module.params['minutes'])
debug = int(module.params['debug'])
ipv6 = module.params['ipv6']
+ multi_key = module.params['multi_key']
if not HAS_KEYCZAR:
module.fail_json(msg="keyczar is not installed (on the remote side)")
DEBUG_LEVEL=debug
+ pid_file = get_pid_location(module)
- daemonize(module, password, port, timeout, minutes, ipv6)
+ daemon_pid = None
+ daemon_running = False
+ if os.path.exists(pid_file):
+ try:
+ daemon_pid = int(open(pid_file).read())
+ try:
+ # sending signal 0 doesn't do anything to the
+ # process, other than tell the calling program
+ # whether other signals can be sent
+ os.kill(daemon_pid, 0)
+ except OSError, e:
+ if e.errno == errno.EPERM:
+ # no permissions means the pid is probably
+ # running, but as a different user, so fail
+                module.fail_json(msg="the accelerate daemon appears to be running as a different user, which this user cannot access (pid=%d)" % daemon_pid)
+ else:
+ daemon_running = True
+ except ValueError:
+ # invalid pid file, unlink it - otherwise we don't care
+ try:
+ os.unlink(pid_file)
+ except:
+ pass
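
Signal 0 is never delivered; the kernel only runs the existence and permission checks, which makes os.kill(pid, 0) a cheap liveness probe. The check in isolation:

import errno
import os

def pid_alive(pid):
    """True if pid exists and is signalable; EPERM means it exists but is not ours."""
    try:
        os.kill(pid, 0)     # signal 0: check only, nothing is delivered
    except OSError as e:
        return e.errno == errno.EPERM
    return True

print(pid_alive(os.getpid()))   # True: our own pid always exists
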
+
+ if daemon_running and multi_key:
+ # try to connect to the file socket for the daemon if it exists
+ s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ try:
+ s.connect(SOCKET_FILE)
+ s.sendall(password + '\n')
+ data = ""
+ while '\n' not in data:
+ data += s.recv(2048)
+ res = data.strip()
+ except:
+ module.fail_json(msg="failed to connect to the local socket file")
+ finally:
+ try:
+ s.close()
+ except:
+ pass
+
+ if res in ("OK", "EXISTS"):
+ module.exit_json(msg="transferred new key to the existing daemon")
+ else:
+ module.fail_json(msg="could not transfer new key: %s" % data.strip())
+ else:
+ # try to start up the daemon
+ daemonize(module, password, port, timeout, minutes, ipv6, pid_file)
-# import module snippets
-from ansible.module_utils.basic import *
main()
diff --git a/utilities/wait_for b/utilities/wait_for
index faf821e2749..3a381f06944 100644
--- a/utilities/wait_for
+++ b/utilities/wait_for
@@ -33,9 +33,11 @@ description:
are not immediately available after their init scripts return -
which is true of certain Java application servers. It is also
useful when starting guests with the M(virt) module and
- needing to pause until they are ready. This module can
- also be used to wait for a file to be available on the filesystem
- or with a regex match a string to be present in a file.
+ needing to pause until they are ready.
+ - This module can also be used to wait for a string to be present in a file, using a regex match.
+ - In 1.6 and later, this module can
+ also be used to wait for a file to be available or absent on the
+ filesystem.
version_added: "0.7"
options:
host:
@@ -60,10 +62,10 @@ options:
required: false
state:
description:
- - either C(present), C(started), or C(stopped)
+ - either C(present), C(started), C(stopped), or C(absent)
- When checking a port C(started) will ensure the port is open, C(stopped) will check that it is closed
- - When checking for a file or a search string C(present) or C(started) will ensure that the file or string is present before continuing
- choices: [ "present", "started", "stopped" ]
+ - When checking for a file or a search string, C(present) or C(started) will ensure that the file or string is present before continuing, and C(absent) will check that the file is absent or has been removed
+ choices: [ "present", "started", "stopped", "absent" ]
default: "started"
path:
version_added: "1.4"
@@ -78,7 +80,7 @@ options:
notes: []
requirements: []
-author: Jeroen Hoekx, John Jarvis
+author: Jeroen Hoekx, John Jarvis, Andrii Radyk
'''
EXAMPLES = '''
@@ -92,6 +94,12 @@ EXAMPLES = '''
# wait until the string "completed" is in the file /tmp/foo before continuing
- wait_for: path=/tmp/foo search_regex=completed
+# wait until the lock file is removed
+- wait_for: path=/var/lock/file.lock state=absent
+
+# wait until the process is finished and its pid entry under /proc is removed
+- wait_for: path=/proc/3466/status state=absent
+
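+# wait until port 8000 is closed, for example after a service has been stopped
+- wait_for: port=8000 state=stopped
+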
'''
def main():
@@ -105,7 +113,7 @@ def main():
port=dict(default=None),
path=dict(default=None),
search_regex=dict(default=None),
- state=dict(default='started', choices=['started', 'stopped', 'present']),
+ state=dict(default='started', choices=['started', 'stopped', 'present', 'absent']),
),
)
@@ -133,23 +141,35 @@ def main():
if delay:
time.sleep(delay)
- if state == 'stopped':
+ if state in [ 'stopped', 'absent' ]:
### first wait for the stop condition
end = start + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < end:
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.settimeout(connect_timeout)
- try:
- s.connect( (host, port) )
- s.shutdown(socket.SHUT_RDWR)
- s.close()
- time.sleep(1)
- except:
- break
+ if path:
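+ # a successful open() means the file still exists; IOError means it is gone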
+ try:
+ f = open(path)
+ f.close()
+ time.sleep(1)
+ except IOError:
+ break
+ elif port:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ s.settimeout(connect_timeout)
+ try:
+ s.connect( (host, port) )
+ s.shutdown(socket.SHUT_RDWR)
+ s.close()
+ time.sleep(1)
+ except:
+ break
else:
elapsed = datetime.datetime.now() - start
- module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
+ if port:
+ module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds)
+ elif path:
+ module.fail_json(msg="Timeout when waiting for %s to be absent." % (path), elapsed=elapsed.seconds)
elif state in ['started', 'present']:
### wait for start condition
diff --git a/web_infrastructure/apache2_module b/web_infrastructure/apache2_module
new file mode 100644
index 00000000000..73a92f40434
--- /dev/null
+++ b/web_infrastructure/apache2_module
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2013, Christian Berendt
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+DOCUMENTATION = '''
+---
+module: apache2_module
+version_added: 1.6
+short_description: enables/disables a module of the Apache2 webserver
+description:
+ - Enables or disables a specified module of the Apache2 webserver.
+options:
+ name:
+ description:
+ - name of the module to enable/disable
+ required: true
+ state:
+ description:
+ - indicate the desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+
+'''
+
+EXAMPLES = '''
+# enables the Apache2 module "wsgi"
+- apache2_module: state=present name=wsgi
+
+# disables the Apache2 module "wsgi"
+- apache2_module: state=absent name=wsgi
+'''
+
+def _module_is_enabled(module):
+ name = module.params['name']
+ a2enmod_binary = module.get_bin_path("a2enmod", True)
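+ # treat a zero exit status from "a2enmod -q <name>" as "module enabled"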
+ result, stdout, stderr = module.run_command("%s -q %s" % (a2enmod_binary, name))
+ return result == 0
+
+def _module_is_disabled(module):
+ return not _module_is_enabled(module)
+
+def _disable_module(module):
+ name = module.params['name']
+
+ if _module_is_disabled(module):
+ module.exit_json(changed = False, result = "Success")
+
+ result, stdout, stderr = module.run_command("a2dismod %s" % name)
+ if result != 0:
+ module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))
+
+ module.exit_json(changed = True, result = "Disabled")
+
+def _enable_module(module):
+ name = module.params['name']
+
+ if _module_is_enabled(module):
+ module.exit_json(changed = False, result = "Success")
+
+ a2enmod_binary = module.get_bin_path("a2enmod", True)
+ result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))
+ if result != 0:
+ module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
+
+ module.exit_json(changed = True, result = "Enabled")
+
+def main():
+ module = AnsibleModule(
+ argument_spec = dict(
+ name = dict(required=True),
+ state = dict(default='present', choices=['absent', 'present'])
+ ),
+ )
+
+ if module.params['state'] == 'present':
+ _enable_module(module)
+
+ if module.params['state'] == 'absent':
+ _disable_module(module)
+
+# import module snippets
+from ansible.module_utils.basic import *
+main()
+
diff --git a/web_infrastructure/django_manage b/web_infrastructure/django_manage
index 68eb92c1bfe..42ce3781fda 100644
--- a/web_infrastructure/django_manage
+++ b/web_infrastructure/django_manage
@@ -74,14 +74,17 @@ options:
description:
- Will skip over out-of-order missing migrations, you can only use this parameter with I(migrate)
required: false
+ version_added: "1.3"
merge:
description:
- Will run out-of-order or missing migrations as they are not rollback migrations, you can only use this parameter with 'migrate' command
required: false
+ version_added: "1.3"
link:
description:
- Will create links to the files instead of copying them, you can only use this parameter with 'collectstatic' command
required: false
+ version_added: "1.3"
notes:
- I(virtualenv) (U(http://www.virtualenv.org)) must be installed on the remote host if the virtualenv parameter is specified.
- This module will create a virtualenv if the virtualenv parameter is specified and a virtualenv does not already exist at the given location.
@@ -203,13 +206,13 @@ def main():
apps = dict(default=None, required=False),
cache_table = dict(default=None, required=False),
database = dict(default=None, required=False),
- failfast = dict(default='no', required=False, choices=BOOLEANS, aliases=['fail_fast']),
+ failfast = dict(default='no', required=False, type='bool', aliases=['fail_fast']),
fixtures = dict(default=None, required=False),
liveserver = dict(default=None, required=False, aliases=['live_server']),
testrunner = dict(default=None, required=False, aliases=['test_runner']),
- skip = dict(default=None, required=False, choices=BOOLEANS),
- merge = dict(default=None, required=False, choices=BOOLEANS),
- link = dict(default=None, required=False, choices=BOOLEANS),
+ skip = dict(default=None, required=False, type='bool'),
+ merge = dict(default=None, required=False, type='bool'),
+ link = dict(default=None, required=False, type='bool'),
),
)
@@ -232,7 +235,6 @@ def main():
_ensure_virtualenv(module)
- os.chdir(app_path)
cmd = "python manage.py %s" % (command, )
if command in noinput_commands:
@@ -251,7 +253,7 @@ def main():
if module.params[param]:
cmd = '%s %s' % (cmd, module.params[param])
- rc, out, err = module.run_command(cmd)
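+ # run manage.py from the application directory rather than chdir'ing the whole process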
+ rc, out, err = module.run_command(cmd, cwd=app_path)
if rc != 0:
if command == 'createcachetable' and 'table' in err and 'already exists' in err:
out = 'Already exists.'
diff --git a/web_infrastructure/jira b/web_infrastructure/jira
new file mode 100644
index 00000000000..950fc3dbfcf
--- /dev/null
+++ b/web_infrastructure/jira
@@ -0,0 +1,347 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# (c) 2014, Steve Smith
+# Atlassian open-source approval reference OSR-76.
+#
+# This file is part of Ansible.
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+#
+
+DOCUMENTATION = """
+module: jira
+version_added: "1.6"
+short_description: create and modify issues in a JIRA instance
+description:
+ - Create and modify issues in a JIRA instance.
+
+options:
+ uri:
+ required: true
+ description:
+ - Base URI for the JIRA instance
+
+ operation:
+ required: true
+ aliases: [ command ]
+ choices: [ create, comment, edit, fetch, transition ]
+ description:
+ - The operation to perform.
+
+ username:
+ required: true
+ description:
+ - The username to log-in with.
+
+ password:
+ required: true
+ description:
+ - The password to log-in with.
+
+ project:
+ aliases: [ prj ]
+ required: false
+ description:
+ - The project for this operation. Required for issue creation.
+
+ summary:
+ required: false
+ description:
+ - The issue summary, where appropriate.
+
+ description:
+ required: false
+ description:
+ - The issue description, where appropriate.
+
+ issuetype:
+ required: false
+ description:
+ - The issue type, for issue creation.
+
+ issue:
+ required: false
+ description:
+ - An existing issue key to operate on.
+
+ comment:
+ required: false
+ description:
+ - The comment text to add.
+
+ status:
+ required: false
+ description:
+ - The desired status; only relevant for the transition operation.
+
+ assignee:
+ required: false
+ description:
+ - Sets the assignee on create or transition operations. Note that not all transitions will allow this.
+
+ fields:
+ required: false
+ description:
+ - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields.
+
+notes:
+ - "Currently this only works with basic-auth."
+
+author: Steve Smith
+"""
+
+EXAMPLES = """
+# Create a new issue and add a comment to it:
+- name: Create an issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=create
+ summary="Example Issue" description="Created using Ansible" issuetype=Task
+ register: issue
+
+- name: Comment on issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=comment
+ comment="A comment added by Ansible"
+
+# Assign an existing issue using edit
+- name: Assign an issue using free-form fields
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ assignee=ssmith
+
+# Create an issue with an existing assignee
+- name: Create an assigned issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=create
+ summary="Assigned issue" description="Created and assigned using Ansible"
+ issuetype=Task assignee=ssmith
+
+# Edit an issue using free-form fields
+- name: Set the labels on an issue using free-form fields
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ args: { fields: {labels: ["autocreated", "ansible"]}}
+
+- name: Set the labels on an issue, YAML version
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=edit
+ args:
+ fields:
+ labels:
+ - "autocreated"
+ - "ansible"
+ - "yaml"
+
+# Retrieve metadata for an issue and use it to create an account
+- name: Get an issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ project=ANS operation=fetch issue="ANS-63"
+ register: issue
+
+- name: Create a unix account for the reporter
+ sudo: true
+ user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"
+
+# Transition an issue by target status
+- name: Close the issue
+ jira: uri={{server}} username={{user}} password={{pass}}
+ issue={{issue.meta.key}} operation=transition status="Done"
+"""
+
+import json
+import base64
+
+def request(url, user, passwd, data=None, method=None):
+ if data:
+ data = json.dumps(data)
+
+ # NOTE: fetch_url uses a password manager, which follows the
+ # standard request-then-challenge basic-auth semantics. However as
+ # JIRA allows some unauthorised operations it doesn't necessarily
+ # send the challenge, so the request occurs as the anonymous user,
+ # resulting in unexpected results. To work around this we manually
+ # inject the basic-auth header up-front to ensure that JIRA treats
+ # the requests as authorized for this user.
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '')
+ response, info = fetch_url(module, url, data=data, method=method,
+ headers={'Content-Type':'application/json',
+ 'Authorization':"Basic %s" % auth})
+
+ if info['status'] not in (200, 204):
+ module.fail_json(msg=info['msg'])
+
+ body = response.read()
+
+ if body:
+ return json.loads(body)
+ else:
+ return {}
+
+def post(url, user, passwd, data):
+ return request(url, user, passwd, data=data, method='POST')
+
+def put(url, user, passwd, data):
+ return request(url, user, passwd, data=data, method='PUT')
+
+def get(url, user, passwd):
+ return request(url, user, passwd)
+
+
+def create(restbase, user, passwd, params):
+ createfields = {
+ 'project': { 'key': params['project'] },
+ 'summary': params['summary'],
+ 'description': params['description'],
+ 'issuetype': { 'name': params['issuetype'] }}
+
+ # Merge in any additional or overridden fields
+ if params['fields']:
+ createfields.update(params['fields'])
+
+ data = {'fields': createfields}
+
+ url = restbase + '/issue/'
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+def comment(restbase, user, passwd, params):
+ data = {
+ 'body': params['comment']
+ }
+
+ url = restbase + '/issue/' + params['issue'] + '/comment'
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+def edit(restbase, user, passwd, params):
+ data = {
+ 'fields': params['fields']
+ }
+
+ url = restbase + '/issue/' + params['issue']
+
+ ret = put(url, user, passwd, data)
+
+ return ret
+
+
+def fetch(restbase, user, passwd, params):
+ url = restbase + '/issue/' + params['issue']
+ ret = get(url, user, passwd)
+ return ret
+
+
+def transition(restbase, user, passwd, params):
+ # Find the transition id
+ turl = restbase + '/issue/' + params['issue'] + "/transitions"
+ tmeta = get(turl, user, passwd)
+
+ target = params['status']
+ tid = None
+ for t in tmeta['transitions']:
+ if t['name'] == target:
+ tid = t['id']
+ break
+
+ if not tid:
+ raise ValueError("Failed find valid transition for '%s'" % target)
+
+ # Perform it
+ url = restbase + '/issue/' + params['issue'] + "/transitions"
+ data = { 'transition': { "id" : tid },
+ 'fields': params['fields']}
+
+ ret = post(url, user, passwd, data)
+
+ return ret
+
+
+# Some parameters are required depending on the operation:
+OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
+ comment=['issue', 'comment'],
+ edit=[],
+ fetch=['issue'],
+ transition=['status'])
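+# e.g. operation=create must also supply project, issuetype, summary and description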
+
+def main():
+
+ global module
+ module = AnsibleModule(
+ argument_spec=dict(
+ uri=dict(required=True),
+ operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
+ aliases=['command'], required=True),
+ username=dict(required=True),
+ password=dict(required=True),
+ project=dict(),
+ summary=dict(),
+ description=dict(),
+ issuetype=dict(),
+ issue=dict(aliases=['ticket']),
+ comment=dict(),
+ status=dict(),
+ assignee=dict(),
+ fields=dict(default={})
+ ),
+ supports_check_mode=False
+ )
+
+ op = module.params['operation']
+
+ # Check we have the necessary per-operation parameters
+ missing = []
+ for parm in OP_REQUIRED[op]:
+ if not module.params[parm]:
+ missing.append(parm)
+ if missing:
+ module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))
+
+ # Handle rest of parameters
+ uri = module.params['uri']
+ user = module.params['username']
+ passwd = module.params['password']
+ if module.params['assignee']:
+ module.params['fields']['assignee'] = { 'name': module.params['assignee'] }
+
+ if not uri.endswith('/'):
+ uri = uri+'/'
+ restbase = uri + 'rest/api/2'
+
+ # Dispatch
+ try:
+
+ # Lookup the corresponding method for this operation. This is
+ # safe as the AnsibleModule should remove any unknown operations.
+ thismod = sys.modules[__name__]
+ method = getattr(thismod, op)
+
+ ret = method(restbase, user, passwd, module.params)
+
+ except Exception as e:
+ return module.fail_json(msg=e.message)
+
+
+ module.exit_json(changed=True, meta=ret)
+
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.urls import *
+main()
diff --git a/web_infrastructure/supervisorctl b/web_infrastructure/supervisorctl
index 564368af5f4..2d458169e76 100644
--- a/web_infrastructure/supervisorctl
+++ b/web_infrastructure/supervisorctl
@@ -23,70 +23,76 @@ import os
DOCUMENTATION = '''
---
module: supervisorctl
-short_description: Manage the state of a program or group of programs running via Supervisord
+short_description: Manage the state of a program or group of programs running via supervisord
description:
- - Manage the state of a program or group of programs running via I(Supervisord)
+ - Manage the state of a program or group of programs running via supervisord
version_added: "0.7"
options:
name:
description:
- - The name of the I(supervisord) program/process to manage
+ - The name of the supervisord program or group to manage.
+ - The name will be taken as a group name when it ends with a colon I(:).
+ - Group support is only available in Ansible version 1.6 or later.
required: true
default: null
config:
description:
- - configuration file path, passed as -c to supervisorctl
+ - The supervisor configuration file path
required: false
default: null
version_added: "1.3"
server_url:
description:
- - URL on which supervisord server is listening, passed as -s to supervisorctl
+ - URL on which supervisord server is listening
required: false
default: null
version_added: "1.3"
username:
description:
- - username to use for authentication with server, passed as -u to supervisorctl
+ - username to use for authentication
required: false
default: null
version_added: "1.3"
password:
description:
- - password to use for authentication with server, passed as -p to supervisorctl
+ - password to use for authentication
required: false
default: null
version_added: "1.3"
state:
description:
- - The state of service
+ - The desired state of program/group.
required: true
default: null
choices: [ "present", "started", "stopped", "restarted" ]
supervisorctl_path:
description:
- - Path to supervisorctl executable to use
+ - path to supervisorctl executable
required: false
default: null
version_added: "1.4"
-requirements:
- - supervisorctl
-requirements: [ ]
-author: Matt Wright
+notes:
+ - When C(state) = I(present), the module will call C(supervisorctl reread) then C(supervisorctl add) if the program/group does not exist.
+ - When C(state) = I(restarted), the module will call C(supervisorctl update) and then C(supervisorctl restart).
+requirements: [ "supervisorctl" ]
+author: Matt Wright, Aaron Wang
'''
EXAMPLES = '''
# Manage the state of program to be in 'started' state.
- supervisorctl: name=my_app state=started
+
+# Manage the state of a program group to be in 'started' state.
+- supervisorctl: name='my_apps:' state=started
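+
+# Restart all programs in the group 'my_apps'.
+- supervisorctl: name='my_apps:' state=restarted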
+
# Restart my_app, reading supervisorctl configuration from a specified file.
- supervisorctl: name=my_app state=restarted config=/var/opt/my_project/supervisord.conf
# Restart my_app, connecting to supervisord with credentials and server URL.
- supervisorctl: name=my_app state=restarted username=test password=testpass server_url=http://localhost:9001
-
'''
+
def main():
arg_spec = dict(
name=dict(required=True),
@@ -101,6 +107,10 @@ def main():
module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True)
name = module.params['name']
+ is_group = False
+ if name.endswith(':'):
+ is_group = True
+ name = name.rstrip(':')
state = module.params['state']
config = module.params.get('config')
server_url = module.params.get('server_url')
@@ -111,11 +121,12 @@ def main():
if supervisorctl_path:
supervisorctl_path = os.path.expanduser(supervisorctl_path)
if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path):
- supervisorctl_args = [ supervisorctl_path ]
+ supervisorctl_args = [supervisorctl_path]
else:
- module.fail_json(msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
+ module.fail_json(
+ msg="Provided path to supervisorctl does not exist or isn't executable: %s" % supervisorctl_path)
else:
- supervisorctl_args = [ module.get_bin_path('supervisorctl', True) ]
+ supervisorctl_args = [module.get_bin_path('supervisorctl', True)]
if config:
supervisorctl_args.extend(['-c', os.path.expanduser(config)])
@@ -133,61 +144,76 @@ def main():
args.append(name)
return module.run_command(args, **kwargs)
- rc, out, err = run_supervisorctl('status')
- present = name in out
-
- if state == 'present':
- if not present:
- if module.check_mode:
- module.exit_json(changed=True)
- run_supervisorctl('reread', check_rc=True)
- rc, out, err = run_supervisorctl('add', name)
-
- if '%s: added process group' % name in out:
- module.exit_json(changed=True, name=name, state=state)
+ def get_matched_processes():
+ matched = []
+ rc, out, err = run_supervisorctl('status')
+ for line in out.splitlines():
+ # One status line may look like one of these two:
+ # process not in group:
+ # echo_date_lonely RUNNING pid 7680, uptime 13:22:18
+ # process in group:
+ # echo_date_group:echo_date_00 RUNNING pid 7681, uptime 13:22:18
+ fields = [field for field in line.split(' ') if field != '']
+ process_name = fields[0]
+ status = fields[1]
+
+ if is_group:
+ # If there is ':', this process must be in a group.
+ if ':' in process_name:
+ group = process_name.split(':')[0]
+ if group != name:
+ continue
+ else:
+ continue
else:
- module.fail_json(msg=out, name=name, state=state)
-
- module.exit_json(changed=False, name=name, state=state)
+ if process_name != name:
+ continue
- rc, out, err = run_supervisorctl('status', name)
- running = 'RUNNING' in out or '(already running)' in out
+ matched.append((process_name, status))
+ return matched
- if running and state == 'started':
- module.exit_json(changed=False, name=name, state=state)
+ def take_action_on_processes(processes, status_filter, action, expected_result):
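+ # run 'action' on every process whose status passes status_filter, and confirm that expected_result appears in supervisorctl's output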
+ to_take_action_on = []
+ for process_name, status in processes:
+ if status_filter(status):
+ to_take_action_on.append(process_name)
- if running and state == 'stopped':
+ if len(to_take_action_on) == 0:
+ module.exit_json(changed=False, name=name, state=state)
if module.check_mode:
module.exit_json(changed=True)
- rc, out, err = run_supervisorctl('stop', name)
-
- if '%s: stopped' % name in out:
- module.exit_json(changed=True, name=name, state=state)
+ for process_name in to_take_action_on:
+ rc, out, err = run_supervisorctl(action, process_name)
+ if '%s: %s' % (process_name, expected_result) not in out:
+ module.fail_json(msg=out)
- module.fail_json(msg=out)
+ module.exit_json(changed=True, name=name, state=state, affected=to_take_action_on)
- elif state == 'restarted':
- if module.check_mode:
- module.exit_json(changed=True)
- rc, out, err = run_supervisorctl('update', name)
- rc, out, err = run_supervisorctl('restart', name)
+ if state == 'restarted':
+ rc, out, err = run_supervisorctl('update')
+ processes = get_matched_processes()
+ take_action_on_processes(processes, lambda s: True, 'restart', 'started')
- if '%s: started' % name in out:
- module.exit_json(changed=True, name=name, state=state)
+ processes = get_matched_processes()
- module.fail_json(msg=out)
+ if state == 'present':
+ if len(processes) > 0:
+ module.exit_json(changed=False, name=name, state=state)
- elif not running and state == 'started':
if module.check_mode:
module.exit_json(changed=True)
- rc, out, err = run_supervisorctl('start',name)
-
- if '%s: started' % name in out:
+ run_supervisorctl('reread', check_rc=True)
+ rc, out, err = run_supervisorctl('add', name)
+ if '%s: added process group' % name in out:
module.exit_json(changed=True, name=name, state=state)
+ else:
+ module.fail_json(msg=out, name=name, state=state)
- module.fail_json(msg=out)
+ if state == 'started':
+ take_action_on_processes(processes, lambda s: s != 'RUNNING', 'start', 'started')
- module.exit_json(changed=False, name=name, state=state)
+ if state == 'stopped':
+ take_action_on_processes(processes, lambda s: s == 'RUNNING', 'stop', 'stopped')
# import module snippets
from ansible.module_utils.basic import *