diff --git a/cloud/rax_scaling_group b/cloud/rax_scaling_group
new file mode 100644
index 00000000000..84ebfb2756f
--- /dev/null
+++ b/cloud/rax_scaling_group
@@ -0,0 +1,403 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_scaling_group
+short_description: Manipulate Rackspace Cloud Autoscale Groups
+description:
+ - Manipulate Rackspace Cloud Autoscale Groups
+version_added: 1.7
+options:
+ cooldown:
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ disk_config:
+ description:
+ - Disk partitioning strategy
+ choices:
+ - auto
+ - manual
+ default: auto
+ files:
+ description:
+ - 'Files to insert into the instance. Hash of C(remotepath: localpath)'
+ default: null
+ flavor:
+ description:
+ - flavor to use for the instance
+ required: true
+ image:
+ description:
+ - image to use for the instance. Can be an C(id), C(human_id) or C(name)
+ required: true
+ key_name:
+ description:
+ - key pair to use on the instance
+ default: null
+ loadbalancers:
+ description:
+ - List of load balancer C(id) and C(port) hashes
+ max_entities:
+ description:
+ - The maximum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ meta:
+ description:
+ - A hash of metadata to associate with the instance
+ default: null
+ min_entities:
+ description:
+ - The minimum number of entities that are allowed in the scaling group.
+ Must be an integer between 0 and 1000.
+ required: true
+ name:
+ description:
+ - Name to give the scaling group
+ required: true
+ networks:
+ description:
+      - The networks to attach to the instances. If specified, you must include
+        ALL networks, including the public and private interfaces. Can be C(id)
+        or C(label).
+ default:
+ - public
+ - private
+ server_name:
+ description:
+ - The base name for servers created by Autoscale
+ required: true
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: Matt Martz
+extends_documentation_fragment: rackspace
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - rax_scaling_group:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ flavor: performance1-1
+ image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+ min_entities: 5
+ max_entities: 10
+ name: ASG Test
+ server_name: asgtest
+ loadbalancers:
+ - id: 228385
+ port: 80
+      register: asg
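+
+    # A hypothetical follow-up task (not part of the original example): the
+    # group is removed by passing the same required parameters with
+    # state=absent.
+    - rax_scaling_group:
+        credentials: ~/.raxpub
+        region: ORD
+        flavor: performance1-1
+        image: bb02b1a3-bc77-4d17-ab5b-421d89850fca
+        min_entities: 5
+        max_entities: 10
+        name: ASG Test
+        server_name: asgtest
+        state: absent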
+'''
+
+import json
+import os
+
+from uuid import UUID
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
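+# Well-known UUIDs for the Rackspace public (PublicNet) and private
+# (ServiceNet) networks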
+PUBLIC_NET_ID = "00000000-0000-0000-0000-000000000000"
+SERVICE_NET_ID = "11111111-1111-1111-1111-111111111111"
+
+
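+# Serialize a pyrax scaling group (including any nested policies) into a
+# plain dict of JSON-friendly attributes for module output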
+def asg_to_dict(obj):
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if key == 'policies' and isinstance(value, list):
+ policies = []
+ for policy in value:
+ policies.append(asg_to_dict(policy))
+ instance[key] = policies
+ elif (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
+ instance[key] = value
+ return instance
+
+
+def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
+ image=None, key_name=None, loadbalancers=[], meta={},
+ min_entities=0, max_entities=0, name=None, networks=[],
+ server_name=None, state='present'):
+ changed = False
+
+    au = pyrax.autoscale
+    cnw = pyrax.cloud_networks
+    cs = pyrax.cloudservers
+    if not au or not cnw or not cs:
+ module.fail_json(msg='Failed to instantiate clients. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ if state == 'present':
+ # Normalize and ensure all metadata values are strings
+ if meta:
+ for k, v in meta.items():
+ if isinstance(v, list):
+ meta[k] = ','.join(['%s' % i for i in v])
+ elif isinstance(v, dict):
+ meta[k] = json.dumps(v)
+ elif not isinstance(v, basestring):
+ meta[k] = '%s' % v
+
+ # Check if the provided image is a UUID and if not, search for an
+ # appropriate image using human_id and name
+ if image:
+ try:
+ UUID(image)
+ except ValueError:
+ try:
+ image = cs.images.find(human_id=image)
+                except (cs.exceptions.NotFound,
+                        cs.exceptions.NoUniqueMatch):
+ try:
+ image = cs.images.find(name=image)
+ except (cs.exceptions.NotFound,
+ cs.exceptions.NoUniqueMatch):
+ module.fail_json(msg='No matching image found (%s)' %
+ image)
+
+ image = pyrax.utils.get_id(image)
+
+ # Check if the provided network is a UUID and if not, search for an
+ # appropriate network using label
+ nics = []
+ if networks:
+ for network in networks:
+ try:
+ UUID(network)
+ except ValueError:
+ if network.lower() == 'public':
+ nics.extend(cnw.get_server_networks(PUBLIC_NET_ID))
+ elif network.lower() == 'private':
+ nics.extend(cnw.get_server_networks(SERVICE_NET_ID))
+ else:
+ try:
+ network_obj = cnw.find_network_by_label(network)
+ except (pyrax.exceptions.NetworkNotFound,
+ pyrax.exceptions.NetworkLabelNotUnique):
+ module.fail_json(msg='No matching network found '
+ '(%s)' % network)
+ else:
+ nics.extend(cnw.get_server_networks(network_obj))
+ else:
+ nics.extend(cnw.get_server_networks(network))
+
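+            # get_server_networks() returns dicts keyed by 'net-id', while the
+            # Autoscale launch configuration expects 'uuid'; rename the key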
+ for nic in nics:
+ nic.update(uuid=nic['net-id'])
+ del nic['net-id']
+
+ # Handle the file contents
+ personality = []
+ if files:
+ for rpath in files.keys():
+ lpath = os.path.expanduser(files[rpath])
+ try:
+ f = open(lpath, 'r')
+ personality.append({
+ 'path': rpath,
+ 'contents': f.read()
+ })
+ f.close()
+                except Exception, e:
+                    module.fail_json(msg='Failed to load %s: %s' % (lpath, e))
+
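+        # Convert the loadbalancers parameter into the (lb_id, port) tuples
+        # pyrax expects; entries missing either value are skipped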
+ lbs = []
+ if loadbalancers:
+ for lb in loadbalancers:
+ lb_id = lb.get('id')
+ port = lb.get('port')
+ if not lb_id or not port:
+ continue
+ lbs.append((lb_id, port))
+
+ try:
+ sg = au.find(name=name)
+ except pyrax.exceptions.NoUniqueMatch, e:
+ module.fail_json(msg='%s' % e.message)
+ except pyrax.exceptions.NotFound:
+ try:
+ sg = au.create(name, cooldown=cooldown,
+ min_entities=min_entities,
+ max_entities=max_entities,
+ launch_config_type='launch_server',
+ server_name=server_name, image=image,
+ flavor=flavor, disk_config=disk_config,
+                           metadata=meta, personality=personality,
+ networks=nics, load_balancers=lbs,
+ key_name=key_name)
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ if not changed:
+ # Scaling Group Updates
+ group_args = {}
+ if cooldown != sg.cooldown:
+ group_args['cooldown'] = cooldown
+
+ if min_entities != sg.min_entities:
+ group_args['min_entities'] = min_entities
+
+ if max_entities != sg.max_entities:
+ group_args['max_entities'] = max_entities
+
+ if group_args:
+ changed = True
+ sg.update(**group_args)
+
+ # Launch Configuration Updates
+ lc = sg.get_launch_config()
+ lc_args = {}
+ if server_name != lc.get('name'):
+ lc_args['name'] = server_name
+
+ if image != lc.get('image'):
+ lc_args['image'] = image
+
+ if flavor != lc.get('flavor'):
+ lc_args['flavor'] = flavor
+
+ if disk_config != lc.get('disk_config'):
+ lc_args['disk_config'] = disk_config
+
+ if meta != lc.get('metadata'):
+ lc_args['metadata'] = meta
+
+            if personality != lc.get('personality'):
+                lc_args['personality'] = personality
+
+ if nics != lc.get('networks'):
+ lc_args['networks'] = nics
+
+ if lbs != lc.get('load_balancers'):
+                # Workaround for https://github.com/rackspace/pyrax/pull/393
+ lc_args['load_balancers'] = sg.manager._resolve_lbs(lbs)
+
+ if key_name != lc.get('key_name'):
+ lc_args['key_name'] = key_name
+
+ if lc_args:
+                # Workaround for https://github.com/rackspace/pyrax/pull/389
+ if 'flavor' not in lc_args:
+ lc_args['flavor'] = lc.get('flavor')
+ changed = True
+ sg.update_launch_config(**lc_args)
+
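+        # Re-fetch the group so the returned facts reflect any updates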
+ sg.get()
+
+ module.exit_json(changed=changed, autoscale_group=asg_to_dict(sg))
+
+ else:
+ try:
+ sg = au.find(name=name)
+ sg.delete()
+ changed = True
+ except pyrax.exceptions.NotFound, e:
+ sg = {}
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_group=asg_to_dict(sg))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ cooldown=dict(type='int', default=300),
+ disk_config=dict(choices=['auto', 'manual']),
+            files=dict(type='dict', default={}),
+ flavor=dict(required=True),
+ image=dict(required=True),
+ key_name=dict(),
+ loadbalancers=dict(type='list'),
+ meta=dict(type='dict', default={}),
+ min_entities=dict(type='int', required=True),
+ max_entities=dict(type='int', required=True),
+ name=dict(required=True),
+ networks=dict(type='list', default=['public', 'private']),
+ server_name=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ cooldown = module.params.get('cooldown')
+ disk_config = module.params.get('disk_config')
+ if disk_config:
+ disk_config = disk_config.upper()
+ files = module.params.get('files')
+ flavor = module.params.get('flavor')
+ image = module.params.get('image')
+ key_name = module.params.get('key_name')
+ loadbalancers = module.params.get('loadbalancers')
+ meta = module.params.get('meta')
+ min_entities = module.params.get('min_entities')
+ max_entities = module.params.get('max_entities')
+ name = module.params.get('name')
+ networks = module.params.get('networks')
+ server_name = module.params.get('server_name')
+ state = module.params.get('state')
+
+ if not 0 <= min_entities <= 1000 or not 0 <= max_entities <= 1000:
+        module.fail_json(msg='min_entities and max_entities must be '
+                             'integers between 0 and 1000')
+
+ if not 0 <= cooldown <= 86400:
+ module.fail_json(msg='cooldown must be an integer between 0 and 86400')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asg(module, cooldown=cooldown, disk_config=disk_config,
+ files=files, flavor=flavor, image=image, meta=meta,
+ key_name=key_name, loadbalancers=loadbalancers,
+ min_entities=min_entities, max_entities=max_entities,
+ name=name, networks=networks, server_name=server_name,
+ state=state)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# invoke the module
+main()
diff --git a/cloud/rax_scaling_policy b/cloud/rax_scaling_policy
new file mode 100644
index 00000000000..19174ba370c
--- /dev/null
+++ b/cloud/rax_scaling_policy
@@ -0,0 +1,298 @@
+#!/usr/bin/python
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
+
+# This is a DOCUMENTATION stub specific to this module, it extends
+# a documentation fragment located in ansible.utils.module_docs_fragments
+DOCUMENTATION = '''
+---
+module: rax_scaling_policy
+short_description: Manipulate Rackspace Cloud Autoscale Scaling Policy
+description:
+ - Manipulate Rackspace Cloud Autoscale Scaling Policy
+version_added: 1.7
+options:
+ at:
+ description:
+ - The UTC time when this policy will be executed. The time must be
+ formatted according to C(yyyy-MM-dd'T'HH:mm:ss.SSS) such as
+ C(2013-05-19T08:07:08Z)
+ change:
+ description:
+ - The change, either as a number of servers or as a percentage, to make
+        in the scaling group. If this is a percentage, you must also set
+        I(is_percent) to C(true).
+ cron:
+ description:
+      - The time when the policy will be executed, as a cron entry. For
+        example, C(1 0 * * *) runs the policy at one minute past midnight
+        every day.
+ cooldown:
+ description:
+ - The period of time, in seconds, that must pass before any scaling can
+ occur after the previous scaling. Must be an integer between 0 and
+ 86400 (24 hrs).
+ desired_capacity:
+ description:
+      - The desired server capacity of the scaling group; that is, how
+ many servers should be in the scaling group.
+ is_percent:
+ description:
+ - Whether the value in I(change) is a percent value
+ default: false
+ name:
+ description:
+ - Name to give the policy
+ required: true
+ policy_type:
+ description:
+      - The type of scaling policy
+ choices:
+ - webhook
+ - schedule
+ required: true
+ scaling_group:
+ description:
+ - Name of the scaling group that this policy will be added to
+ required: true
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices:
+ - present
+ - absent
+ default: present
+author: Matt Martz
+extends_documentation_fragment: rackspace
+'''
+
+EXAMPLES = '''
+---
+- hosts: localhost
+ gather_facts: false
+ connection: local
+ tasks:
+ - rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ at: '2013-05-19T08:07:08Z'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - at
+ policy_type: schedule
+ scaling_group: ASG Test
+      register: asp_at
+
+ - rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cron: '1 0 * * *'
+ change: 25
+ cooldown: 300
+ is_percent: true
+ name: ASG Test Policy - cron
+ policy_type: schedule
+ scaling_group: ASG Test
+      register: asp_cron
+
+ - rax_scaling_policy:
+ credentials: ~/.raxpub
+ region: ORD
+ cooldown: 300
+ desired_capacity: 5
+ name: ASG Test Policy - webhook
+ policy_type: webhook
+ scaling_group: ASG Test
+      register: asp_webhook
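+
+    # A hypothetical follow-up task (not part of the original example):
+    # removing a policy only requires name, policy_type and scaling_group
+    # plus state=absent.
+    - rax_scaling_policy:
+        credentials: ~/.raxpub
+        region: ORD
+        name: ASG Test Policy - webhook
+        policy_type: webhook
+        scaling_group: ASG Test
+        state: absent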
+'''
+
+from uuid import UUID
+
+try:
+ import pyrax
+ HAS_PYRAX = True
+except ImportError:
+ HAS_PYRAX = False
+
+NON_CALLABLES = (basestring, bool, dict, int, list, type(None))
+
+
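+# Serialize a pyrax scaling policy into a plain dict of JSON-friendly
+# attributes for module output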
+def to_dict(obj):
+ instance = {}
+ for key in dir(obj):
+ value = getattr(obj, key)
+ if (isinstance(value, NON_CALLABLES) and not key.startswith('_')):
+ instance[key] = value
+ return instance
+
+
+def rax_asp(module, at=None, change=0, cron=None, cooldown=300,
+ desired_capacity=0, is_percent=False, name=None,
+ policy_type=None, scaling_group=None, state='present'):
+ changed = False
+
+ au = pyrax.autoscale
+ if not au:
+ module.fail_json(msg='Failed to instantiate client. This '
+ 'typically indicates an invalid region or an '
+ 'incorrectly capitalized region name.')
+
+ try:
+ UUID(scaling_group)
+ except ValueError:
+ try:
+ sg = au.find(name=scaling_group)
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+ else:
+ try:
+ sg = au.get(scaling_group)
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ if state == 'present':
+ policies = filter(lambda p: name == p.name, sg.list_policies())
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
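+
+        # Schedule policies take their trigger via 'args' (either 'at' or
+        # 'cron'); webhook policies leave it as None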
+ if at:
+ args = dict(at=at)
+ elif cron:
+ args = dict(cron=cron)
+ else:
+ args = None
+
+ if not policies:
+ try:
+ policy = sg.add_policy(name, policy_type=policy_type,
+ cooldown=cooldown, change=change,
+ is_percent=is_percent,
+ desired_capacity=desired_capacity,
+ args=args)
+ changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ else:
+ policy = policies[0]
+ kwargs = {}
+ if policy_type != policy.type:
+ kwargs['policy_type'] = policy_type
+
+ if cooldown != policy.cooldown:
+ kwargs['cooldown'] = cooldown
+
+ if hasattr(policy, 'change') and change != policy.change:
+ kwargs['change'] = change
+
+ if hasattr(policy, 'changePercent') and is_percent is False:
+ kwargs['change'] = change
+ kwargs['is_percent'] = False
+ elif hasattr(policy, 'change') and is_percent is True:
+ kwargs['change'] = change
+ kwargs['is_percent'] = True
+
+ if hasattr(policy, 'desiredCapacity') and change:
+ kwargs['change'] = change
+ elif ((hasattr(policy, 'change') or
+ hasattr(policy, 'changePercent')) and desired_capacity):
+ kwargs['desired_capacity'] = desired_capacity
+
+ if hasattr(policy, 'args') and args != policy.args:
+ kwargs['args'] = args
+
+ if kwargs:
+ policy.update(**kwargs)
+ changed = True
+
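+        # Re-fetch the policy so the returned facts reflect any updates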
+ policy.get()
+
+ module.exit_json(changed=changed, autoscale_policy=to_dict(policy))
+
+ else:
+ try:
+ policies = filter(lambda p: name == p.name, sg.list_policies())
+ if len(policies) > 1:
+ module.fail_json(msg='No unique policy match found by name')
+ elif not policies:
+ policy = {}
+            else:
+                policy = policies[0]
+                policy.delete()
+                changed = True
+ except Exception, e:
+ module.fail_json(msg='%s' % e.message)
+
+ module.exit_json(changed=changed, autoscale_policy=to_dict(policy))
+
+
+def main():
+ argument_spec = rax_argument_spec()
+ argument_spec.update(
+ dict(
+ at=dict(),
+ change=dict(type='int'),
+ cron=dict(),
+ cooldown=dict(type='int', default=300),
+ desired_capacity=dict(type='int'),
+ is_percent=dict(type='bool', default=False),
+ name=dict(required=True),
+ policy_type=dict(required=True, choices=['webhook', 'schedule']),
+ scaling_group=dict(required=True),
+ state=dict(default='present', choices=['present', 'absent']),
+ )
+ )
+
+ module = AnsibleModule(
+ argument_spec=argument_spec,
+ required_together=rax_required_together(),
+ mutually_exclusive=[
+ ['cron', 'at'],
+ ['change', 'desired_capacity'],
+ ]
+ )
+
+ if not HAS_PYRAX:
+ module.fail_json(msg='pyrax is required for this module')
+
+ at = module.params.get('at')
+ change = module.params.get('change')
+ cron = module.params.get('cron')
+ cooldown = module.params.get('cooldown')
+ desired_capacity = module.params.get('desired_capacity')
+ is_percent = module.params.get('is_percent')
+ name = module.params.get('name')
+ policy_type = module.params.get('policy_type')
+ scaling_group = module.params.get('scaling_group')
+ state = module.params.get('state')
+
+ if (at or cron) and policy_type == 'webhook':
+        module.fail_json(msg='policy_type=schedule is required for a '
+                             'time-based policy')
+
+ setup_rax_module(module, pyrax)
+
+ rax_asp(module, at=at, change=change, cron=cron, cooldown=cooldown,
+ desired_capacity=desired_capacity, is_percent=is_percent,
+ name=name, policy_type=policy_type, scaling_group=scaling_group,
+ state=state)
+
+
+# import module snippets
+from ansible.module_utils.basic import *
+from ansible.module_utils.rax import *
+
+# invoke the module
+main()