mirror of https://github.com/ansible/ansible.git
Merge branch 'devel' of github.com:ansible/ansible into devel

Conflicts:
	library/monitoring/pagerduty

pull/6301/head
commit 265d9adbb9
@@ -0,0 +1,180 @@
|
||||
'\" t
|
||||
.\" Title: ansible-galaxy
|
||||
.\" Author: [see the "AUTHOR" section]
|
||||
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
|
||||
.\" Date: 03/16/2014
|
||||
.\" Manual: System administration commands
|
||||
.\" Source: Ansible 1.6
|
||||
.\" Language: English
|
||||
.\"
|
||||
.TH "ANSIBLE\-GALAXY" "1" "03/16/2014" "Ansible 1\&.6" "System administration commands"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * Define some portability stuff
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.\" http://bugs.debian.org/507673
|
||||
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
|
||||
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.ie \n(.g .ds Aq \(aq
|
||||
.el .ds Aq '
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * set default formatting
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" disable hyphenation
|
||||
.nh
|
||||
.\" disable justification (adjust text to left margin only)
|
||||
.ad l
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * MAIN CONTENT STARTS HERE *
|
||||
.\" -----------------------------------------------------------------
|
||||
.SH "NAME"
|
||||
ansible-galaxy \- manage roles using galaxy\&.ansible\&.com
|
||||
.SH "SYNOPSIS"
|
||||
.sp
|
||||
ansible\-galaxy [init|info|install|list|remove] [\-\-help] [options] \&...
|
||||
.SH "DESCRIPTION"
|
||||
.sp
|
||||
\fBAnsible Galaxy\fR is a shared repository for Ansible roles (added in ansible version 1\&.2)\&. The ansible\-galaxy command can be used to manage these roles, or to create a skeleton framework for roles you\(cqd like to upload to Galaxy\&.
|
||||
.SH "COMMON OPTIONS"
|
||||
.PP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
.RS 4
|
||||
Show a help message related to the given sub\-command\&.
|
||||
.RE
|
||||
.SH "INSTALL"
|
||||
.sp
|
||||
The \fBinstall\fR sub\-command is used to install roles\&.
|
||||
.SS "USAGE"
|
||||
.sp
|
||||
$ ansible\-galaxy install [options] [\-r FILE | role_name(s)[,version] | tar_file(s)]
|
||||
.sp
|
||||
Roles can be installed in several different ways:
|
||||
.sp
|
||||
.RS 4
|
||||
.ie n \{\
|
||||
\h'-04'\(bu\h'+03'\c
|
||||
.\}
|
||||
.el \{\
|
||||
.sp -1
|
||||
.IP \(bu 2.3
|
||||
.\}
|
||||
A username\&.rolename[,version] \- this will install a single role\&. The Galaxy API will be contacted to provide the information about the role, and the corresponding \&.tar\&.gz will be downloaded from
|
||||
\fBgithub\&.com\fR\&. If the version is omitted, the most recent version available will be installed\&.
|
||||
.RE
|
||||
.sp
|
||||
.RS 4
|
||||
.ie n \{\
|
||||
\h'-04'\(bu\h'+03'\c
|
||||
.\}
|
||||
.el \{\
|
||||
.sp -1
|
||||
.IP \(bu 2.3
|
||||
.\}
|
||||
A file name, using
|
||||
\fB\-r\fR
|
||||
\- this will install multiple roles listed one per line\&. The format of each line is the same as above: username\&.rolename[,version]
|
||||
.RE
|
||||
.sp
|
||||
.RS 4
|
||||
.ie n \{\
|
||||
\h'-04'\(bu\h'+03'\c
|
||||
.\}
|
||||
.el \{\
|
||||
.sp -1
|
||||
.IP \(bu 2.3
|
||||
.\}
|
||||
A \&.tar\&.gz of a valid role you\(cqve downloaded directly from
|
||||
\fBgithub\&.com\fR\&. This is mainly useful when the system running Ansible does not have access to the Galaxy API, for instance when behind a firewall or proxy\&.
|
||||
.RE
|
||||
.SS "OPTIONS"
|
||||
.PP
|
||||
\fB\-f\fR, \fB\-\-force\fR
|
||||
.RS 4
|
||||
Force overwriting an existing role\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-i\fR, \fB\-\-ignore\-errors\fR
|
||||
.RS 4
|
||||
Ignore errors and continue with the next specified role\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-n\fR, \fB\-\-no\-deps\fR
|
||||
.RS 4
|
||||
Don\(cqt download roles listed as dependencies\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR
|
||||
.RS 4
|
||||
The path to the directory containing your roles\&. The default is the
|
||||
\fBroles_path\fR
|
||||
configured in your
|
||||
\fBansible\&.cfg\fR
|
||||
file (/etc/ansible/roles if not configured)
|
||||
.RE
|
||||
.PP
|
||||
\fB\-r\fR \fIROLE_FILE\fR, \fB\-\-role\-file=\fR\fIROLE_FILE\fR
|
||||
.RS 4
|
||||
A file containing a list of roles to be imported, as specified above\&. This option cannot be used if a rolename or \&.tar\&.gz has been specified\&.
|
||||
.RE
|
||||
.SH "REMOVE"
|
||||
.sp
|
||||
The \fBremove\fR sub\-command is used to remove one or more roles\&.
|
||||
.SS "USAGE"
|
||||
.sp
|
||||
$ ansible\-galaxy remove role1 role2 \&...
|
||||
.SS "OPTIONS"
|
||||
.PP
|
||||
\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR
|
||||
.RS 4
|
||||
The path to the directory containing your roles\&. The default is the
|
||||
\fBroles_path\fR
|
||||
configured in your
|
||||
\fBansible\&.cfg\fR
|
||||
file (/etc/ansible/roles if not configured)
|
||||
.RE
|
||||
.SH "INIT"
|
||||
.sp
|
||||
The \fBinit\fR command is used to create an empty role suitable for uploading to https://galaxy\&.ansible\&.com (or for roles in general)\&.
|
||||
.SS "USAGE"
|
||||
.sp
|
||||
$ ansible\-galaxy init [options] role_name
|
||||
.SS "OPTIONS"
|
||||
.PP
|
||||
\fB\-f\fR, \fB\-\-force\fR
|
||||
.RS 4
|
||||
Force overwriting an existing role\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-p\fR \fIINIT_PATH\fR, \fB\-\-init\-path=\fR\fIINIT_PATH\fR
|
||||
.RS 4
|
||||
The path in which the skeleton role will be created\&. The default is the current working directory\&.
|
||||
.RE
|
||||
.SH "LIST"
|
||||
.sp
|
||||
The \fBlist\fR sub\-command is used to show what roles are currently installed\&. You can specify a role name, and if installed only that role will be shown\&.
|
||||
.SS "USAGE"
|
||||
.sp
|
||||
$ ansible\-galaxy list [role_name]
|
||||
.SS "OPTIONS"
|
||||
.PP
|
||||
\fB\-p\fR \fIROLES_PATH\fR, \fB\-\-roles\-path=\fR\fIROLES_PATH\fR
|
||||
.RS 4
|
||||
The path to the directory containing your roles\&. The default is the
|
||||
\fBroles_path\fR
|
||||
configured in your
|
||||
\fBansible\&.cfg\fR
|
||||
file (/etc/ansible/roles if not configured)
|
||||
.RE
|
||||
.SH "AUTHOR"
|
||||
.sp
|
||||
Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&.
|
||||
.SH "COPYRIGHT"
|
||||
.sp
|
||||
Copyright \(co 2014, Michael DeHaan
|
||||
.sp
|
||||
Ansible is released under the terms of the GPLv3 License\&.
|
||||
.SH "SEE ALSO"
|
||||
.sp
|
||||
\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1)
|
||||
.sp
|
||||
Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible
|
@@ -0,0 +1,167 @@
|
||||
ansible-galaxy(1)
|
||||
===================
|
||||
:doctype: manpage
|
||||
:man source: Ansible
|
||||
:man version: %VERSION%
|
||||
:man manual: System administration commands
|
||||
|
||||
NAME
|
||||
----
|
||||
ansible-galaxy - manage roles using galaxy.ansible.com
|
||||
|
||||
|
||||
SYNOPSIS
|
||||
--------
|
||||
ansible-galaxy [init|info|install|list|remove] [--help] [options] ...
|
||||
|
||||
|
||||
DESCRIPTION
|
||||
-----------
|
||||
|
||||
*Ansible Galaxy* is a shared repository for Ansible roles (added in
ansible version 1.2). The ansible-galaxy command can be used to manage
these roles, or to create a skeleton framework for roles you'd like
to upload to Galaxy.
|
||||
|
||||
COMMON OPTIONS
|
||||
--------------
|
||||
|
||||
*-h*, *--help*::
|
||||
|
||||
Show a help message related to the given sub-command.
|
||||
|
||||
|
||||
INSTALL
|
||||
-------
|
||||
|
||||
The *install* sub-command is used to install roles.
|
||||
|
||||
USAGE
|
||||
~~~~~
|
||||
|
||||
$ ansible-galaxy install [options] [-r FILE | role_name(s)[,version] | tar_file(s)]
|
||||
|
||||
Roles can be installed in several different ways:
|
||||
|
||||
* A username.rolename[,version] - this will install a single role. The Galaxy
|
||||
API will be contacted to provide the information about the role, and the
|
||||
corresponding .tar.gz will be downloaded from *github.com*. If the version
|
||||
is omitted, the most recent version available will be installed.
|
||||
|
||||
* A file name, using *-r* - this will install multiple roles listed one per
|
||||
line. The format of each line is the same as above: username.rolename[,version]
|
||||
|
||||
* A .tar.gz of a valid role you've downloaded directly from *github.com*. This
|
||||
is mainly useful when the system running Ansible does not have access to
|
||||
the Galaxy API, for instance when behind a firewall or proxy.
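
For example, each of the following is a valid invocation (the role and file
names shown here are placeholders):

    $ ansible-galaxy install username.rolename
    $ ansible-galaxy install username.rolename,v1.0.0
    $ ansible-galaxy install -r roles.txt
    $ ansible-galaxy install ~/Downloads/username.rolename.tar.gz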
|
||||
|
||||
|
||||
OPTIONS
|
||||
~~~~~~~
|
||||
|
||||
*-f*, *--force*::
|
||||
|
||||
Force overwriting an existing role.
|
||||
|
||||
*-i*, *--ignore-errors*::
|
||||
|
||||
Ignore errors and continue with the next specified role.
|
||||
|
||||
*-n*, *--no-deps*::
|
||||
|
||||
Don't download roles listed as dependencies.
|
||||
|
||||
*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::
|
||||
|
||||
The path to the directory containing your roles. The default is the *roles_path*
|
||||
configured in your *ansible.cfg* file (/etc/ansible/roles if not configured)
|
||||
|
||||
*-r* 'ROLE_FILE', *--role-file=*'ROLE_FILE'::
|
||||
|
||||
A file containing a list of roles to be imported, as specified above. This
|
||||
option cannot be used if a rolename or .tar.gz has been specified.
|
||||
|
||||
REMOVE
|
||||
------
|
||||
|
||||
The *remove* sub-command is used to remove one or more roles.
|
||||
|
||||
USAGE
|
||||
~~~~~
|
||||
|
||||
$ ansible-galaxy remove role1 role2 ...
|
||||
|
||||
OPTIONS
|
||||
~~~~~~~
|
||||
|
||||
*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::
|
||||
|
||||
The path to the directory containing your roles. The default is the *roles_path*
|
||||
configured in your *ansible.cfg* file (/etc/ansible/roles if not configured)
|
||||
|
||||
INIT
|
||||
----
|
||||
|
||||
The *init* command is used to create an empty role suitable for uploading
|
||||
to https://galaxy.ansible.com (or for roles in general).
|
||||
|
||||
USAGE
|
||||
~~~~~
|
||||
|
||||
$ ansible-galaxy init [options] role_name
|
||||
|
||||
OPTIONS
|
||||
~~~~~~~
|
||||
|
||||
*-f*, *--force*::
|
||||
|
||||
Force overwriting an existing role.
|
||||
|
||||
*-p* 'INIT_PATH', *--init-path=*'INIT_PATH'::
|
||||
|
||||
The path in which the skeleton role will be created. The default is the current
working directory.
|
||||
|
||||
LIST
|
||||
----
|
||||
|
||||
The *list* sub-command is used to show what roles are currently installed.
|
||||
You can specify a role name, and if installed only that role will be shown.
|
||||
|
||||
USAGE
|
||||
~~~~~
|
||||
|
||||
$ ansible-galaxy list [role_name]
|
||||
|
||||
OPTIONS
|
||||
~~~~~~~
|
||||
|
||||
*-p* 'ROLES_PATH', *--roles-path=*'ROLES_PATH'::
|
||||
|
||||
The path to the directory containing your roles. The default is the *roles_path*
|
||||
configured in your *ansible.cfg* file (/etc/ansible/roles if not configured)
|
||||
|
||||
|
||||
AUTHOR
|
||||
------
|
||||
|
||||
Ansible was originally written by Michael DeHaan. See the AUTHORS file
|
||||
for a complete list of contributors.
|
||||
|
||||
|
||||
COPYRIGHT
|
||||
---------
|
||||
|
||||
Copyright © 2014, Michael DeHaan
|
||||
|
||||
Ansible is released under the terms of the GPLv3 License.
|
||||
|
||||
|
||||
SEE ALSO
|
||||
--------
|
||||
|
||||
*ansible*(1), *ansible-pull*(1), *ansible-doc*(1)
|
||||
|
||||
Extensive documentation is available in the documentation site:
|
||||
<http://docs.ansible.com>. IRC and mailing list info can be found
|
||||
in file CONTRIBUTING.md, available in: <https://github.com/ansible/ansible>
|
@@ -0,0 +1,103 @@
|
||||
'\" t
|
||||
.\" Title: ansible-vault
|
||||
.\" Author: [see the "AUTHOR" section]
|
||||
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
|
||||
.\" Date: 03/17/2014
|
||||
.\" Manual: System administration commands
|
||||
.\" Source: Ansible 1.6
|
||||
.\" Language: English
|
||||
.\"
|
||||
.TH "ANSIBLE\-VAULT" "1" "03/17/2014" "Ansible 1\&.6" "System administration commands"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * Define some portability stuff
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.\" http://bugs.debian.org/507673
|
||||
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
|
||||
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
.ie \n(.g .ds Aq \(aq
|
||||
.el .ds Aq '
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * set default formatting
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" disable hyphenation
|
||||
.nh
|
||||
.\" disable justification (adjust text to left margin only)
|
||||
.ad l
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * MAIN CONTENT STARTS HERE *
|
||||
.\" -----------------------------------------------------------------
|
||||
.SH "NAME"
|
||||
ansible-vault \- manage encrypted YAML data\&.
|
||||
.SH "SYNOPSIS"
|
||||
.sp
|
||||
ansible\-vault [create|decrypt|edit|encrypt|rekey] [\-\-help] [options] file_name
|
||||
.SH "DESCRIPTION"
|
||||
.sp
|
||||
\fBansible\-vault\fR can encrypt any structured data file used by Ansible\&. This can include \fBgroup_vars/\fR or \fBhost_vars/\fR inventory variables, variables loaded by \fBinclude_vars\fR or \fBvars_files\fR, or variable files passed on the ansible\-playbook command line with \fB\-e @file\&.yml\fR or \fB\-e @file\&.json\fR\&. Role variables and defaults are also included!
|
||||
.sp
|
||||
Because Ansible tasks, handlers, and so on are also data, these can also be encrypted with vault\&. If you\(cqd like to not reveal which variables you are even using, you can go as far as keeping an individual task file entirely encrypted\&.
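.sp
For example, to encrypt an existing variables file and later edit it in place (the file name here is only illustrative):
.sp
\fB$ ansible\-vault encrypt group_vars/all\&.yml\fR
.sp
\fB$ ansible\-vault edit group_vars/all\&.yml\fR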
|
||||
.SH "COMMON OPTIONS"
|
||||
.sp
|
||||
The following options are available to all sub\-commands:
|
||||
.PP
|
||||
\fB\-\-vault\-password\-file=\fR\fIFILE\fR
|
||||
.RS 4
|
||||
A file containing the vault password to be used during the encryption/decryption steps\&. Be sure to keep this file secured if it is used\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-h\fR, \fB\-\-help\fR
|
||||
.RS 4
|
||||
Show a help message related to the given sub\-command\&.
|
||||
.RE
|
||||
.PP
|
||||
\fB\-\-debug\fR
|
||||
.RS 4
|
||||
Enable debugging output for troubleshooting\&.
|
||||
.RE
|
||||
.SH "CREATE"
|
||||
.sp
|
||||
\fB$ ansible\-vault create [options] FILE\fR
|
||||
.sp
|
||||
The \fBcreate\fR sub\-command is used to initialize a new encrypted file\&.
|
||||
.sp
|
||||
First you will be prompted for a password\&. The password used with vault currently must be the same for all files you wish to use together at the same time\&.
|
||||
.sp
|
||||
After providing a password, the tool will launch whatever editor you have defined with $EDITOR, defaulting to vim\&. Once you are done with the editor session, the file will be saved as encrypted data\&.
|
||||
.sp
|
||||
The default cipher is AES (which is shared\-secret based)\&.
|
||||
.SH "EDIT"
|
||||
.sp
|
||||
\fB$ ansible\-vault edit [options] FILE\fR
|
||||
.sp
|
||||
The \fBedit\fR sub\-command is used to modify a file which was previously encrypted using ansible\-vault\&.
|
||||
.sp
|
||||
This command will decrypt the file to a temporary file and allow you to edit the file, saving it back when done and removing the temporary file\&.
|
||||
.SH "REKEY"
|
||||
.sp
|
||||
\fB$ ansible\-vault rekey [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
|
||||
.sp
|
||||
The \fBrekey\fR command is used to change the password on vault\-encrypted files\&. This command can update multiple files at once, and will prompt for both the old and new passwords before modifying any data\&.
|
||||
.SH "ENCRYPT"
|
||||
.sp
|
||||
\fB$ ansible\-vault encrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
|
||||
.sp
|
||||
The \fBencrypt\fR sub\-command is used to encrypt pre\-existing data files\&. As with the \fBrekey\fR command, you can specify multiple files in one command\&.
|
||||
.SH "DECRYPT"
|
||||
.sp
|
||||
\fB$ ansible\-vault decrypt [options] FILE_1 [FILE_2, \&..., FILE_N]\fR
|
||||
.sp
|
||||
The \fBdecrypt\fR sub\-command is used to remove all encryption from data files\&. The files will be stored as plain\-text YAML once again, so be sure that you do not run this command on data files with active passwords or other sensitive data\&. In most cases, users will want to use the \fBedit\fR sub\-command to modify the files securely\&.
|
||||
.SH "AUTHOR"
|
||||
.sp
|
||||
Ansible was originally written by Michael DeHaan\&. See the AUTHORS file for a complete list of contributors\&.
|
||||
.SH "COPYRIGHT"
|
||||
.sp
|
||||
Copyright \(co 2014, Michael DeHaan
|
||||
.sp
|
||||
Ansible is released under the terms of the GPLv3 License\&.
|
||||
.SH "SEE ALSO"
|
||||
.sp
|
||||
\fBansible\fR(1), \fBansible\-pull\fR(1), \fBansible\-doc\fR(1)
|
||||
.sp
|
||||
Extensive documentation is available in the documentation site: http://docs\&.ansible\&.com\&. IRC and mailing list info can be found in file CONTRIBUTING\&.md, available in: https://github\&.com/ansible/ansible
|
@@ -0,0 +1,245 @@
|
||||
Google Cloud Platform Guide
|
||||
===========================
|
||||
|
||||
.. gce_intro:
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
.. note:: This section of the documentation is under construction. We are in the process of adding more examples about all of the GCE modules and how they work together. Upgrades via github pull requests are welcomed!
|
||||
|
||||
Ansible contains modules for managing Google Compute Engine resources, including creating instances, controlling network access, working with persistent disks, and managing
|
||||
load balancers. Additionally, there is an inventory plugin that can automatically suck down all of your GCE instances into Ansible dynamic inventory, and create groups by tag and other properties.
|
||||
|
||||
The GCE modules all require the apache-libcloud module, which you can install from pip:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ pip install apache-libcloud
|
||||
|
||||
.. note:: If you're using Ansible on Mac OS X, libcloud also needs to access a CA cert chain. You'll need to download one (you can get one from `here <http://curl.haxx.se/docs/caextract.html>`_).
|
||||
|
||||
Credentials
|
||||
-----------
|
||||
|
||||
To work with the GCE modules, you'll first need to get some credentials. You can create new credentials from the `console <https://console.developers.google.com/>`_ by going to the "APIs and Auth" section. Once you've created a new client ID and downloaded the generated private key (in the `pkcs12 format <http://en.wikipedia.org/wiki/PKCS_12>`_), you'll need to convert the key by running the following command:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret -nodes -nocerts | openssl rsa -out pkey.pem
|
||||
|
||||
There are two different ways to provide credentials to Ansible so that it can talk with Google Cloud for provisioning and configuration actions:
|
||||
|
||||
* by providing them to the modules directly
|
||||
* by populating a ``secrets.py`` file
|
||||
|
||||
Calling Modules By Passing Credentials
|
||||
``````````````````````````````````````
|
||||
|
||||
For the GCE modules you can specify the credentials as arguments:
|
||||
|
||||
* ``service_account_email``: email associated with the project
|
||||
* ``pem_file``: path to the pem file
|
||||
* ``project_id``: id of the project
|
||||
|
||||
For example, to create a new instance using the gce module, you can use the following configuration:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- name: Create instance(s)
|
||||
hosts: localhost
|
||||
connection: local
|
||||
gather_facts: no
|
||||
|
||||
vars:
|
||||
service_account_email: unique-id@developer.gserviceaccount.com
|
||||
pem_file: /path/to/project.pem
|
||||
project_id: project-id
|
||||
machine_type: n1-standard-1
|
||||
image: debian-7
|
||||
|
||||
tasks:
|
||||
|
||||
- name: Launch instances
|
||||
gce:
|
||||
instance_names: dev
|
||||
machine_type: "{{ machine_type }}"
|
||||
image: "{{ image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
pem_file: "{{ pem_file }}"
|
||||
project_id: "{{ project_id }}"
|
||||
|
||||
Calling Modules with secrets.py
|
||||
```````````````````````````````
|
||||
|
||||
Create a file ``secrets.py`` looking like following, and put it in some folder which is in your ``$PYTHONPATH``:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
GCE_PARAMS = ('i...@project.googleusercontent.com', '/path/to/project.pem')
|
||||
GCE_KEYWORD_PARAMS = {'project': 'project-name'}
|
||||
|
||||
Now the modules can be used as above, but the account information can be omitted.
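
A minimal sketch of that workflow, assuming ``secrets.py`` was saved to ``~/gce-secrets`` and ``gce-create.yml`` stands in for your own playbook:

.. code-block:: bash

    $ export PYTHONPATH=~/gce-secrets:$PYTHONPATH
    $ ansible-playbook gce-create.yml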
|
||||
|
||||
GCE Dynamic Inventory
|
||||
---------------------
|
||||
|
||||
The best way to interact with your hosts is to use the gce inventory plugin, which dynamically queries GCE and tells Ansible what nodes can be managed.
|
||||
|
||||
Note that when using the inventory script ``gce.py``, you also need to populate the ``gce.ini`` file that you can find in the plugins/inventory directory of the ansible checkout.
|
||||
|
||||
To use the GCE dynamic inventory script, copy ``gce.py`` from ``plugins/inventory`` into your inventory directory and make it executable. You can specify credentials for ``gce.py`` using the ``GCE_INI_PATH`` environment variable -- the default is to look for gce.ini in the same directory as the inventory script.
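
A minimal sketch of that setup, assuming you are working from a checkout of the Ansible repository:

.. code-block:: bash

    $ cp plugins/inventory/gce.py .
    $ chmod +x gce.py
    $ cp plugins/inventory/gce.ini ~/.gce.ini    # fill in your project and credential details
    $ export GCE_INI_PATH=~/.gce.ini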
|
||||
|
||||
Let's see if inventory is working:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ ./gce.py --list
|
||||
|
||||
You should see output describing the hosts you have, if any, running in Google Compute Engine.
|
||||
|
||||
Now let's see if we can use the inventory script to talk to Google.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ GCE_INI_PATH=~/.gce.ini ansible all -i gce.py -m setup
|
||||
hostname | success >> {
|
||||
"ansible_facts": {
|
||||
"ansible_all_ipv4_addresses": [
|
||||
"x.x.x.x"
|
||||
],
|
||||
|
||||
As with all dynamic inventory plugins in Ansible, you can configure the inventory path in ansible.cfg. The recommended way to use the inventory is to create an ``inventory`` directory, and place both the ``gce.py`` script and a file containing ``localhost`` in it. This can allow for cloud inventory to be used alongside local inventory (such as a physical datacenter) or machines running in different providers.
|
||||
|
||||
Executing ``ansible`` or ``ansible-playbook`` and specifying the ``inventory`` directory instead of an individual file will cause ansible to evaluate each file in that directory for inventory.
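
For example (the file name ``local`` is arbitrary):

.. code-block:: bash

    $ mkdir -p inventory
    $ cp gce.py inventory/
    $ echo "localhost" > inventory/local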
|
||||
|
||||
Let's once again use our inventory script to see if it can talk to Google Cloud:
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
$ ansible all -i inventory/ -m setup
|
||||
hostname | success >> {
|
||||
"ansible_facts": {
|
||||
"ansible_all_ipv4_addresses": [
|
||||
"x.x.x.x"
|
||||
],
|
||||
|
||||
The output should be similar to the previous command. If you want less output and just want to check for SSH connectivity, use ``-m ping`` instead.
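
In full, that connectivity check is:

.. code-block:: bash

    $ ansible all -i inventory/ -m ping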
|
||||
|
||||
Use Cases
|
||||
---------
|
||||
|
||||
For the following use cases, let's use this small shell script as a wrapper.
|
||||
|
||||
.. code-block:: bash
|
||||
|
||||
#!/bin/bash
|
||||
PLAYBOOK="$1"
|
||||
|
||||
    if [ -z "$PLAYBOOK" ]; then
        echo "You need to pass a playbook as an argument to this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export SSL_CERT_FILE=$(pwd)/cacert.cer
|
||||
export ANSIBLE_HOST_KEY_CHECKING=False
|
||||
|
||||
if [ ! -f "$SSL_CERT_FILE" ]; then
|
||||
curl -O http://curl.haxx.se/ca/cacert.pem
|
||||
fi
|
||||
|
||||
ansible-playbook -v -i inventory/ "$PLAYBOOK"
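
Assuming the wrapper above is saved as ``gce-run.sh`` and your playbook is named ``gce-create.yml`` (both names here are placeholders), the use cases below can then be run as:

.. code-block:: bash

    $ chmod +x gce-run.sh
    $ ./gce-run.sh gce-create.yml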
|
||||
|
||||
|
||||
Create an instance
|
||||
``````````````````
|
||||
|
||||
The GCE module provides the ability to provision instances within Google Compute Engine. The provisioning task is typically performed from your Ansible control server against Google Cloud's API.
|
||||
|
||||
A playbook would look like this:
|
||||
|
||||
.. code-block:: yaml
|
||||
|
||||
- name: Create instance(s)
|
||||
hosts: localhost
|
||||
gather_facts: no
|
||||
connection: local
|
||||
|
||||
vars:
|
||||
machine_type: n1-standard-1 # default
|
||||
image: debian-7
|
||||
service_account_email: unique-id@developer.gserviceaccount.com
|
||||
pem_file: /path/to/project.pem
|
||||
project_id: project-id
|
||||
|
||||
tasks:
|
||||
- name: Launch instances
|
||||
gce:
|
||||
instance_names: dev
|
||||
machine_type: "{{ machine_type }}"
|
||||
image: "{{ image }}"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
pem_file: "{{ pem_file }}"
|
||||
project_id: "{{ project_id }}"
|
||||
tags: webserver
|
||||
register: gce
|
||||
|
||||
- name: Wait for SSH to come up
|
||||
wait_for: host={{ item.public_ip }} port=22 delay=10 timeout=60
|
||||
with_items: gce.instance_data
|
||||
|
||||
    - name: Add new instances to the in-memory group
      add_host: hostname={{ item.public_ip }} groupname=new_instances
      with_items: gce.instance_data
|
||||
|
||||
- name: Manage new instances
|
||||
hosts: new_instances
|
||||
connection: ssh
|
||||
roles:
|
||||
- base_configuration
|
||||
- production_server
|
||||
|
||||
Note that use of the "add_host" module above creates a temporary, in-memory group. This means that a play in the same playbook can then manage machines
|
||||
in the 'new_instances' group, if so desired. Any sort of arbitrary configuration is possible at this point.
|
||||
|
||||
Configuring instances in a group
|
||||
````````````````````````````````
|
||||
|
||||
All of the created instances in GCE are grouped by tag. Since this is a cloud, it's probably best to ignore hostnames and just focus on group management.
|
||||
|
||||
Normally we'd also use roles here, but the following example is a simple one. Here we will also use the "gce_net" module to open up access to port 80 on
|
||||
these nodes.
|
||||
|
||||
The variables in the 'vars' section could also be kept in a 'vars_files' file or something encrypted with Ansible-vault, if you so choose. This is just
|
||||
a basic example of what is possible::
|
||||
|
||||
- name: Setup web servers
|
||||
hosts: tag_webserver
|
||||
gather_facts: no
|
||||
|
||||
vars:
|
||||
machine_type: n1-standard-1 # default
|
||||
image: debian-7
|
||||
service_account_email: unique-id@developer.gserviceaccount.com
|
||||
pem_file: /path/to/project.pem
|
||||
project_id: project-id
|
||||
|
||||
  tasks:
|
||||
|
||||
- name: Install lighttpd
|
||||
apt: pkg=lighttpd state=installed
|
||||
sudo: True
|
||||
|
||||
- name: Allow HTTP
|
||||
local_action: gce_net
|
||||
args:
|
||||
fwname: "all-http"
|
||||
name: "default"
|
||||
allowed: "tcp:80"
|
||||
state: "present"
|
||||
service_account_email: "{{ service_account_email }}"
|
||||
pem_file: "{{ pem_file }}"
|
||||
project_id: "{{ project_id }}"
|
||||
|
||||
By pointing your browser to the IP of the server, you should see a page welcoming you.
|
||||
|
||||
Upgrades to this documentation are welcome, hit the github link at the top right of this page if you would like to make additions!
|
||||
|
File diff suppressed because it is too large
@@ -0,0 +1,252 @@
|
||||
import os
|
||||
import re
|
||||
import types
|
||||
import ConfigParser
|
||||
import shlex
|
||||
|
||||
|
||||
class RegistrationBase(object):
|
||||
def __init__(self, module, username=None, password=None):
|
||||
self.module = module
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
def configure(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def enable(self):
|
||||
# Remove any existing redhat.repo
|
||||
redhat_repo = '/etc/yum.repos.d/redhat.repo'
|
||||
if os.path.isfile(redhat_repo):
|
||||
os.unlink(redhat_repo)
|
||||
|
||||
def register(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def unregister(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def unsubscribe(self):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
def update_plugin_conf(self, plugin, enabled=True):
|
||||
plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin
|
||||
if os.path.isfile(plugin_conf):
|
||||
cfg = ConfigParser.ConfigParser()
|
||||
cfg.read([plugin_conf])
|
||||
if enabled:
|
||||
cfg.set('main', 'enabled', 1)
|
||||
else:
|
||||
cfg.set('main', 'enabled', 0)
|
||||
            fd = open(plugin_conf, 'w+')
|
||||
cfg.write(fd)
|
||||
fd.close()
|
||||
|
||||
def subscribe(self, **kwargs):
|
||||
raise NotImplementedError("Must be implemented by a sub-class")
|
||||
|
||||
|
||||
class Rhsm(RegistrationBase):
|
||||
def __init__(self, module, username=None, password=None):
|
||||
RegistrationBase.__init__(self, module, username, password)
|
||||
self.config = self._read_config()
|
||||
self.module = module
|
||||
|
||||
def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'):
|
||||
'''
|
||||
Load RHSM configuration from /etc/rhsm/rhsm.conf.
|
||||
Returns:
|
||||
* ConfigParser object
|
||||
'''
|
||||
|
||||
# Read RHSM defaults ...
|
||||
cp = ConfigParser.ConfigParser()
|
||||
cp.read(rhsm_conf)
|
||||
|
||||
# Add support for specifying a default value w/o having to standup some configuration
|
||||
# Yeah, I know this should be subclassed ... but, oh well
|
||||
def get_option_default(self, key, default=''):
|
||||
sect, opt = key.split('.', 1)
|
||||
if self.has_section(sect) and self.has_option(sect, opt):
|
||||
return self.get(sect, opt)
|
||||
else:
|
||||
return default
|
||||
|
||||
cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser)
|
||||
|
||||
return cp
|
||||
|
||||
def enable(self):
|
||||
'''
|
||||
Enable the system to receive updates from subscription-manager.
|
||||
This involves updating affected yum plugins and removing any
|
||||
conflicting yum repositories.
|
||||
'''
|
||||
RegistrationBase.enable(self)
|
||||
self.update_plugin_conf('rhnplugin', False)
|
||||
self.update_plugin_conf('subscription-manager', True)
|
||||
|
||||
def configure(self, **kwargs):
|
||||
'''
|
||||
Configure the system as directed for registration with RHN
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'config']
|
||||
|
||||
# Pass supplied **kwargs as parameters to subscription-manager. Ignore
|
||||
# non-configuration parameters and replace '_' with '.'. For example,
|
||||
# 'server_hostname' becomes '--system.hostname'.
|
||||
for k,v in kwargs.items():
|
||||
if re.search(r'^(system|rhsm)_', k):
|
||||
args.append('--%s=%s' % (k.replace('_','.'), v))
|
||||
|
||||
self.module.run_command(args, check_rc=True)
|
||||
|
||||
@property
|
||||
def is_registered(self):
|
||||
'''
|
||||
        Determine whether the current system is registered to RHN.
|
||||
Returns:
|
||||
* Boolean - whether the current system is currently registered to
|
||||
RHN.
|
||||
'''
|
||||
# Quick version...
|
||||
if False:
|
||||
return os.path.isfile('/etc/pki/consumer/cert.pem') and \
|
||||
os.path.isfile('/etc/pki/consumer/key.pem')
|
||||
|
||||
args = ['subscription-manager', 'identity']
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=False)
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def register(self, username, password, autosubscribe, activationkey):
|
||||
'''
|
||||
Register the current system to the provided RHN server
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'register']
|
||||
|
||||
# Generate command arguments
|
||||
if activationkey:
|
||||
args.append('--activationkey "%s"' % activationkey)
|
||||
else:
|
||||
if autosubscribe:
|
||||
args.append('--autosubscribe')
|
||||
if username:
|
||||
args.extend(['--username', username])
|
||||
if password:
|
||||
args.extend(['--password', password])
|
||||
|
||||
# Do the needful...
|
||||
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def unsubscribe(self):
|
||||
'''
|
||||
Unsubscribe a system from all subscribed channels
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'unsubscribe', '--all']
|
||||
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def unregister(self):
|
||||
'''
|
||||
Unregister a currently registered system
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
args = ['subscription-manager', 'unregister']
|
||||
rc, stderr, stdout = self.module.run_command(args, check_rc=True)
|
||||
|
||||
def subscribe(self, regexp):
|
||||
'''
|
||||
Subscribe current system to available pools matching the specified
|
||||
regular expression
|
||||
Raises:
|
||||
* Exception - if error occurs while running command
|
||||
'''
|
||||
|
||||
# Available pools ready for subscription
|
||||
available_pools = RhsmPools(self.module)
|
||||
|
||||
for pool in available_pools.filter(regexp):
|
||||
pool.subscribe()
|
||||
|
||||
|
||||
class RhsmPool(object):
|
||||
'''
|
||||
Convenience class for housing subscription information
|
||||
'''
|
||||
|
||||
def __init__(self, module, **kwargs):
|
||||
self.module = module
|
||||
for k,v in kwargs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
def __str__(self):
|
||||
return str(self.__getattribute__('_name'))
|
||||
|
||||
def subscribe(self):
|
||||
args = "subscription-manager subscribe --pool %s" % self.PoolId
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
if rc == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
|
||||
class RhsmPools(object):
|
||||
"""
|
||||
This class is used for manipulating pools subscriptions with RHSM
|
||||
"""
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
self.products = self._load_product_list()
|
||||
|
||||
def __iter__(self):
|
||||
return self.products.__iter__()
|
||||
|
||||
def _load_product_list(self):
|
||||
"""
|
||||
Loads list of all availaible pools for system in data structure
|
||||
"""
|
||||
args = "subscription-manager list --available"
|
||||
rc, stdout, stderr = self.module.run_command(args, check_rc=True)
|
||||
|
||||
products = []
|
||||
for line in stdout.split('\n'):
|
||||
# Remove leading+trailing whitespace
|
||||
line = line.strip()
|
||||
            # An empty line implies the end of an output group
|
||||
if len(line) == 0:
|
||||
continue
|
||||
# If a colon ':' is found, parse
|
||||
elif ':' in line:
|
||||
(key, value) = line.split(':',1)
|
||||
key = key.strip().replace(" ", "") # To unify
|
||||
value = value.strip()
|
||||
if key in ['ProductName', 'SubscriptionName']:
|
||||
# Remember the name for later processing
|
||||
products.append(RhsmPool(self.module, _name=value, key=value))
|
||||
elif products:
|
||||
# Associate value with most recently recorded product
|
||||
products[-1].__setattr__(key, value)
|
||||
# FIXME - log some warning?
|
||||
#else:
|
||||
# warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value))
|
||||
return products
|
||||
|
||||
def filter(self, regexp='^$'):
|
||||
'''
|
||||
Return a list of RhsmPools whose name matches the provided regular expression
|
||||
'''
|
||||
r = re.compile(regexp)
|
||||
for product in self.products:
|
||||
if r.search(product._name):
|
||||
yield product
|
||||
|
@@ -0,0 +1,319 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
try:
|
||||
import urllib
|
||||
HAS_URLLIB = True
|
||||
except:
|
||||
HAS_URLLIB = False
|
||||
|
||||
try:
|
||||
import urllib2
|
||||
HAS_URLLIB2 = True
|
||||
except:
|
||||
HAS_URLLIB2 = False
|
||||
|
||||
try:
|
||||
import urlparse
|
||||
HAS_URLPARSE = True
|
||||
except:
|
||||
HAS_URLPARSE = False
|
||||
|
||||
try:
|
||||
import ssl
|
||||
HAS_SSL=True
|
||||
except:
|
||||
HAS_SSL=False
|
||||
|
||||
import os
import socket
import sys
import tempfile
|
||||
|
||||
|
||||
# This is a dummy cacert provided for Mac OS since you need at least 1
|
||||
# ca cert, regardless of validity, for Python on Mac OS to use the
|
||||
# keychain functionality in OpenSSL for validating SSL certificates.
|
||||
# See: http://mercurial.selenic.com/wiki/CACertificates#Mac_OS_X_10.6_and_higher
|
||||
DUMMY_CA_CERT = """-----BEGIN CERTIFICATE-----
|
||||
MIICvDCCAiWgAwIBAgIJAO8E12S7/qEpMA0GCSqGSIb3DQEBBQUAMEkxCzAJBgNV
|
||||
BAYTAlVTMRcwFQYDVQQIEw5Ob3J0aCBDYXJvbGluYTEPMA0GA1UEBxMGRHVyaGFt
|
||||
MRAwDgYDVQQKEwdBbnNpYmxlMB4XDTE0MDMxODIyMDAyMloXDTI0MDMxNTIyMDAy
|
||||
MlowSTELMAkGA1UEBhMCVVMxFzAVBgNVBAgTDk5vcnRoIENhcm9saW5hMQ8wDQYD
|
||||
VQQHEwZEdXJoYW0xEDAOBgNVBAoTB0Fuc2libGUwgZ8wDQYJKoZIhvcNAQEBBQAD
|
||||
gY0AMIGJAoGBANtvpPq3IlNlRbCHhZAcP6WCzhc5RbsDqyh1zrkmLi0GwcQ3z/r9
|
||||
gaWfQBYhHpobK2Tiq11TfraHeNB3/VfNImjZcGpN8Fl3MWwu7LfVkJy3gNNnxkA1
|
||||
4Go0/LmIvRFHhbzgfuo9NFgjPmmab9eqXJceqZIlz2C8xA7EeG7ku0+vAgMBAAGj
|
||||
gaswgagwHQYDVR0OBBYEFPnN1nPRqNDXGlCqCvdZchRNi/FaMHkGA1UdIwRyMHCA
|
||||
FPnN1nPRqNDXGlCqCvdZchRNi/FaoU2kSzBJMQswCQYDVQQGEwJVUzEXMBUGA1UE
|
||||
CBMOTm9ydGggQ2Fyb2xpbmExDzANBgNVBAcTBkR1cmhhbTEQMA4GA1UEChMHQW5z
|
||||
aWJsZYIJAO8E12S7/qEpMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEA
|
||||
MUB80IR6knq9K/tY+hvPsZer6eFMzO3JGkRFBh2kn6JdMDnhYGX7AXVHGflrwNQH
|
||||
qFy+aenWXsC0ZvrikFxbQnX8GVtDADtVznxOi7XzFw7JOxdsVrpXgSN0eh0aMzvV
|
||||
zKPZsZ2miVGclicJHzm5q080b1p/sZtuKIEZk6vZqEg=
|
||||
-----END CERTIFICATE-----
|
||||
"""
|
||||
|
||||
|
||||
class RequestWithMethod(urllib2.Request):
|
||||
'''
|
||||
Workaround for using DELETE/PUT/etc with urllib2
|
||||
Originally contained in library/net_infrastructure/dnsmadeeasy
|
||||
'''
|
||||
|
||||
def __init__(self, url, method, data=None, headers={}):
|
||||
self._method = method
|
||||
urllib2.Request.__init__(self, url, data, headers)
|
||||
|
||||
def get_method(self):
|
||||
if self._method:
|
||||
return self._method
|
||||
else:
|
||||
return urllib2.Request.get_method(self)
|
||||
|
||||
|
||||
class SSLValidationHandler(urllib2.BaseHandler):
|
||||
'''
|
||||
A custom handler class for SSL validation.
|
||||
|
||||
Based on:
|
||||
http://stackoverflow.com/questions/1087227/validate-ssl-certificates-with-python
|
||||
http://techknack.net/python-urllib2-handlers/
|
||||
'''
|
||||
|
||||
def __init__(self, module, hostname, port):
|
||||
self.module = module
|
||||
self.hostname = hostname
|
||||
self.port = port
|
||||
|
||||
def get_ca_certs(self):
|
||||
# tries to find a valid CA cert in one of the
|
||||
# standard locations for the current distribution
|
||||
|
||||
ca_certs = []
|
||||
paths_checked = []
|
||||
platform = get_platform()
|
||||
distribution = get_distribution()
|
||||
|
||||
# build a list of paths to check for .crt/.pem files
|
||||
# based on the platform type
|
||||
paths_checked.append('/etc/ssl/certs')
|
||||
if platform == 'Linux':
|
||||
paths_checked.append('/etc/pki/ca-trust/extracted/pem')
|
||||
paths_checked.append('/etc/pki/tls/certs')
|
||||
paths_checked.append('/usr/share/ca-certificates/cacert.org')
|
||||
elif platform == 'FreeBSD':
|
||||
paths_checked.append('/usr/local/share/certs')
|
||||
elif platform == 'OpenBSD':
|
||||
paths_checked.append('/etc/ssl')
|
||||
elif platform == 'NetBSD':
|
||||
ca_certs.append('/etc/openssl/certs')
|
||||
|
||||
# fall back to a user-deployed cert in a standard
|
||||
# location if the OS platform one is not available
|
||||
paths_checked.append('/etc/ansible')
|
||||
|
||||
tmp_fd, tmp_path = tempfile.mkstemp()
|
||||
|
||||
# Write the dummy ca cert if we are running on Mac OS X
|
||||
if platform == 'Darwin':
|
||||
os.write(tmp_fd, DUMMY_CA_CERT)
|
||||
|
||||
# for all of the paths, find any .crt or .pem files
|
||||
# and compile them into single temp file for use
|
||||
# in the ssl check to speed up the test
|
||||
for path in paths_checked:
|
||||
if os.path.exists(path) and os.path.isdir(path):
|
||||
dir_contents = os.listdir(path)
|
||||
for f in dir_contents:
|
||||
full_path = os.path.join(path, f)
|
||||
if os.path.isfile(full_path) and os.path.splitext(f)[1] in ('.crt','.pem'):
|
||||
try:
|
||||
cert_file = open(full_path, 'r')
|
||||
os.write(tmp_fd, cert_file.read())
|
||||
cert_file.close()
|
||||
except:
|
||||
pass
|
||||
|
||||
return (tmp_path, paths_checked)
|
||||
|
||||
def http_request(self, req):
|
||||
tmp_ca_cert_path, paths_checked = self.get_ca_certs()
|
||||
try:
|
||||
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
ssl_s = ssl.wrap_socket(s, ca_certs=tmp_ca_cert_path, cert_reqs=ssl.CERT_REQUIRED)
|
||||
ssl_s.connect((self.hostname, self.port))
|
||||
ssl_s.close()
|
||||
except (ssl.SSLError, socket.error), e:
|
||||
# fail if we tried all of the certs but none worked
|
||||
if 'connection refused' in str(e).lower():
|
||||
self.module.fail_json(msg='Failed to connect to %s:%s.' % (self.hostname, self.port))
|
||||
else:
|
||||
self.module.fail_json(
|
||||
msg='Failed to validate the SSL certificate for %s:%s. ' % (self.hostname, self.port) + \
|
||||
'Use validate_certs=no or make sure your managed systems have a valid CA certificate installed. ' + \
|
||||
'Paths checked for this platform: %s' % ", ".join(paths_checked)
|
||||
)
|
||||
try:
|
||||
# cleanup the temp file created, don't worry
|
||||
# if it fails for some reason
|
||||
os.remove(tmp_ca_cert_path)
|
||||
except:
|
||||
pass
|
||||
|
||||
return req
|
||||
|
||||
https_request = http_request
|
||||
|
||||
|
||||
def url_argument_spec():
|
||||
'''
|
||||
Creates an argument spec that can be used with any module
|
||||
that will be requesting content via urllib/urllib2
|
||||
'''
|
||||
return dict(
|
||||
url = dict(),
|
||||
force = dict(default='no', aliases=['thirsty'], type='bool'),
|
||||
http_agent = dict(default='ansible-httpget'),
|
||||
use_proxy = dict(default='yes', type='bool'),
|
||||
validate_certs = dict(default='yes', type='bool'),
|
||||
)
|
||||
|
||||
|
||||
def fetch_url(module, url, data=None, headers=None, method=None,
|
||||
use_proxy=False, force=False, last_mod_time=None, timeout=10):
|
||||
'''
|
||||
Fetches a file from an HTTP/FTP server using urllib2
|
||||
'''
|
||||
|
||||
if not HAS_URLLIB:
|
||||
module.fail_json(msg='urllib is not installed')
|
||||
if not HAS_URLLIB2:
|
||||
module.fail_json(msg='urllib2 is not installed')
|
||||
elif not HAS_URLPARSE:
|
||||
module.fail_json(msg='urlparse is not installed')
|
||||
|
||||
r = None
|
||||
handlers = []
|
||||
info = dict(url=url)
|
||||
|
||||
# Get validate_certs from the module params
|
||||
validate_certs = module.params.get('validate_certs', True)
|
||||
|
||||
parsed = urlparse.urlparse(url)
|
||||
if parsed[0] == 'https':
|
||||
if not HAS_SSL and validate_certs:
|
||||
module.fail_json(msg='SSL validation is not available in your version of python. You can use validate_certs=no, however this is unsafe and not recommended')
|
||||
elif validate_certs:
|
||||
# do the cert validation
|
||||
netloc = parsed[1]
|
||||
if '@' in netloc:
|
||||
netloc = netloc.split('@', 1)[1]
|
||||
if ':' in netloc:
|
||||
hostname, port = netloc.split(':', 1)
|
||||
else:
|
||||
hostname = netloc
|
||||
port = 443
|
||||
# create the SSL validation handler and
|
||||
# add it to the list of handlers
|
||||
ssl_handler = SSLValidationHandler(module, hostname, port)
|
||||
handlers.append(ssl_handler)
|
||||
|
||||
if parsed[0] != 'ftp' and '@' in parsed[1]:
|
||||
credentials, netloc = parsed[1].split('@', 1)
|
||||
if ':' in credentials:
|
||||
username, password = credentials.split(':', 1)
|
||||
else:
|
||||
username = credentials
|
||||
password = ''
|
||||
parsed = list(parsed)
|
||||
parsed[1] = netloc
|
||||
|
||||
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
|
||||
# this creates a password manager
|
||||
passman.add_password(None, netloc, username, password)
|
||||
# because we have put None at the start it will always
|
||||
# use this username/password combination for urls
|
||||
# for which `theurl` is a super-url
|
||||
|
||||
authhandler = urllib2.HTTPBasicAuthHandler(passman)
|
||||
# create the AuthHandler
|
||||
handlers.append(authhandler)
|
||||
|
||||
#reconstruct url without credentials
|
||||
url = urlparse.urlunparse(parsed)
|
||||
|
||||
if not use_proxy:
|
||||
proxyhandler = urllib2.ProxyHandler({})
|
||||
handlers.append(proxyhandler)
|
||||
|
||||
opener = urllib2.build_opener(*handlers)
|
||||
urllib2.install_opener(opener)
|
||||
|
||||
if method:
|
||||
if method.upper() not in ('OPTIONS','GET','HEAD','POST','PUT','DELETE','TRACE','CONNECT'):
|
||||
module.fail_json(msg='invalid HTTP request method; %s' % method.upper())
|
||||
request = RequestWithMethod(url, method.upper(), data)
|
||||
else:
|
||||
request = urllib2.Request(url, data)
|
||||
|
||||
# add the custom agent header, to help prevent issues
|
||||
# with sites that block the default urllib agent string
|
||||
request.add_header('User-agent', module.params.get('http_agent'))
|
||||
|
||||
# if we're ok with getting a 304, set the timestamp in the
|
||||
# header, otherwise make sure we don't get a cached copy
|
||||
if last_mod_time and not force:
|
||||
tstamp = last_mod_time.strftime('%a, %d %b %Y %H:%M:%S +0000')
|
||||
request.add_header('If-Modified-Since', tstamp)
|
||||
else:
|
||||
request.add_header('cache-control', 'no-cache')
|
||||
|
||||
# user defined headers now, which may override things we've set above
|
||||
if headers:
|
||||
if not isinstance(headers, dict):
|
||||
            module.fail_json(msg="headers provided to fetch_url() must be a dict")
|
||||
for header in headers:
|
||||
request.add_header(header, headers[header])
|
||||
|
||||
try:
|
||||
if sys.version_info < (2,6,0):
|
||||
# urlopen in python prior to 2.6.0 did not
|
||||
# have a timeout parameter
|
||||
r = urllib2.urlopen(request, None)
|
||||
else:
|
||||
r = urllib2.urlopen(request, None, timeout)
|
||||
info.update(r.info())
|
||||
info['url'] = r.geturl() # The URL goes in too, because of redirects.
|
||||
info.update(dict(msg="OK (%s bytes)" % r.headers.get('Content-Length', 'unknown'), status=200))
|
||||
except urllib2.HTTPError, e:
|
||||
info.update(dict(msg=str(e), status=e.code))
|
||||
except urllib2.URLError, e:
|
||||
code = int(getattr(e, 'code', -1))
|
||||
info.update(dict(msg="Request failed: %s" % str(e), status=code))
|
||||
|
||||
return r, info
|
||||
|
@ -0,0 +1,121 @@
|
||||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||
# (c) 2013, Michael Scherer <misc@zarb.org>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import distutils.spawn
|
||||
import os
|
||||
import subprocess
|
||||
from ansible import errors
|
||||
from ansible.callbacks import vvv
|
||||
|
||||
class Connection(object):
|
||||
''' Local lxc based connections '''
|
||||
|
||||
def _search_executable(self, executable):
|
||||
cmd = distutils.spawn.find_executable(executable)
|
||||
if not cmd:
|
||||
            raise errors.AnsibleError("%s command not found in PATH" % executable)
|
||||
return cmd
|
||||
|
||||
def _check_domain(self, domain):
|
||||
p = subprocess.Popen([self.cmd, '-q', '-c', 'lxc:///', 'dominfo', domain],
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
p.communicate()
|
||||
if p.returncode:
|
||||
raise errors.AnsibleError("%s is not a lxc defined in libvirt" % domain)
|
||||
|
||||
def __init__(self, runner, host, port, *args, **kwargs):
|
||||
self.lxc = host
|
||||
|
||||
self.cmd = self._search_executable('virsh')
|
||||
|
||||
self._check_domain(host)
|
||||
|
||||
self.runner = runner
|
||||
self.host = host
|
||||
# port is unused, since this is local
|
||||
self.port = port
|
||||
|
||||
def connect(self, port=None):
|
||||
''' connect to the lxc; nothing to do here '''
|
||||
|
||||
vvv("THIS IS A LOCAL LXC DIR", host=self.lxc)
|
||||
|
||||
return self
|
||||
|
||||
def _generate_cmd(self, executable, cmd):
|
||||
if executable:
|
||||
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', executable , '-c', cmd]
|
||||
else:
|
||||
local_cmd = '%s -q -c lxc:/// lxc-enter-namespace %s -- %s' % (self.cmd, self.lxc, cmd)
|
||||
return local_cmd
|
||||
|
||||
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
|
||||
''' run a command on the chroot '''
|
||||
|
||||
# We enter lxc as root so sudo stuff can be ignored
|
||||
local_cmd = self._generate_cmd(executable, cmd)
|
||||
|
||||
vvv("EXEC %s" % (local_cmd), host=self.lxc)
|
||||
p = subprocess.Popen(local_cmd, shell=isinstance(local_cmd, basestring),
|
||||
cwd=self.runner.basedir,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
stdout, stderr = p.communicate()
|
||||
return (p.returncode, '', stdout, stderr)
|
||||
|
||||
def _normalize_path(self, path, prefix):
|
||||
if not path.startswith(os.path.sep):
|
||||
path = os.path.join(os.path.sep, path)
|
||||
normpath = os.path.normpath(path)
|
||||
return os.path.join(prefix, normpath[1:])
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
''' transfer a file from local to lxc '''
|
||||
|
||||
out_path = self._normalize_path(out_path, '/')
|
||||
vvv("PUT %s TO %s" % (in_path, out_path), host=self.lxc)
|
||||
|
||||
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/tee', out_path]
|
||||
vvv("EXEC %s" % (local_cmd), host=self.lxc)
|
||||
|
||||
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
stdout, stderr = p.communicate(open(in_path,'rb').read())
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
''' fetch a file from lxc to local '''
|
||||
|
||||
in_path = self._normalize_path(in_path, '/')
|
||||
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.lxc)
|
||||
|
||||
local_cmd = [self.cmd, '-q', '-c', 'lxc:///', 'lxc-enter-namespace', self.lxc, '--', '/bin/cat', in_path]
|
||||
vvv("EXEC %s" % (local_cmd), host=self.lxc)
|
||||
|
||||
p = subprocess.Popen(local_cmd, cwd=self.runner.basedir,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
stdout, stderr = p.communicate()
|
||||
open(out_path,'wb').write(stdout)
|
||||
|
||||
|
||||
def close(self):
|
||||
''' terminate the connection; nothing to do here '''
|
||||
pass
|
@@ -0,0 +1,76 @@
|
||||
# (c) 2014, Will Thames <will@thames.id.au>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
class ModuleDocFragment(object):
|
||||
|
||||
# AWS only documentation fragment
|
||||
DOCUMENTATION = """
|
||||
options:
|
||||
ec2_url:
|
||||
description:
|
||||
      - URL to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Must be specified if region is not used. If not set, then the value of the EC2_URL environment variable, if any, is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
aws_secret_key:
|
||||
description:
|
||||
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_secret_key', 'secret_key' ]
|
||||
aws_access_key:
|
||||
description:
|
||||
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'ec2_access_key', 'access_key' ]
|
||||
validate_certs:
|
||||
description:
|
||||
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
version_added: "1.5"
|
||||
profile:
|
||||
description:
|
||||
- uses a boto profile. Only works with boto >= 2.24.0
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
security_token:
|
||||
description:
|
||||
- security token to authenticate against AWS
|
||||
required: false
|
||||
default: null
|
||||
aliases: []
|
||||
version_added: "1.6"
|
||||
requirements:
|
||||
- boto
|
||||
notes:
|
||||
  - The following environment variables can be used: C(AWS_ACCESS_KEY) or
|
||||
C(EC2_ACCESS_KEY) or C(AWS_ACCESS_KEY_ID),
|
||||
C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY) or C(AWS_SECRET_ACCESS_KEY),
|
||||
C(AWS_REGION) or C(EC2_REGION), C(AWS_SECURITY_TOKEN)
|
||||
- Ansible uses the boto configuration file (typically ~/.boto) if no
|
||||
credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
|
||||
  - C(AWS_REGION) or C(EC2_REGION) can typically be used to specify the
|
||||
AWS region, when required, but
|
||||
this can also be configured in the boto config file
|
||||
"""
|
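A minimal sketch of how a playbook task supplies these shared options, using a hypothetical consumer name (some_aws_module) since the fragment itself does not name a module; only the option names come from the documentation above, and leaving the credential options out falls back to the environment variables or the ~/.boto file described in the notes:

    # hypothetical AWS module that embeds this fragment
    - some_aws_module:
        aws_access_key: "{{ access_key }}"
        aws_secret_key: "{{ secret_key }}"
        security_token: "{{ session_token }}"
        validate_certs: no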
@ -0,0 +1,58 @@
# (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard files documentation fragment
    DOCUMENTATION = """
options:
  path:
    description:
      - 'path to the file being managed. Aliases: I(dest), I(name)'
    required: true
    default: []
    aliases: ['dest', 'name']
  state:
    description:
      - If C(directory), all immediate subdirectories will be created if they
        do not exist. If C(file), the file will NOT be created if it does not
        exist; see the M(copy) or M(template) module if you want that behavior.
        If C(link), the symbolic link will be created or changed. Use C(hard)
        for hardlinks. If C(absent), directories will be recursively deleted,
        and files or symlinks will be unlinked. If C(touch) (new in 1.4), an empty file will
        be created if the C(path) does not exist, while an existing file or
        directory will receive updated file access and modification times (similar
        to the way C(touch) works from the command line).
    required: false
    default: file
    choices: [ file, link, directory, hard, touch, absent ]
  src:
    required: false
    default: null
    choices: []
    description:
      - path of the file to link to (applies only to C(state=link) or C(state=hard)). Will accept absolute,
        relative and non-existing (with C(force)) paths. Relative paths are not expanded.
  recurse:
    required: false
    default: "no"
    choices: [ "yes", "no" ]
    version_added: "1.1"
    description:
      - recursively set the specified file attributes (applies only to state=directory)
"""
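A minimal sketch of these options in use, assuming the file module as a consumer of this fragment; only options documented above are used:

    # ensure a directory exists (recurse applies attribute changes to its contents as well)
    - file: path=/opt/some_app state=directory recurse=yes

    # maintain a symlink; src is the path the link points to (state=link or state=hard only)
    - file: src=/usr/share/zoneinfo/UTC path=/etc/localtime state=link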
@ -0,0 +1,122 @@
# (c) 2014, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.


class ModuleDocFragment(object):

    # Standard Rackspace only documentation fragment
    DOCUMENTATION = """
options:
  api_key:
    description:
      - Rackspace API key (overrides I(credentials))
    aliases:
      - password
  credentials:
    description:
      - File to find the Rackspace credentials in (ignored if I(api_key) and
        I(username) are provided)
    default: null
    aliases:
      - creds_file
  env:
    description:
      - Environment as configured in ~/.pyrax.cfg,
        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
    version_added: 1.5
  region:
    description:
      - Region to create an instance in
    default: DFW
  username:
    description:
      - Rackspace username (overrides I(credentials))
  verify_ssl:
    description:
      - Whether or not to require SSL validation of API endpoints
    version_added: 1.5
requirements:
  - pyrax
notes:
  - The following environment variables can be used, C(RAX_USERNAME),
    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
"""

    # Documentation fragment including attributes to enable communication
    # with other OpenStack clouds. Not all rax modules support this.
    OPENSTACK = """
options:
  api_key:
    description:
      - Rackspace API key (overrides I(credentials))
    aliases:
      - password
  auth_endpoint:
    description:
      - The URI of the authentication service
    default: https://identity.api.rackspacecloud.com/v2.0/
    version_added: 1.5
  credentials:
    description:
      - File to find the Rackspace credentials in (ignored if I(api_key) and
        I(username) are provided)
    default: null
    aliases:
      - creds_file
  env:
    description:
      - Environment as configured in ~/.pyrax.cfg,
        see U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#pyrax-configuration)
    version_added: 1.5
  identity_type:
    description:
      - Authentication mechanism to use, such as rackspace or keystone
    default: rackspace
    version_added: 1.5
  region:
    description:
      - Region to create an instance in
    default: DFW
  tenant_id:
    description:
      - The tenant ID used for authentication
    version_added: 1.5
  tenant_name:
    description:
      - The tenant name used for authentication
    version_added: 1.5
  username:
    description:
      - Rackspace username (overrides I(credentials))
  verify_ssl:
    description:
      - Whether or not to require SSL validation of API endpoints
    version_added: 1.5
requirements:
  - pyrax
notes:
  - The following environment variables can be used, C(RAX_USERNAME),
    C(RAX_API_KEY), C(RAX_CREDS_FILE), C(RAX_CREDENTIALS), C(RAX_REGION).
  - C(RAX_CREDENTIALS) and C(RAX_CREDS_FILE) point to a credentials file
    appropriate for pyrax. See U(https://github.com/rackspace/pyrax/blob/master/docs/getting_started.md#authenticating)
  - C(RAX_USERNAME) and C(RAX_API_KEY) obviate the use of a credentials file
  - C(RAX_REGION) defines a Rackspace Public Cloud region (DFW, ORD, LON, ...)
"""
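A minimal sketch of how these shared options appear in a task, with a hypothetical consumer name (some_rax_module) standing in for any Rackspace module that embeds this fragment; the same values can instead come from the RAX_* environment variables listed in the notes:

    # authenticate through a pyrax credentials file
    - some_rax_module:
        credentials: ~/.rax_creds
        region: ORD

    # or pass the username and API key directly
    - some_rax_module:
        username: "{{ rax_username }}"
        api_key: "{{ rax_api_key }}"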
@ -0,0 +1,242 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_domain
short_description: Create/delete a DNS record in DigitalOcean
description:
  - Create/delete a DNS record in DigitalOcean.
version_added: "1.6"
options:
  state:
    description:
      - Indicate desired state of the target.
    default: present
    choices: ['present', 'active', 'absent', 'deleted']
  client_id:
    description:
      - Digital Ocean manager id.
  api_key:
    description:
      - Digital Ocean api key.
  id:
    description:
      - Numeric, the droplet id you want to operate on.
  name:
    description:
      - String, this is the name of the droplet - must be formatted by hostname rules, or the name of an SSH key, or the name of a domain.
  ip:
    description:
      - The IP address to point a domain at.

notes:
  - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''


EXAMPLES = '''
# Create a domain record

- digital_ocean_domain: >
      state=present
      name=my.digitalocean.domain
      ip=127.0.0.1

# Create a droplet and a corresponding domain record

- digital_ocean_droplet: >
      state=present
      name=test_droplet
      size_id=1
      region_id=2
      image_id=3
  register: test_droplet

- digital_ocean_domain: >
      state=present
      name={{ test_droplet.name }}.my.domain
      ip={{ test_droplet.ip_address }}
'''

import sys
import os
import time

try:
    from dopy.manager import DoError, DoManager
except ImportError as e:
    print "failed=True msg='dopy required for this module'"
    sys.exit(1)

class TimeoutError(DoError):
    def __init__(self, msg, id):
        super(TimeoutError, self).__init__(msg)
        self.id = id

class JsonfyMixIn(object):
    def to_json(self):
        return self.__dict__

class DomainRecord(JsonfyMixIn):
    manager = None

    def __init__(self, json):
        self.__dict__.update(json)
    update_attr = __init__

    def update(self, data=None, record_type=None):
        json = self.manager.edit_domain_record(self.domain_id,
                                               self.id,
                                               record_type if record_type is not None else self.record_type,
                                               data if data is not None else self.data)
        self.__dict__.update(json)
        return self

    def destroy(self):
        json = self.manager.destroy_domain_record(self.domain_id, self.id)
        return json

class Domain(JsonfyMixIn):
    manager = None

    def __init__(self, domain_json):
        self.__dict__.update(domain_json)

    def destroy(self):
        self.manager.destroy_domain(self.id)

    def records(self):
        json = self.manager.all_domain_records(self.id)
        return map(DomainRecord, json)

    @classmethod
    def add(cls, name, ip):
        json = cls.manager.new_domain(name, ip)
        return cls(json)

    @classmethod
    def setup(cls, client_id, api_key):
        cls.manager = DoManager(client_id, api_key)
        DomainRecord.manager = cls.manager

    @classmethod
    def list_all(cls):
        domains = cls.manager.all_domains()
        return map(cls, domains)

    @classmethod
    def find(cls, name=None, id=None):
        if name is None and id is None:
            return False

        domains = Domain.list_all()

        if id is not None:
            for domain in domains:
                if domain.id == id:
                    return domain

        if name is not None:
            for domain in domains:
                if domain.name == name:
                    return domain

        return False

def core(module):
    def getkeyordie(k):
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    changed = True
    state = module.params['state']

    Domain.setup(client_id, api_key)
    if state in ('present', 'active'):
        domain = Domain.find(id=module.params["id"])

        if not domain:
            domain = Domain.find(name=getkeyordie("name"))

        if not domain:
            domain = Domain.add(getkeyordie("name"),
                                getkeyordie("ip"))
            module.exit_json(changed=True, domain=domain.to_json())
        else:
            records = domain.records()
            at_record = None
            for record in records:
                if record.name == "@":
                    at_record = record

            # assumes DigitalOcean created an "@" A record for the domain;
            # only update it when the IP has actually changed
            if at_record.data != getkeyordie("ip"):
                at_record.update(data=getkeyordie("ip"), record_type='A')
                module.exit_json(changed=True, domain=Domain.find(id=at_record.domain_id).to_json())

        module.exit_json(changed=False, domain=domain.to_json())

    elif state in ('absent', 'deleted'):
        domain = None
        if "id" in module.params:
            domain = Domain.find(id=module.params["id"])

        if not domain and "name" in module.params:
            domain = Domain.find(name=module.params["name"])

        if not domain:
            module.exit_json(changed=False, msg="Domain not found.")

        event_json = domain.destroy()
        module.exit_json(changed=True, event=event_json)


def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['active', 'present', 'absent', 'deleted'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ip = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except TimeoutError as e:
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))

# import module snippets
from ansible.module_utils.basic import *

main()
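The EXAMPLES above only cover creation; a removal task, sketched from the state=absent branch of core(), would look like this (client_id and api_key are omitted and picked up from DO_CLIENT_ID/DO_API_KEY as described in the notes):

    - digital_ocean_domain: >
          state=absent
          name=my.digitalocean.domain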
@ -0,0 +1,178 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
  - Create/delete an SSH key.
version_added: "1.6"
options:
  state:
    description:
      - Indicate desired state of the target.
    default: present
    choices: ['present', 'absent']
  client_id:
    description:
      - Digital Ocean manager id.
  api_key:
    description:
      - Digital Ocean api key.
  id:
    description:
      - Numeric, the SSH key id you want to operate on.
  name:
    description:
      - String, this is the name of an SSH key to create or destroy.
  ssh_pub_key:
    description:
      - The public SSH key you want to add to your account.

notes:
  - Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
'''


EXAMPLES = '''
# Ensure an SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = True

- digital_ocean_sshkey: >
      state=present
      name=my_ssh_key
      ssh_pub_key='ssh-rsa AAAA...'
      client_id=XXX
      api_key=XXX

'''

import sys
import os
import time

try:
    from dopy.manager import DoError, DoManager
except ImportError as e:
    print "failed=True msg='dopy required for this module'"
    sys.exit(1)

class TimeoutError(DoError):
    def __init__(self, msg, id):
        super(TimeoutError, self).__init__(msg)
        self.id = id

class JsonfyMixIn(object):
    def to_json(self):
        return self.__dict__

class SSH(JsonfyMixIn):
    manager = None

    def __init__(self, ssh_key_json):
        self.__dict__.update(ssh_key_json)
    update_attr = __init__

    def destroy(self):
        self.manager.destroy_ssh_key(self.id)
        return True

    @classmethod
    def setup(cls, client_id, api_key):
        cls.manager = DoManager(client_id, api_key)

    @classmethod
    def find(cls, name):
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False

    @classmethod
    def list_all(cls):
        json = cls.manager.all_ssh_keys()
        return map(cls, json)

    @classmethod
    def add(cls, name, key_pub):
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)

def core(module):
    def getkeyordie(k):
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    changed = True
    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    if state == 'present':
        key = SSH.find(name)
        if key:
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)

def main():
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(choices=['present', 'absent'], default='present'),
            client_id = dict(aliases=['CLIENT_ID'], no_log=True),
            api_key = dict(aliases=['API_KEY'], no_log=True),
            name = dict(type='str'),
            id = dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key = dict(type='str'),
        ),
        required_one_of = (
            ['id', 'name'],
        ),
    )

    try:
        core(module)
    except TimeoutError as e:
        module.fail_json(msg=str(e), id=e.id)
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e))

# import module snippets
from ansible.module_utils.basic import *

main()
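Similarly, the EXAMPLES block only shows state=present; a key-removal task, sketched from the state=absent branch of core() above, would be:

    - digital_ocean_sshkey: >
          state=absent
          name=my_ssh_key
          client_id=XXX
          api_key=XXX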