mirror of https://github.com/ansible/ansible.git
Merge remote-tracking branch 'upstream/devel' into devel
Conflicts: library/files/stat Resolved: Using theirspull/10224/head
commit
2f3dff95f5
@ -0,0 +1,16 @@
|
||||
[submodule "lib/ansible/modules/core"]
|
||||
path = lib/ansible/modules/core
|
||||
url = https://github.com/ansible/ansible-modules-core.git
|
||||
branch = devel
|
||||
[submodule "lib/ansible/modules/extras"]
|
||||
path = lib/ansible/modules/extras
|
||||
url = https://github.com/ansible/ansible-modules-extras.git
|
||||
branch = devel
|
||||
[submodule "v2/ansible/modules/core"]
|
||||
path = v2/ansible/modules/core
|
||||
url = https://github.com/ansible/ansible-modules-core.git
|
||||
branch = devel
|
||||
[submodule "v2/ansible/modules/extras"]
|
||||
path = v2/ansible/modules/extras
|
||||
url = https://github.com/ansible/ansible-modules-extras.git
|
||||
branch = devel
|
@ -1,11 +1,11 @@
|
||||
include README.md packaging/rpm/ansible.spec COPYING
|
||||
include examples/hosts
|
||||
include examples/ansible.cfg
|
||||
graft examples/playbooks
|
||||
include packaging/distutils/setup.py
|
||||
include lib/ansible/module_utils/powershell.ps1
|
||||
recursive-include lib/ansible/modules *
|
||||
recursive-include docs *
|
||||
recursive-include library *
|
||||
include Makefile
|
||||
include VERSION
|
||||
include MANIFEST.in
|
||||
prune lib/ansible/modules/core/.git
|
||||
prune lib/ansible/modules/extras/.git
|
||||
|
@ -1,10 +0,0 @@
|
||||
Ansible Guru
|
||||
````````````
|
||||
|
||||
While many users should be able to get on fine with the documentation, mailing list, and IRC, sometimes you want a bit more.
|
||||
|
||||
`Ansible Guru <http://ansible.com/ansible-guru>`_ is an offering from Ansible, Inc that helps users who would like more dedicated help with Ansible, including building playbooks, best practices, architecture suggestions, and more -- all from our awesome support and services team. It also includes some useful discounts and also some free T-shirts, though you shouldn't get it just for the free shirts! It's a great way to train up to becoming an Ansible expert.
|
||||
|
||||
For those interested, click through the link above. You can sign up in minutes!
|
||||
|
||||
For users looking for more hands-on help, we also have some more information on our `Services page <http://www.ansible.com/ansible-services>`_, and support is also included with :doc:`tower`.
|
@ -0,0 +1,34 @@
|
||||
Start and Step
|
||||
======================
|
||||
|
||||
This shows a few alternative ways to run playbooks. These modes are very useful for testing new plays or debugging.
|
||||
|
||||
|
||||
.. _start_at_task:
|
||||
|
||||
Start-at-task
|
||||
`````````````
|
||||
If you want to start executing your playbook at a particular task, you can do so with the ``--start-at`` option::
|
||||
|
||||
ansible-playbook playbook.yml --start-at="install packages"
|
||||
|
||||
The above will start executing your playbook at a task named "install packages".
|
||||
|
||||
|
||||
.. _step:
|
||||
|
||||
Step
|
||||
````
|
||||
|
||||
Playbooks can also be executed interactively with ``--step``::
|
||||
|
||||
ansible-playbook playbook.yml --step
|
||||
|
||||
This will cause ansible to stop on each task, and ask if it should execute that task.
|
||||
Say you had a task called "configure ssh", the playbook run will stop and ask::
|
||||
|
||||
Perform task: configure ssh (y/n/c):
|
||||
|
||||
Answering "y" will execute the task, answering "n" will skip the task, and answering "c"
|
||||
will continue executing all the remaining tasks without asking.
|
||||
|
@ -1,45 +1,78 @@
|
||||
#!/bin/bash
|
||||
# usage: source ./hacking/env-setup [-q]
|
||||
# usage: source hacking/env-setup [-q]
|
||||
# modifies environment for running Ansible from checkout
|
||||
|
||||
# Default values for shell variables we use
|
||||
PYTHONPATH=${PYTHONPATH-""}
|
||||
PATH=${PATH-""}
|
||||
MANPATH=${MANPATH-""}
|
||||
verbosity=${1-info} # Defaults to `info' if unspecified
|
||||
|
||||
if [ "$verbosity" = -q ]; then
|
||||
verbosity=silent
|
||||
fi
|
||||
|
||||
# When run using source as directed, $0 gets set to bash, so we must use $BASH_SOURCE
|
||||
if [ -n "$BASH_SOURCE" ] ; then
|
||||
HACKING_DIR=`dirname $BASH_SOURCE`
|
||||
elif [ $(basename $0) = "env-setup" ]; then
|
||||
HACKING_DIR=`dirname $0`
|
||||
HACKING_DIR=$(dirname "$BASH_SOURCE")
|
||||
elif [ $(basename -- "$0") = "env-setup" ]; then
|
||||
HACKING_DIR=$(dirname "$0")
|
||||
elif [ -n "$KSH_VERSION" ]; then
|
||||
HACKING_DIR=$(dirname "${.sh.file}")
|
||||
else
|
||||
HACKING_DIR="$PWD/hacking"
|
||||
fi
|
||||
# The below is an alternative to readlink -fn which doesn't exist on OS X
|
||||
# Source: http://stackoverflow.com/a/1678636
|
||||
FULL_PATH=`python -c "import os; print(os.path.realpath('$HACKING_DIR'))"`
|
||||
ANSIBLE_HOME=`dirname "$FULL_PATH"`
|
||||
FULL_PATH=$(python -c "import os; print(os.path.realpath('$HACKING_DIR'))")
|
||||
ANSIBLE_HOME=$(dirname "$FULL_PATH")
|
||||
|
||||
PREFIX_PYTHONPATH="$ANSIBLE_HOME/lib"
|
||||
PREFIX_PATH="$ANSIBLE_HOME/bin"
|
||||
PREFIX_MANPATH="$ANSIBLE_HOME/docs/man"
|
||||
|
||||
[[ $PYTHONPATH != ${PREFIX_PYTHONPATH}* ]] && export PYTHONPATH=$PREFIX_PYTHONPATH:$PYTHONPATH
|
||||
[[ $PATH != ${PREFIX_PATH}* ]] && export PATH=$PREFIX_PATH:$PATH
|
||||
unset ANSIBLE_LIBRARY
|
||||
export ANSIBLE_LIBRARY="$ANSIBLE_HOME/library:`python $HACKING_DIR/get_library.py`"
|
||||
[[ $MANPATH != ${PREFIX_MANPATH}* ]] && export MANPATH=$PREFIX_MANPATH:$MANPATH
|
||||
|
||||
# Print out values unless -q is set
|
||||
|
||||
if [ $# -eq 0 -o "$1" != "-q" ] ; then
|
||||
echo ""
|
||||
echo "Setting up Ansible to run out of checkout..."
|
||||
echo ""
|
||||
echo "PATH=$PATH"
|
||||
echo "PYTHONPATH=$PYTHONPATH"
|
||||
echo "ANSIBLE_LIBRARY=$ANSIBLE_LIBRARY"
|
||||
echo "MANPATH=$MANPATH"
|
||||
echo ""
|
||||
|
||||
echo "Remember, you may wish to specify your host file with -i"
|
||||
echo ""
|
||||
echo "Done!"
|
||||
echo ""
|
||||
expr "$PYTHONPATH" : "${PREFIX_PYTHONPATH}.*" > /dev/null || export PYTHONPATH="$PREFIX_PYTHONPATH:$PYTHONPATH"
|
||||
expr "$PATH" : "${PREFIX_PATH}.*" > /dev/null || export PATH="$PREFIX_PATH:$PATH"
|
||||
expr "$MANPATH" : "${PREFIX_MANPATH}.*" > /dev/null || export MANPATH="$PREFIX_MANPATH:$MANPATH"
|
||||
|
||||
#
|
||||
# Generate egg_info so that pkg_resources works
|
||||
#
|
||||
|
||||
# Do the work in a function so we don't repeat ourselves later
|
||||
gen_egg_info()
|
||||
{
|
||||
python setup.py egg_info
|
||||
if [ -e "$PREFIX_PYTHONPATH/ansible.egg-info" ] ; then
|
||||
rm -r "$PREFIX_PYTHONPATH/ansible.egg-info"
|
||||
fi
|
||||
mv "ansible.egg-info" "$PREFIX_PYTHONPATH"
|
||||
}
|
||||
|
||||
if [ "$ANSIBLE_HOME" != "$PWD" ] ; then
|
||||
current_dir="$PWD"
|
||||
else
|
||||
current_dir="$ANSIBLE_HOME"
|
||||
fi
|
||||
cd "$ANSIBLE_HOME"
|
||||
if [ "$verbosity" = silent ] ; then
|
||||
gen_egg_info > /dev/null 2>&1
|
||||
else
|
||||
gen_egg_info
|
||||
fi
|
||||
cd "$current_dir"
|
||||
|
||||
if [ "$verbosity" != silent ] ; then
|
||||
cat <<- EOF
|
||||
|
||||
Setting up Ansible to run out of checkout...
|
||||
|
||||
PATH=$PATH
|
||||
PYTHONPATH=$PYTHONPATH
|
||||
MANPATH=$MANPATH
|
||||
|
||||
Remember, you may wish to specify your host file with -i
|
||||
|
||||
Done!
|
||||
|
||||
EOF
|
||||
fi
|
||||
|
@ -0,0 +1,3 @@
|
||||
#!/bin/sh
|
||||
git pull --rebase
|
||||
git submodule update --init --recursive
|
@ -1,748 +0,0 @@
|
||||
#!/usr/bin/env python
|
||||
# Copyright 2013 Google Inc.
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# This is a custom functional test script for the Google Compute Engine
|
||||
# ansible modules. In order to run these tests, you must:
|
||||
# 1) Create a Google Cloud Platform account and enable the Google
|
||||
# Compute Engine service and billing
|
||||
# 2) Download, install, and configure 'gcutil'
|
||||
# see [https://developers.google.com/compute/docs/gcutil/]
|
||||
# 3) Convert your GCE Service Account private key from PKCS12 to PEM format
|
||||
# $ openssl pkcs12 -in pkey.pkcs12 -passin pass:notasecret \
|
||||
# > -nodes -nocerts | openssl rsa -out pkey.pem
|
||||
# 4) Make sure you have libcloud 0.13.3 or later installed.
|
||||
# 5) Make sure you have a libcloud 'secrets.py' file in your PYTHONPATH
|
||||
# 6) Set GCE_PARAMS and GCE_KEYWORD_PARMS in your 'secrets.py' file.
|
||||
# 7) Set up a simple hosts file
|
||||
# $ echo 127.0.0.1 > ~/ansible_hosts
|
||||
# $ echo "export ANSIBLE_HOSTS='~/ansible_hosts'" >> ~/.bashrc
|
||||
# $ . ~/.bashrc
|
||||
# 8) Set up your ansible 'hacking' environment
|
||||
# $ cd ~/ansible
|
||||
# $ . hacking/env-setup
|
||||
# $ export ANSIBLE_HOST_KEY_CHECKING=no
|
||||
# $ ansible all -m ping
|
||||
# 9) Set your PROJECT variable below
|
||||
# 10) Run and time the tests and log output, take ~30 minutes to run
|
||||
# $ time stdbuf -oL python test/gce_tests.py 2>&1 | tee log
|
||||
#
|
||||
# Last update: gcutil-1.11.0 and v1beta16
|
||||
|
||||
# Set this to your test Project ID
|
||||
PROJECT="google.com:erjohnso"
|
||||
|
||||
# debugging
|
||||
DEBUG=False # lots of debugging output
|
||||
VERBOSE=True # on failure, display ansible command and expected/actual result
|
||||
|
||||
# location - note that some tests rely on the module's 'default'
|
||||
# region/zone, which should match the settings below.
|
||||
REGION="us-central1"
|
||||
ZONE="%s-a" % REGION
|
||||
|
||||
# Peeking is a way to trigger looking at a specified set of resources
|
||||
# before and/or after a test run. The 'test_cases' data structure below
|
||||
# has a few tests with 'peek_before' and 'peek_after'. When those keys
|
||||
# are set and PEEKING_ENABLED is True, then these steps will be executed
|
||||
# to aid in debugging tests. Normally, this is not needed.
|
||||
PEEKING_ENABLED=False
|
||||
|
||||
# disks
|
||||
DNAME="aaaaa-ansible-disk"
|
||||
DNAME2="aaaaa-ansible-disk2"
|
||||
DNAME6="aaaaa-ansible-inst6"
|
||||
DNAME7="aaaaa-ansible-inst7"
|
||||
USE_PD="true"
|
||||
KERNEL="https://www.googleapis.com/compute/v1beta16/projects/google/global/kernels/gce-no-conn-track-v20130813"
|
||||
|
||||
# instances
|
||||
INAME="aaaaa-ansible-inst"
|
||||
INAME2="aaaaa-ansible-inst2"
|
||||
INAME3="aaaaa-ansible-inst3"
|
||||
INAME4="aaaaa-ansible-inst4"
|
||||
INAME5="aaaaa-ansible-inst5"
|
||||
INAME6="aaaaa-ansible-inst6"
|
||||
INAME7="aaaaa-ansible-inst7"
|
||||
TYPE="n1-standard-1"
|
||||
IMAGE="https://www.googleapis.com/compute/v1beta16/projects/debian-cloud/global/images/debian-7-wheezy-v20131014"
|
||||
NETWORK="default"
|
||||
SCOPES="https://www.googleapis.com/auth/userinfo.email,https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/devstorage.full_control"
|
||||
|
||||
# networks / firewalls
|
||||
NETWK1="ansible-network1"
|
||||
NETWK2="ansible-network2"
|
||||
NETWK3="ansible-network3"
|
||||
CIDR1="10.240.16.0/24"
|
||||
CIDR2="10.240.32.0/24"
|
||||
CIDR3="10.240.64.0/24"
|
||||
GW1="10.240.16.1"
|
||||
GW2="10.240.32.1"
|
||||
FW1="ansible-fwrule1"
|
||||
FW2="ansible-fwrule2"
|
||||
FW3="ansible-fwrule3"
|
||||
FW4="ansible-fwrule4"
|
||||
|
||||
# load-balancer tests
|
||||
HC1="ansible-hc1"
|
||||
HC2="ansible-hc2"
|
||||
HC3="ansible-hc3"
|
||||
LB1="ansible-lb1"
|
||||
LB2="ansible-lb2"
|
||||
|
||||
from commands import getstatusoutput as run
|
||||
import sys
|
||||
|
||||
test_cases = [
|
||||
{'id': '01', 'desc': 'Detach / Delete disk tests',
|
||||
'setup': ['gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD),
|
||||
'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % (DNAME, ZONE)],
|
||||
|
||||
'tests': [
|
||||
{'desc': 'DETACH_ONLY but disk not found [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (INAME, ZONE),
|
||||
},
|
||||
{'desc': 'DETACH_ONLY but instance not found [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, "missing-instance", ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE),
|
||||
},
|
||||
{'desc': 'DETACH_ONLY but neither disk nor instance exists [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % ("missing-disk", "missing-instance", ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "missing-instance", "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE),
|
||||
},
|
||||
{'desc': 'DETACH_ONLY but disk is not currently attached [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, DNAME, ZONE),
|
||||
},
|
||||
{'desc': 'DETACH_ONLY disk is attached and should be detached [success]',
|
||||
'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s detach_only=yes state=absent' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "detach_only": true, "detached_from_instance": "%s", "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (INAME, INAME, DNAME, ZONE),
|
||||
'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)],
|
||||
},
|
||||
{'desc': 'DETACH_ONLY but not instance specified [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s detach_only=yes state=absent' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must specify an instance name when detaching a disk"}',
|
||||
},
|
||||
{'desc': 'DELETE but disk not found [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s state=absent' % ("missing-disk", ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-disk", "state": "absent", "zone": "%s"}' % (ZONE),
|
||||
},
|
||||
{'desc': 'DELETE but disk is attached [FAIL]',
|
||||
'setup': ['gcutil attachdisk --disk="%s,mode=READ_ONLY" --zone=%s %s' % (DNAME, ZONE, INAME), 'sleep 10'],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE),
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"The disk resource 'projects/%s/zones/%s/disks/%s' is already being used by 'projects/%s/zones/%s/instances/%s'\"}" % (PROJECT, ZONE, DNAME, PROJECT, ZONE, INAME),
|
||||
'teardown': ['gcutil detachdisk --zone=%s --device_name=%s %s' % (ZONE, DNAME, INAME)],
|
||||
},
|
||||
{'desc': 'DELETE disk [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s state=absent' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 2, "state": "absent", "zone": "%s"}' % (DNAME, ZONE),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'sleep 15',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
|
||||
{'id': '02', 'desc': 'Create disk but do not attach (e.g. no instance_name param)',
|
||||
'setup': [],
|
||||
'tests': [
|
||||
{'desc': 'CREATE_NO_ATTACH "string" for size_gb [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb="foo" zone=%s' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
|
||||
},
|
||||
{'desc': 'CREATE_NO_ATTACH negative size_gb [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb=-2 zone=%s' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
|
||||
},
|
||||
{'desc': 'CREATE_NO_ATTACH size_gb exceeds quota [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb=9999 zone=%s' % ("big-disk", ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}',
|
||||
},
|
||||
{'desc': 'CREATE_NO_ATTACH create the disk [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_NO_ATTACH but disk already exists [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s zone=%s' % (DNAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "name": "%s", "size_gb": 10, "state": "present", "zone": "%s"}' % (DNAME, ZONE),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
|
||||
{'id': '03', 'desc': 'Create and attach disk',
|
||||
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE, USE_PD),
|
||||
'gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --persistent_boot_disk=%s' % (INAME, ZONE, "g1-small", NETWORK, SCOPES, IMAGE, USE_PD),
|
||||
'gcutil adddisk "%s" --size_gb=2 --zone=%s' % (DNAME, ZONE),
|
||||
'gcutil adddisk "%s" --size_gb=2 --zone=%s --wait_until_complete' % (DNAME2, ZONE),],
|
||||
'tests': [
|
||||
{'desc': 'CREATE_AND_ATTACH "string" for size_gb [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb="foo" instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH negative size_gb [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb=-2 instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Must supply a size_gb larger than 1 GB"}',
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH size_gb exceeds quota [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s size_gb=9999 instance_name=%s zone=%s' % ("big-disk", INAME, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Requested disk size exceeds quota"}',
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH missing instance [FAIL]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, "missing-instance", ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Instance %s does not exist in zone %s"}' % ("missing-instance", ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH disk exists but not attached [success]',
|
||||
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
|
||||
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH disk exists already attached [success]',
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH attached RO, attempt RO to 2nd inst [success]',
|
||||
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s' % (DNAME, INAME2, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": true, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME2, DNAME, ZONE),
|
||||
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH attached RO, attach RW to self [FAILED no-op]',
|
||||
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME, INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"attached_mode": "READ_ONLY", "attached_to_instance": "%s", "changed": false, "name": "%s", "size_gb": 2, "state": "present", "zone": "%s"}' % (INAME, DNAME, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH attached RW, attach RW to other [FAIL]',
|
||||
'setup': ['gcutil attachdisk --disk=%s,mode=READ_WRITE --zone=%s %s' % (DNAME2, ZONE, INAME), 'sleep 10'],
|
||||
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s mode=READ_WRITE' % (DNAME2, INAME2, ZONE),
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[RESOURCE_IN_USE] and message: The disk resource 'projects/%s/zones/%s/disks/%s' is already being used in read-write mode\"}" % (PROJECT, ZONE, DNAME2),
|
||||
'peek_after': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
},
|
||||
{'desc': 'CREATE_AND_ATTACH attach too many disks to inst [FAIL]',
|
||||
'setup': ['gcutil adddisk aa-disk-dummy --size_gb=2 --zone=%s' % (ZONE),
|
||||
'gcutil adddisk aa-disk-dummy2 --size_gb=2 --zone=%s --wait_until_complete' % (ZONE),
|
||||
'gcutil attachdisk --disk=aa-disk-dummy --zone=%s %s' % (ZONE, INAME),
|
||||
'sleep 5'],
|
||||
'peek_before': ["gcutil --format=csv listinstances --zone=%s --filter=\"name eq 'aaaa.*'\"" % (ZONE)],
|
||||
'm': 'gce_pd',
|
||||
'a': 'name=%s instance_name=%s zone=%s' % ("aa-disk-dummy2", INAME, ZONE),
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[200], API error code[LIMIT_EXCEEDED] and message: Exceeded limit 'maximum_persistent_disks' on resource 'projects/%s/zones/%s/instances/%s'. Limit: 4\"}" % (PROJECT, ZONE, INAME),
|
||||
'teardown': ['gcutil detachdisk --device_name=aa-disk-dummy --zone=%s %s' % (ZONE, INAME),
|
||||
'sleep 3',
|
||||
'gcutil deletedisk -f aa-disk-dummy --zone=%s' % (ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f aa-disk-dummy2 --zone=%s' % (ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
|
||||
'sleep 15',
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'sleep 15',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME2, ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME, ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (DNAME2, ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
|
||||
{'id': '04', 'desc': 'Delete / destroy instances',
|
||||
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME, ZONE, TYPE, IMAGE),
|
||||
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME2, ZONE, TYPE, IMAGE),
|
||||
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME3, ZONE, TYPE, IMAGE),
|
||||
'gcutil addinstance "%s" --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME4, ZONE, TYPE, IMAGE),
|
||||
'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --image="%s" --persistent_boot_disk=false' % (INAME5, ZONE, TYPE, IMAGE)],
|
||||
'tests': [
|
||||
{'desc': 'DELETE instance, bad zone param [FAIL]',
|
||||
'm': 'gce',
|
||||
'a': 'name=missing-inst zone=bogus state=absent',
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "value of zone must be one of: us-central1-a,us-central1-b,us-central2-a,europe-west1-a,europe-west1-b, got: bogus"}',
|
||||
},
|
||||
{'desc': 'DELETE non-existent instance, no-op [success]',
|
||||
'm': 'gce',
|
||||
'a': 'name=missing-inst zone=%s state=absent' % (ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-inst", "state": "absent", "zone": "%s"}' % (ZONE),
|
||||
},
|
||||
{'desc': 'DELETE an existing named instance [success]',
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s state=absent' % (INAME, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent", "zone": "%s"}' % (INAME, ZONE),
|
||||
},
|
||||
{'desc': 'DELETE list of instances with a non-existent one [success]',
|
||||
'm': 'gce',
|
||||
'a': 'instance_names=%s,missing,%s zone=%s state=absent' % (INAME2,INAME3, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME2, INAME3, ZONE),
|
||||
},
|
||||
{'desc': 'DELETE list of instances all pre-exist [success]',
|
||||
'm': 'gce',
|
||||
'a': 'instance_names=%s,%s zone=%s state=absent' % (INAME4,INAME5, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_names": ["%s", "%s"], "state": "absent", "zone": "%s"}' % (INAME4, INAME5, ZONE),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
|
||||
{'id': '05', 'desc': 'Create instances',
|
||||
'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME7),
|
||||
'gcutil addinstance boo --wait_until_running --zone=%s --machine_type=%s --network=%s --disk=%s,mode=READ_WRITE,boot --kernel=%s' % (ZONE,TYPE,NETWORK,DNAME7,KERNEL),
|
||||
],
|
||||
'tests': [
|
||||
{'desc': 'CREATE_INSTANCE invalid image arg [FAIL]',
|
||||
'm': 'gce',
|
||||
'a': 'name=foo image=foo',
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE metadata a list [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata=\'[\\"foo\\":\\"bar\\",\\"baz\\":1]\'' % (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE metadata not a dict [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata=\\"foo\\":\\"bar\\",\\"baz\\":1' % (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE with metadata form1 [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata=\'{"foo":"bar","baz":1}\'' % (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE with metadata form2 [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata={\'foo\':\'bar\',\'baz\':1}' % (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE with metadata form3 [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata="foo:bar" '% (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata syntax"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE with metadata form4 [FAIL]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s metadata="{\'foo\':\'bar\'}"'% (INAME,ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "bad metadata: malformed string"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE invalid image arg [FAIL]',
|
||||
'm': 'gce',
|
||||
'a': 'instance_names=foo,bar image=foo',
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required create instance variable"}',
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE single inst, using defaults [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s' % (INAME),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE the same instance again, no-op [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s' % (INAME),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.175.15", "public_ip": "173.255.120.190", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME, ZONE, INAME, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE instance with alt type [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s machine_type=n1-standard-2' % (INAME2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": "debian-7-wheezy-v20130816", "machine_type": "n1-standard-2", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.192.227", "public_ip": "173.255.121.233", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME2, ZONE, INAME2, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE instance with root pd [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s persistent_boot_disk=yes' % (INAME3),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME3, ZONE, INAME3, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE instance with root pd, that already exists [success]',
|
||||
'setup': ['gcutil adddisk --source_image=%s --zone=%s %s --wait_until_complete' % (IMAGE, ZONE, DNAME6),],
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME6, ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {}, "name": "%s", "network": "default", "private_ip": "10.240.178.140", "public_ip": "173.255.121.176", "status": "RUNNING", "tags": [], "zone": "%s"}], "name": "%s", "state": "present", "zone": "%s"}' % (INAME6, ZONE, INAME6, ZONE),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE instance with root pd attached to other inst [FAIL]',
|
||||
'm': 'gce',
|
||||
'a': 'name=%s zone=%s persistent_boot_disk=yes' % (INAME7, ZONE),
|
||||
'r': '127.0.0.1 | FAILED >> {"failed": true, "msg": "Unexpected error attempting to create instance %s, error: The disk resource \'projects/%s/zones/%s/disks/%s\' is already being used in read-write mode"}' % (INAME7,PROJECT,ZONE,DNAME7),
|
||||
},
|
||||
{'desc': 'CREATE_INSTANCE use *all* the options! [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce',
|
||||
'a': 'instance_names=%s,%s metadata=\'{\\"foo\\":\\"bar\\", \\"baz\\":1}\' tags=t1,t2,t3 zone=%s image=centos-6-v20130731 persistent_boot_disk=yes' % (INAME4,INAME5,ZONE),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "instance_data": [{"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.130.4", "public_ip": "173.255.121.97", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}, {"image": null, "machine_type": "n1-standard-1", "metadata": {"baz": "1", "foo": "bar"}, "name": "%s", "network": "default", "private_ip": "10.240.207.226", "public_ip": "173.255.121.85", "status": "RUNNING", "tags": ["t1", "t2", "t3"], "zone": "%s"}], "instance_names": ["%s", "%s"], "state": "present", "zone": "%s"}' % (INAME4, ZONE, INAME5, ZONE, INAME4, INAME5, ZONE),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deleteinstance -f "%s" --zone=%s' % (INAME, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME2, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME3, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME4, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME5, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME6, ZONE),
|
||||
'gcutil deleteinstance -f "%s" --zone=%s' % (INAME7, ZONE),
|
||||
'gcutil deleteinstance -f boo --zone=%s' % (ZONE),
|
||||
'sleep 10',
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME3, ZONE),
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME4, ZONE),
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME5, ZONE),
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME6, ZONE),
|
||||
'gcutil deletedisk -f "%s" --zone=%s' % (INAME7, ZONE),
|
||||
'sleep 10'],
|
||||
},
|
||||
|
||||
{'id': '06', 'desc': 'Delete / destroy networks and firewall rules',
|
||||
'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1),
|
||||
'gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR2, GW2, NETWK2),
|
||||
'sleep 5',
|
||||
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1),
|
||||
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK2, FW2),
|
||||
'sleep 5'],
|
||||
'tests': [
|
||||
{'desc': 'DELETE bogus named firewall [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'fwname=missing-fwrule state=absent',
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "fwname": "missing-fwrule", "state": "absent"}',
|
||||
},
|
||||
{'desc': 'DELETE bogus named network [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=missing-network state=absent',
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing-network", "state": "absent"}',
|
||||
},
|
||||
{'desc': 'DELETE named firewall rule [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'fwname=%s state=absent' % (FW1),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "state": "absent"}' % (FW1),
|
||||
'teardown': ['sleep 5'], # pause to give GCE time to delete fwrule
|
||||
},
|
||||
{'desc': 'DELETE unused named network [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s state=absent' % (NETWK1),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (NETWK1),
|
||||
},
|
||||
{'desc': 'DELETE named network *and* fwrule [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s fwname=%s state=absent' % (NETWK2, FW2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "fwname": "%s", "name": "%s", "state": "absent"}' % (FW2, NETWK2),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deletenetwork -f %s' % (NETWK1),
|
||||
'gcutil deletenetwork -f %s' % (NETWK2),
|
||||
'sleep 5',
|
||||
'gcutil deletefirewall -f %s' % (FW1),
|
||||
'gcutil deletefirewall -f %s' % (FW2)],
|
||||
},
|
||||
|
||||
{'id': '07', 'desc': 'Create networks and firewall rules',
|
||||
'setup': ['gcutil addnetwork --range="%s" --gateway="%s" %s' % (CIDR1, GW1, NETWK1),
|
||||
'sleep 5',
|
||||
'gcutil addfirewall --allowed="tcp:80" --network=%s %s' % (NETWK1, FW1),
|
||||
'sleep 5'],
|
||||
'tests': [
|
||||
{'desc': 'CREATE network without specifying ipv4_range [FAIL]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=fail',
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Missing required 'ipv4_range' parameter\"}",
|
||||
},
|
||||
{'desc': 'CREATE network with specifying bad ipv4_range [FAIL]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=fail ipv4_range=bad_value',
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.IPv4Range': 'bad_value'. Must be a CIDR address range that is contained in the RFC1918 private address blocks: [10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16]\"}",
|
||||
},
|
||||
{'desc': 'CREATE existing network, not changed [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s ipv4_range=%s' % (NETWK1, CIDR1),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "ipv4_range": "%s", "name": "%s", "state": "present"}' % (CIDR1, NETWK1),
|
||||
},
|
||||
{'desc': 'CREATE new network, changed [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s ipv4_range=%s' % (NETWK2, CIDR2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "ipv4_range": "10.240.32.0/24", "name": "%s", "state": "present"}' % (NETWK2),
|
||||
},
|
||||
{'desc': 'CREATE new fw rule missing params [FAIL]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s fwname=%s' % (NETWK1, FW1),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required firewall rule parameter(s)"}',
|
||||
},
|
||||
{'desc': 'CREATE new fw rule bad params [FAIL]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s fwname=broken allowed=blah src_tags="one,two"' % (NETWK1),
|
||||
'r': "127.0.0.1 | FAILED >> {\"changed\": false, \"failed\": true, \"msg\": \"Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for field 'resource.allowed[0].IPProtocol': 'blah'. Must be one of [\\\"tcp\\\", \\\"udp\\\", \\\"icmp\\\"] or an IP protocol number between 0 and 255\"}",
|
||||
},
|
||||
{'desc': 'CREATE existing fw rule [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW1),
|
||||
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": false, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW1, CIDR1, NETWK1),
|
||||
},
|
||||
{'desc': 'CREATE new fw rule [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK1, FW3),
|
||||
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW3, CIDR1, NETWK1),
|
||||
},
|
||||
{'desc': 'CREATE new network *and* fw rule [success]',
|
||||
'm': 'gce_net',
|
||||
'a': 'name=%s ipv4_range=%s fwname=%s allowed="tcp:80" src_tags="one,two"' % (NETWK3, CIDR3, FW4),
|
||||
'r': '127.0.0.1 | success >> {"allowed": "tcp:80", "changed": true, "fwname": "%s", "ipv4_range": "%s", "name": "%s", "src_range": null, "src_tags": ["one", "two"], "state": "present"}' % (FW4, CIDR3, NETWK3),
|
||||
},
|
||||
],
|
||||
'teardown': ['gcutil deletefirewall -f %s' % (FW1),
|
||||
'gcutil deletefirewall -f %s' % (FW2),
|
||||
'gcutil deletefirewall -f %s' % (FW3),
|
||||
'gcutil deletefirewall -f %s' % (FW4),
|
||||
'sleep 5',
|
||||
'gcutil deletenetwork -f %s' % (NETWK1),
|
||||
'gcutil deletenetwork -f %s' % (NETWK2),
|
||||
'gcutil deletenetwork -f %s' % (NETWK3),
|
||||
'sleep 5'],
|
||||
},
|
||||
|
||||
{'id': '08', 'desc': 'Create load-balancer resources',
|
||||
'setup': ['gcutil addinstance "%s" --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME, ZONE, TYPE, NETWORK, SCOPES, IMAGE),
|
||||
'gcutil addinstance "%s" --wait_until_running --zone=%s --machine_type=%s --network=%s --service_account_scopes="%s" --image="%s" --nopersistent_boot_disk' % (INAME2, ZONE, TYPE, NETWORK, SCOPES, IMAGE),
|
||||
],
|
||||
'tests': [
|
||||
{'desc': 'Do nothing [FAIL]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_port=7',
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Nothing to do, please specify a \\\"name\\\" or \\\"httphealthcheck_name\\\" parameter"}',
|
||||
},
|
||||
{'desc': 'CREATE_HC create basic http healthcheck [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s' % (HC1),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1),
|
||||
},
|
||||
{'desc': 'CREATE_HC (repeat, no-op) create basic http healthcheck [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s' % (HC1),
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/", "httphealthcheck_port": 80, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC1),
|
||||
},
|
||||
{'desc': 'CREATE_HC create custom http healthcheck [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s httphealthcheck_port=1234 httphealthcheck_path="/whatup" httphealthcheck_host="foo" httphealthcheck_interval=300' % (HC2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_healthy_count": 2, "httphealthcheck_host": "foo", "httphealthcheck_interval": 300, "httphealthcheck_name": "%s", "httphealthcheck_path": "/whatup", "httphealthcheck_port": 1234, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "name": null, "state": "present"}' % (HC2),
|
||||
},
|
||||
{'desc': 'CREATE_HC create (broken) custom http healthcheck [FAIL]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s httphealthcheck_port="string" httphealthcheck_path=7' % (HC3),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[400], API error code[None] and message: Invalid value for: Expected a signed integer, got \'string\' (class java.lang.String)"}',
|
||||
},
|
||||
{'desc': 'CREATE_LB create lb, missing region [FAIL]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=%s' % (LB1),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Missing required region name"}',
|
||||
},
|
||||
{'desc': 'CREATE_LB create lb, bogus region [FAIL]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=%s region=bogus' % (LB1),
|
||||
'r': '127.0.0.1 | FAILED >> {"changed": false, "failed": true, "msg": "Unexpected response: HTTP return_code[404], API error code[None] and message: The resource \'projects/%s/regions/bogus\' was not found"}' % (PROJECT),
|
||||
},
|
||||
{'desc': 'CREATE_LB create lb, minimal params [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=%s region=%s' % (LB1, REGION),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.123.245", "httphealthchecks": [], "members": [], "name": "%s", "port_range": "1-65535", "protocol": "tcp", "region": "%s", "state": "present"}' % (LB1, REGION),
|
||||
},
|
||||
{'desc': 'CREATE_LB create lb full params [success]',
|
||||
'strip_numbers': True,
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s httphealthcheck_port=5055 httphealthcheck_path="/howami" name=%s port_range=8000-8888 region=%s members=%s/%s,%s/%s' % (HC3,LB2,REGION,ZONE,INAME,ZONE,INAME2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "external_ip": "173.255.126.81", "httphealthcheck_healthy_count": 2, "httphealthcheck_host": null, "httphealthcheck_interval": 5, "httphealthcheck_name": "%s", "httphealthcheck_path": "/howami", "httphealthcheck_port": 5055, "httphealthcheck_timeout": 5, "httphealthcheck_unhealthy_count": 2, "httphealthchecks": ["%s"], "members": ["%s/%s", "%s/%s"], "name": "%s", "port_range": "8000-8888", "protocol": "tcp", "region": "%s", "state": "present"}' % (HC3,HC3,ZONE,INAME,ZONE,INAME2,LB2,REGION),
|
||||
},
|
||||
],
|
||||
'teardown': [
|
||||
'gcutil deleteinstance --zone=%s -f %s %s' % (ZONE, INAME, INAME2),
|
||||
'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2),
|
||||
'sleep 10',
|
||||
'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2),
|
||||
'sleep 10',
|
||||
'gcutil deletehttphealthcheck -f %s %s %s' % (HC1, HC2, HC3),
|
||||
],
|
||||
},
|
||||
|
||||
{'id': '09', 'desc': 'Destroy load-balancer resources',
|
||||
'setup': ['gcutil addhttphealthcheck %s' % (HC1),
|
||||
'sleep 5',
|
||||
'gcutil addhttphealthcheck %s' % (HC2),
|
||||
'sleep 5',
|
||||
'gcutil addtargetpool --health_checks=%s --region=%s %s-tp' % (HC1, REGION, LB1),
|
||||
'sleep 5',
|
||||
'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB1, REGION, LB1),
|
||||
'sleep 5',
|
||||
'gcutil addtargetpool --region=%s %s-tp' % (REGION, LB2),
|
||||
'sleep 5',
|
||||
'gcutil addforwardingrule --target=%s-tp --region=%s %s' % (LB2, REGION, LB2),
|
||||
'sleep 5',
|
||||
],
|
||||
'tests': [
|
||||
{'desc': 'DELETE_LB: delete a non-existent LB [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=missing state=absent',
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "name": "missing", "state": "absent"}',
|
||||
},
|
||||
{'desc': 'DELETE_LB: delete a non-existent LB+HC [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=missing httphealthcheck_name=alsomissing state=absent',
|
||||
'r': '127.0.0.1 | success >> {"changed": false, "httphealthcheck_name": "alsomissing", "name": "missing", "state": "absent"}',
|
||||
},
|
||||
{'desc': 'DELETE_LB: destroy standalone healthcheck [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'httphealthcheck_name=%s state=absent' % (HC2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": null, "state": "absent"}' % (HC2),
|
||||
},
|
||||
{'desc': 'DELETE_LB: destroy standalone balancer [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=%s state=absent' % (LB2),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "name": "%s", "state": "absent"}' % (LB2),
|
||||
},
|
||||
{'desc': 'DELETE_LB: destroy LB+HC [success]',
|
||||
'm': 'gce_lb',
|
||||
'a': 'name=%s httphealthcheck_name=%s state=absent' % (LB1, HC1),
|
||||
'r': '127.0.0.1 | success >> {"changed": true, "httphealthcheck_name": "%s", "name": "%s", "state": "absent"}' % (HC1,LB1),
|
||||
},
|
||||
],
|
||||
'teardown': [
|
||||
'gcutil deleteforwardingrule --region=%s -f %s %s' % (REGION, LB1, LB2),
|
||||
'sleep 10',
|
||||
'gcutil deletetargetpool --region=%s -f %s-tp %s-tp' % (REGION, LB1, LB2),
|
||||
'sleep 10',
|
||||
'gcutil deletehttphealthcheck -f %s %s' % (HC1, HC2),
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
def main(tests_to_run=[]):
|
||||
for test in test_cases:
|
||||
if tests_to_run and test['id'] not in tests_to_run:
|
||||
continue
|
||||
print "=> starting/setup '%s:%s'"% (test['id'], test['desc'])
|
||||
if DEBUG: print "=debug>", test['setup']
|
||||
for c in test['setup']:
|
||||
(s,o) = run(c)
|
||||
test_i = 1
|
||||
for t in test['tests']:
|
||||
if DEBUG: print "=>debug>", test_i, t['desc']
|
||||
# run any test-specific setup commands
|
||||
if t.has_key('setup'):
|
||||
for setup in t['setup']:
|
||||
(status, output) = run(setup)
|
||||
|
||||
# run any 'peek_before' commands
|
||||
if t.has_key('peek_before') and PEEKING_ENABLED:
|
||||
for setup in t['peek_before']:
|
||||
(status, output) = run(setup)
|
||||
|
||||
# run the ansible test if 'a' exists, otherwise
|
||||
# an empty 'a' directive allows test to run
|
||||
# setup/teardown for a subsequent test.
|
||||
if t['a']:
|
||||
if DEBUG: print "=>debug>", t['m'], t['a']
|
||||
acmd = "ansible all -o -m %s -a \"%s\"" % (t['m'],t['a'])
|
||||
#acmd = "ANSIBLE_KEEP_REMOTE_FILES=1 ansible all -vvv -m %s -a \"%s\"" % (t['m'],t['a'])
|
||||
(s,o) = run(acmd)
|
||||
|
||||
# check expected output
|
||||
if DEBUG: print "=debug>", o.strip(), "!=", t['r']
|
||||
print "=> %s.%02d '%s':" % (test['id'], test_i, t['desc']),
|
||||
if t.has_key('strip_numbers'):
|
||||
# strip out all numbers so we don't trip over different
|
||||
# IP addresses
|
||||
is_good = (o.strip().translate(None, "0123456789") == t['r'].translate(None, "0123456789"))
|
||||
else:
|
||||
is_good = (o.strip() == t['r'])
|
||||
|
||||
if is_good:
|
||||
print "PASS"
|
||||
else:
|
||||
print "FAIL"
|
||||
if VERBOSE:
|
||||
print "=>", acmd
|
||||
print "=> Expected:", t['r']
|
||||
print "=> Got:", o.strip()
|
||||
|
||||
# run any 'peek_after' commands
|
||||
if t.has_key('peek_after') and PEEKING_ENABLED:
|
||||
for setup in t['peek_after']:
|
||||
(status, output) = run(setup)
|
||||
|
||||
# run any test-specific teardown commands
|
||||
if t.has_key('teardown'):
|
||||
for td in t['teardown']:
|
||||
(status, output) = run(td)
|
||||
test_i += 1
|
||||
|
||||
print "=> completing/teardown '%s:%s'" % (test['id'], test['desc'])
|
||||
if DEBUG: print "=debug>", test['teardown']
|
||||
for c in test['teardown']:
|
||||
(s,o) = run(c)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point: an optional single argument is a
    # comma-separated list of test-case ids; no argument runs every case.
    tests_to_run = []
    if len(sys.argv) == 2:
        if sys.argv[1] in ["--help", "--list"]:
            # Print usage plus the id/description of every known test case,
            # then exit without running anything.
            print "usage: %s [id1,id2,...,idN]" % sys.argv[0]
            print " * An empty argument list will execute all tests"
            print " * Do not need to specify tests in numerical order"
            print " * List test categories with --list or --help"
            print ""
            for test in test_cases:
                print "\t%s:%s" % (test['id'], test['desc'])
            sys.exit(0)
        else:
            # e.g. "06,08" -> ['06', '08']
            tests_to_run = sys.argv[1].split(',')
    main(tests_to_run)
|
@ -0,0 +1,141 @@
|
||||
# (c) 2014, Brian Coca, Josh Drake, et al
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
import time
|
||||
import errno
|
||||
|
||||
try:
|
||||
import simplejson as json
|
||||
except ImportError:
|
||||
import json
|
||||
|
||||
from ansible import constants as C
|
||||
from ansible import utils
|
||||
from ansible.cache.base import BaseCacheModule
|
||||
|
||||
class CacheModule(BaseCacheModule):
|
||||
"""
|
||||
A caching module backed by json files.
|
||||
"""
|
||||
def __init__(self, *args, **kwargs):
|
||||
|
||||
self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
|
||||
self._cache = {}
|
||||
self._cache_dir = C.CACHE_PLUGIN_CONNECTION # expects a dir path
|
||||
if not self._cache_dir:
|
||||
utils.exit("error, fact_caching_connection is not set, cannot use fact cache")
|
||||
|
||||
if not os.path.exists(self._cache_dir):
|
||||
try:
|
||||
os.makedirs(self._cache_dir)
|
||||
except (OSError,IOError), e:
|
||||
utils.warning("error while trying to create cache dir %s : %s" % (self._cache_dir, str(e)))
|
||||
return None
|
||||
|
||||
def get(self, key):
|
||||
|
||||
if key in self._cache:
|
||||
return self._cache.get(key)
|
||||
|
||||
if self.has_expired(key):
|
||||
raise KeyError
|
||||
|
||||
cachefile = "%s/%s" % (self._cache_dir, key)
|
||||
try:
|
||||
f = open( cachefile, 'r')
|
||||
except (OSError,IOError), e:
|
||||
utils.warning("error while trying to write to %s : %s" % (cachefile, str(e)))
|
||||
else:
|
||||
value = json.load(f)
|
||||
self._cache[key] = value
|
||||
return value
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def set(self, key, value):
|
||||
|
||||
self._cache[key] = value
|
||||
|
||||
cachefile = "%s/%s" % (self._cache_dir, key)
|
||||
try:
|
||||
f = open(cachefile, 'w')
|
||||
except (OSError,IOError), e:
|
||||
utils.warning("error while trying to read %s : %s" % (cachefile, str(e)))
|
||||
else:
|
||||
f.write(utils.jsonify(value))
|
||||
finally:
|
||||
f.close()
|
||||
|
||||
def has_expired(self, key):
|
||||
|
||||
cachefile = "%s/%s" % (self._cache_dir, key)
|
||||
try:
|
||||
st = os.stat(cachefile)
|
||||
except (OSError,IOError), e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return False
|
||||
else:
|
||||
utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
|
||||
|
||||
if time.time() - st.st_mtime <= self._timeout:
|
||||
return False
|
||||
|
||||
if key in self._cache:
|
||||
del self._cache[key]
|
||||
return True
|
||||
|
||||
def keys(self):
|
||||
keys = []
|
||||
for k in os.listdir(self._cache_dir):
|
||||
if not (k.startswith('.') or self.has_expired(k)):
|
||||
keys.append(k)
|
||||
return keys
|
||||
|
||||
def contains(self, key):
|
||||
|
||||
if key in self._cache:
|
||||
return True
|
||||
|
||||
if self.has_expired(key):
|
||||
return False
|
||||
try:
|
||||
st = os.stat("%s/%s" % (self._cache_dir, key))
|
||||
return True
|
||||
except (OSError,IOError), e:
|
||||
if e.errno == errno.ENOENT:
|
||||
return False
|
||||
else:
|
||||
utils.warning("error while trying to stat %s : %s" % (cachefile, str(e)))
|
||||
|
||||
def delete(self, key):
|
||||
del self._cache[key]
|
||||
try:
|
||||
os.remove("%s/%s" % (self._cache_dir, key))
|
||||
except (OSError,IOError), e:
|
||||
pass #TODO: only pass on non existing?
|
||||
|
||||
def flush(self):
|
||||
self._cache = {}
|
||||
for key in self.keys():
|
||||
self.delete(key)
|
||||
|
||||
def copy(self):
|
||||
ret = dict()
|
||||
for key in self.keys():
|
||||
ret[key] = self.get(key)
|
||||
return ret
|
@ -0,0 +1,128 @@
|
||||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
class SQLParseError(Exception):
    """Raised for any identifier the SQL quoting helpers cannot handle."""
    pass

class UnclosedQuoteError(SQLParseError):
    """Raised when a user-quoted identifier has no matching closing quote."""
    pass

# maps a type of identifier to the maximum number of dot levels that are
# allowed to specify that identifier. For example, a database column can be
# specified by up to 4 levels: database.schema.table.column
_PG_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, schema=2, table=3, column=4, role=1)
_MYSQL_IDENTIFIER_TO_DOT_LEVEL = dict(database=1, table=2, column=3, role=1, vars=1)
|
||||
|
||||
def _find_end_quote(identifier, quote_char):
|
||||
accumulate = 0
|
||||
while True:
|
||||
try:
|
||||
quote = identifier.index(quote_char)
|
||||
except ValueError:
|
||||
raise UnclosedQuoteError
|
||||
accumulate = accumulate + quote
|
||||
try:
|
||||
next_char = identifier[quote+1]
|
||||
except IndexError:
|
||||
return accumulate
|
||||
if next_char == quote_char:
|
||||
try:
|
||||
identifier = identifier[quote+2:]
|
||||
accumulate = accumulate + 2
|
||||
except IndexError:
|
||||
raise UnclosedQuoteError
|
||||
else:
|
||||
return accumulate
|
||||
|
||||
|
||||
def _identifier_parse(identifier, quote_char):
    """Split a dotted SQL identifier into a list of individually quoted parts.

    Recursively consumes one dot-separated component at a time.  Components
    the user already wrapped in quote_char are kept verbatim; unquoted
    components get embedded quote chars doubled and are then wrapped in
    quote_char.  Raises SQLParseError on empty input, an unquoted trailing
    dot, or stray text after a user-quoted component.
    """
    if not identifier:
        # also reached via recursion for a trailing dot, e.g. "db." -> ''
        raise SQLParseError('Identifier name unspecified or unquoted trailing dot')

    already_quoted = False
    if identifier.startswith(quote_char):
        already_quoted = True
        try:
            # +1 compensates for the leading quote_char skipped by [1:]
            end_quote = _find_end_quote(identifier[1:], quote_char=quote_char) + 1
        except UnclosedQuoteError:
            # no closing quote: fall through and treat the text as unquoted
            already_quoted = False
        else:
            if end_quote < len(identifier) - 1:
                # characters remain after the closing quote
                if identifier[end_quote+1] == '.':
                    # quoted component followed by a dot: recurse on the rest
                    dot = end_quote + 1
                    first_identifier = identifier[:dot]
                    next_identifier = identifier[dot+1:]
                    further_identifiers = _identifier_parse(next_identifier, quote_char)
                    further_identifiers.insert(0, first_identifier)
                else:
                    # anything but '.' after a closing quote is malformed
                    raise SQLParseError('User escaped identifiers must escape extra quotes')
            else:
                # the whole string is one already-quoted component
                further_identifiers = [identifier]

    if not already_quoted:
        try:
            dot = identifier.index('.')
        except ValueError:
            # no dot: quote this single component (doubling embedded quotes)
            identifier = identifier.replace(quote_char, quote_char*2)
            identifier = ''.join((quote_char, identifier, quote_char))
            further_identifiers = [identifier]
        else:
            if dot == 0 or dot >= len(identifier) - 1:
                # leading or trailing dot: keep the dot as literal text and
                # quote the whole string as one component
                identifier = identifier.replace(quote_char, quote_char*2)
                identifier = ''.join((quote_char, identifier, quote_char))
                further_identifiers = [identifier]
            else:
                # split on the first dot, quote the head, recurse on the tail
                first_identifier = identifier[:dot]
                next_identifier = identifier[dot+1:]
                further_identifiers = _identifier_parse(next_identifier, quote_char)
                first_identifier = first_identifier.replace(quote_char, quote_char*2)
                first_identifier = ''.join((quote_char, first_identifier, quote_char))
                further_identifiers.insert(0, first_identifier)

    return further_identifiers
|
||||
|
||||
|
||||
def pg_quote_identifier(identifier, id_type):
    """Return identifier safely double-quoted for use in PostgreSQL SQL.

    id_type is one of the keys of _PG_IDENTIFIER_TO_DOT_LEVEL and bounds how
    many dot-separated levels the identifier may contain.  Raises
    SQLParseError when the identifier has too many levels.
    """
    fragments = _identifier_parse(identifier, quote_char='"')
    max_dots = _PG_IDENTIFIER_TO_DOT_LEVEL[id_type]
    if len(fragments) > max_dots:
        raise SQLParseError('PostgreSQL does not support %s with more than %i dots' % (id_type, max_dots))
    return '.'.join(fragments)
|
||||
|
||||
def mysql_quote_identifier(identifier, id_type):
    """Return identifier safely backtick-quoted for use in MySQL SQL.

    id_type is one of the keys of _MYSQL_IDENTIFIER_TO_DOT_LEVEL and bounds
    how many dot-separated levels the identifier may contain.  Raises
    SQLParseError when the identifier has too many levels.
    """
    identifier_fragments = _identifier_parse(identifier, quote_char='`')
    if len(identifier_fragments) > _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]:
        raise SQLParseError('MySQL does not support %s with more than %i dots' % (id_type, _MYSQL_IDENTIFIER_TO_DOT_LEVEL[id_type]))

    # Special case: a bare '*' (all columns) must stay unquoted in MySQL,
    # so undo the quoting _identifier_parse applied to it.
    # (idiom: generator expression replaces the manual append loop)
    return '.'.join('*' if fragment == '`*`' else fragment
                    for fragment in identifier_fragments)
|
@ -0,0 +1 @@
|
||||
Subproject commit 095f8681dbdfd2e9247446822e953287c9bca66c
|
@ -0,0 +1 @@
|
||||
Subproject commit d94d0ce70b5db5ecfafbc73bebc822c9e18734f3
|
@ -0,0 +1,66 @@
|
||||
# (c) 2015, Brian Coca <briancoca+dev@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
|
||||
import os
|
||||
from ansible import utils
|
||||
from ansible.runner.return_data import ReturnData
|
||||
|
||||
class ActionModule(object):
    """Action plugin for the 'patch' module: optionally pushes a local patch
    file to the target before delegating to the remote 'patch' module."""

    def __init__(self, runner):
        # the Runner drives module execution and file transfer for us
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        # Merge complex (YAML) args with key=value args; key=value wins
        # because it is applied last.
        options = {}
        if complex_args:
            options.update(complex_args)
        options.update(utils.parse_kv(module_args))

        src = options.get('src', None)
        dest = options.get('dest', None)
        # remote_src defaults to 'yes': the patch file already lives on the target
        remote_src = utils.boolean(options.get('remote_src', 'yes'))

        if src is None or dest is None:
            result = dict(failed=True, msg="src and dest are required")
            return ReturnData(conn=conn, comm_ok=False, result=result)

        if remote_src:
            # nothing to transfer; run the module with the args untouched
            return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)

        # Source is local
        if '_original_file' in inject:
            # resolve src relative to the including playbook/role's files/ dir
            src = utils.path_dwim_relative(inject['_original_file'], 'files', src, self.runner.basedir)
        else:
            src = utils.path_dwim(self.runner.basedir, src)

        # NOTE(review): string concatenation of the remote tmp dir with a
        # (likely absolute) local path looks suspicious — presumably intended
        # to build a unique remote path; confirm against other action plugins.
        tmp_src = tmp + src
        conn.put_file(src, tmp_src)

        # When running via sudo/su as a non-root user, the uploaded file must
        # be world-readable for the become-user to access it.
        if self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root':
            if not self.runner.noop_on_check(inject):
                self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp)

        # point the module at the uploaded copy instead of the local path
        new_module_args = dict(
            src=tmp_src,
        )

        if self.runner.noop_on_check(inject):
            # propagate check mode to the remote module
            new_module_args['CHECKMODE'] = True

        module_args = utils.merge_module_args(module_args, new_module_args)

        return self.runner._execute_module(conn, tmp, 'patch', module_args, inject=inject, complex_args=complex_args)
|
@ -0,0 +1,377 @@
|
||||
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
import os
|
||||
|
||||
from ansible import utils
|
||||
import ansible.constants as C
|
||||
import ansible.utils.template as template
|
||||
from ansible import errors
|
||||
from ansible.runner.return_data import ReturnData
|
||||
import base64
|
||||
import json
|
||||
import stat
|
||||
import tempfile
|
||||
import pipes
|
||||
|
||||
## fixes https://github.com/ansible/ansible/issues/3518
# http://mypy.pythonblogs.com/12_mypy/archive/1253_workaround_for_python_bug_ascii_codec_cant_encode_character_uxa0_in_position_111_ordinal_not_in_range128.html
# HACK: force the Python 2 interpreter-wide default encoding from ASCII to
# UTF-8 so implicit str<->unicode coercions on non-ASCII file content do not
# raise UnicodeDecodeError.  reload(sys) is required because site.py removes
# sys.setdefaultencoding at interpreter startup.
# NOTE(review): this is a process-global side effect that alters string
# coercion for every module in the interpreter (Python 2 only).
import sys
reload(sys)
sys.setdefaultencoding("utf8")
||||
|
||||
|
||||
class ActionModule(object):
    ''' Action plugin for win_copy: transfers files (or inline 'content') from
    the control machine to a Windows target, then invokes the remote win_copy
    module to place the file, or win_file when checksums already match and only
    attributes may need changing.  Supports recursive directory copies, raw
    transfers, check mode and diff mode.

    Changes vs. previous revision:
      * `except Exception, err` -> `except Exception as err` (valid on the
        Python 2.6+ this codebase targets; behavior identical).
      * _get_diff_data no longer leaks the local source file handle.
      * corrected a misleading comment at the `force` short-circuit.
    '''

    def __init__(self, runner):
        self.runner = runner

    def run(self, conn, tmp_path, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' handler for file transfer operations '''

        # load up options; k=v args override complex (YAML) args
        options = {}
        if complex_args:
            options.update(complex_args)
        options.update(utils.parse_kv(module_args))
        source = options.get('src', None)
        content = options.get('content', None)
        dest = options.get('dest', None)
        raw = utils.boolean(options.get('raw', 'no'))
        force = utils.boolean(options.get('force', 'yes'))

        # content with newlines is going to be escaped to safely load in yaml
        # now we need to unescape it so that the newlines are evaluated properly
        # when writing the file to disk
        if content:
            if isinstance(content, unicode):
                try:
                    content = content.decode('unicode-escape')
                except UnicodeDecodeError:
                    pass

        # Argument validation: need a destination plus exactly one source form.
        if (source is None and content is None and not 'first_available_file' in inject) or dest is None:
            result=dict(failed=True, msg="src (or content) and dest are required")
            return ReturnData(conn=conn, result=result)
        elif (source is not None or 'first_available_file' in inject) and content is not None:
            result=dict(failed=True, msg="src and content are mutually exclusive")
            return ReturnData(conn=conn, result=result)

        # Check if the source ends with a "/" (rsync-style: copy dir contents,
        # not the dir itself).
        source_trailing_slash = False
        if source:
            source_trailing_slash = source.endswith("/")

        # Define content_tempfile in case we set it after finding content populated.
        content_tempfile = None

        # If content is defined make a temp file and write the content into it.
        if content is not None:
            try:
                # If content comes to us as a dict it should be decoded json.
                # We need to encode it back into a string to write it out.
                if type(content) is dict:
                    content_tempfile = self._create_content_tempfile(json.dumps(content))
                else:
                    content_tempfile = self._create_content_tempfile(content)
                source = content_tempfile
            except Exception as err:
                result = dict(failed=True, msg="could not write content temp file: %s" % err)
                return ReturnData(conn=conn, result=result)
        # if we have first_available_file in our vars
        # look up the files and use the first one we find as src
        elif 'first_available_file' in inject:
            found = False
            for fn in inject.get('first_available_file'):
                fn_orig = fn
                fnt = template.template(self.runner.basedir, fn, inject)
                fnd = utils.path_dwim(self.runner.basedir, fnt)
                if not os.path.exists(fnd) and '_original_file' in inject:
                    fnd = utils.path_dwim_relative(inject['_original_file'], 'files', fnt, self.runner.basedir, check=False)
                if os.path.exists(fnd):
                    source = fnd
                    found = True
                    break
            if not found:
                results = dict(failed=True, msg="could not find src in first_available_file list")
                return ReturnData(conn=conn, result=results)
        else:
            # Template the source path, then resolve it against the role layout.
            source = template.template(self.runner.basedir, source, inject)
            if '_original_file' in inject:
                source = utils.path_dwim_relative(inject['_original_file'], 'files', source, self.runner.basedir)
            else:
                source = utils.path_dwim(self.runner.basedir, source)

        # A list of source file tuples (full_path, relative_path) which will try to copy to the destination
        source_files = []

        # If source is a directory populate our list else source is a file and translate it to a tuple.
        if os.path.isdir(source):
            # Get the amount of spaces to remove to get the relative path.
            if source_trailing_slash:
                sz = len(source) + 1
            else:
                sz = len(source.rsplit('/', 1)[0]) + 1

            # Walk the directory and append the file tuples to source_files.
            for base_path, sub_folders, files in os.walk(source):
                for file in files:
                    full_path = os.path.join(base_path, file)
                    rel_path = full_path[sz:]
                    source_files.append((full_path, rel_path))

            # If it's recursive copy, destination is always a dir,
            # explicitly mark it so (note - copy module relies on this).
            if not conn.shell.path_has_trailing_slash(dest):
                dest = conn.shell.join_path(dest, '')
        else:
            source_files.append((source, os.path.basename(source)))

        changed = False
        diffs = []
        module_result = {"changed": False}

        # A register for if we executed a module.
        # Used to cut down on command calls when not recursive.
        module_executed = False

        # Tell _execute_module to delete the file if there is one file.
        delete_remote_tmp = (len(source_files) == 1)

        # If this is a recursive action create a tmp_path that we can share as the _exec_module create is too late.
        if not delete_remote_tmp:
            if "-tmp-" not in tmp_path:
                tmp_path = self.runner._make_tmp_path(conn)

        # expand any user home dir specifier
        dest = self.runner._remote_expand_user(conn, dest, tmp_path)

        for source_full, source_rel in source_files:
            # Generate a hash of the local file.
            local_checksum = utils.checksum(source_full)

            # If local_checksum is not defined we can't find the file so we should fail out.
            if local_checksum is None:
                result = dict(failed=True, msg="could not find src=%s" % source_full)
                return ReturnData(conn=conn, result=result)

            # This is kind of optimization - if user told us destination is
            # dir, do path manipulation right away, otherwise we still check
            # for dest being a dir via remote call below.
            if conn.shell.path_has_trailing_slash(dest):
                dest_file = conn.shell.join_path(dest, source_rel)
            else:
                dest_file = conn.shell.join_path(dest)

            # Attempt to get the remote checksum
            remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)

            if remote_checksum == '3':
                # The remote_checksum was executed on a directory.
                if content is not None:
                    # If source was defined as content remove the temporary file and fail out.
                    self._remove_tempfile_if_content_defined(content, content_tempfile)
                    result = dict(failed=True, msg="can not use content with a dir as dest")
                    return ReturnData(conn=conn, result=result)
                else:
                    # Append the relative source location to the destination and retry remote_checksum.
                    dest_file = conn.shell.join_path(dest, source_rel)
                    remote_checksum = self.runner._remote_checksum(conn, tmp_path, dest_file, inject)

            if remote_checksum != '1' and not force:
                # The remote file already exists and force=no was given, so
                # leave it alone and continue to the next iteration.
                # ('1' is the sentinel for "remote file does not exist".)
                continue

            if local_checksum != remote_checksum:
                # The checksums don't match and we will change or error out.
                changed = True

                # Create a tmp_path if missing only if this is not recursive.
                # If this is recursive we already have a tmp_path.
                if delete_remote_tmp:
                    if "-tmp-" not in tmp_path:
                        tmp_path = self.runner._make_tmp_path(conn)

                if self.runner.diff and not raw:
                    diff = self._get_diff_data(conn, tmp_path, inject, dest_file, source_full)
                else:
                    diff = {}

                if self.runner.noop_on_check(inject):
                    # Check mode: report the would-be change without transferring.
                    self._remove_tempfile_if_content_defined(content, content_tempfile)
                    diffs.append(diff)
                    changed = True
                    module_result = dict(changed=True)
                    continue

                # Define a remote directory that we will copy the file to.
                tmp_src = tmp_path + 'source'

                if not raw:
                    conn.put_file(source_full, tmp_src)
                else:
                    # Raw mode: push straight to the final destination.
                    conn.put_file(source_full, dest_file)

                # We have copied the file remotely and no longer require our content_tempfile
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                # fix file permissions when the copy is done as a different user
                if (self.runner.sudo and self.runner.sudo_user != 'root' or self.runner.su and self.runner.su_user != 'root') and not raw:
                    self.runner._remote_chmod(conn, 'a+r', tmp_src, tmp_path)

                if raw:
                    # Continue to next iteration if raw is defined.
                    continue

                # Run the copy module

                # src and dest here come after original and override them
                # we pass dest only to make sure it includes trailing slash in case of recursive copy
                new_module_args = dict(
                    src=tmp_src,
                    dest=dest,
                    original_basename=source_rel
                )
                if self.runner.noop_on_check(inject):
                    new_module_args['CHECKMODE'] = True
                if self.runner.no_log:
                    new_module_args['NO_LOG'] = True

                module_args_tmp = utils.merge_module_args(module_args, new_module_args)

                module_return = self.runner._execute_module(conn, tmp_path, 'win_copy', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            else:
                # no need to transfer the file, already correct md5, but still need to call
                # the file module in case we want to change attributes
                self._remove_tempfile_if_content_defined(content, content_tempfile)

                if raw:
                    # Continue to next iteration if raw is defined.
                    # self.runner._remove_tmp_path(conn, tmp_path)
                    continue

                tmp_src = tmp_path + source_rel

                # Build temporary module_args.
                new_module_args = dict(
                    src=tmp_src,
                    dest=dest,
                    original_basename=source_rel
                )
                if self.runner.noop_on_check(inject):
                    new_module_args['CHECKMODE'] = True
                if self.runner.no_log:
                    new_module_args['NO_LOG'] = True

                module_args_tmp = utils.merge_module_args(module_args, new_module_args)

                # Execute the file module.
                module_return = self.runner._execute_module(conn, tmp_path, 'win_file', module_args_tmp, inject=inject, complex_args=complex_args, delete_remote_tmp=delete_remote_tmp)
                module_executed = True

            module_result = module_return.result
            if not module_result.get('checksum'):
                module_result['checksum'] = local_checksum
            if module_result.get('failed') == True:
                return module_return
            if module_result.get('changed') == True:
                changed = True

        # Delete tmp_path if we were recursive or if we did not execute a module.
        if (not C.DEFAULT_KEEP_REMOTE_FILES and not delete_remote_tmp) \
            or (not C.DEFAULT_KEEP_REMOTE_FILES and delete_remote_tmp and not module_executed):
            self.runner._remove_tmp_path(conn, tmp_path)

        # the file module returns the file path as 'path', but
        # the copy module uses 'dest', so add it if it's not there
        if 'path' in module_result and 'dest' not in module_result:
            module_result['dest'] = module_result['path']

        # TODO: Support detailed status/diff for multiple files
        if len(source_files) == 1:
            result = module_result
        else:
            result = dict(dest=dest, src=source, changed=changed)
        if len(diffs) == 1:
            return ReturnData(conn=conn, result=result, diff=diffs[0])
        else:
            return ReturnData(conn=conn, result=result)

    def _create_content_tempfile(self, content):
        ''' Create a tempfile containing defined content; returns its path.
        The tempfile is removed on write failure, and the caller is
        responsible for removing it otherwise. '''
        fd, content_tempfile = tempfile.mkstemp()
        f = os.fdopen(fd, 'w')
        try:
            f.write(content)
        except Exception as err:
            os.remove(content_tempfile)
            raise Exception(err)
        finally:
            f.close()
        return content_tempfile

    def _get_diff_data(self, conn, tmp, inject, destination, source):
        ''' Build the before/after diff dict comparing the remote *destination*
        with the local *source* file.  Returns {} if the remote peek fails;
        binary or oversized files are flagged rather than diffed. '''
        peek_result = self.runner._execute_module(conn, tmp, 'win_file', "path=%s diff_peek=1" % destination, inject=inject, persist_files=True)

        if not peek_result.is_successful():
            return {}

        diff = {}
        if peek_result.result['state'] == 'absent':
            diff['before'] = ''
        elif peek_result.result['appears_binary']:
            diff['dst_binary'] = 1
        elif peek_result.result['size'] > utils.MAX_FILE_SIZE_FOR_DIFF:
            diff['dst_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
        else:
            dest_result = self.runner._execute_module(conn, tmp, 'slurp', "path=%s" % destination, inject=inject, persist_files=True)
            # NOTE(review): if 'content' is missing from the slurp result,
            # dest_contents below is unbound and raises NameError -- presumably
            # slurp always returns 'content' on success; verify against module.
            if 'content' in dest_result.result:
                dest_contents = dest_result.result['content']
                if dest_result.result['encoding'] == 'base64':
                    dest_contents = base64.b64decode(dest_contents)
                else:
                    raise Exception("unknown encoding, failed: %s" % dest_result.result)
            diff['before_header'] = destination
            diff['before'] = dest_contents

        # FIX: ensure the local file handle is closed (it was previously
        # leaked); behavior is otherwise unchanged.
        src = open(source)
        try:
            # Peek at the first 8K to detect binary content before reading it all.
            src_contents = src.read(8192)
            st = os.stat(source)
            if "\x00" in src_contents:
                diff['src_binary'] = 1
            elif st[stat.ST_SIZE] > utils.MAX_FILE_SIZE_FOR_DIFF:
                diff['src_larger'] = utils.MAX_FILE_SIZE_FOR_DIFF
            else:
                src.seek(0)
                diff['after_header'] = source
                diff['after'] = src.read()
        finally:
            src.close()

        return diff

    def _remove_tempfile_if_content_defined(self, content, content_tempfile):
        ''' Delete the temp file created for inline content, if any. '''
        if content is not None:
            os.remove(content_tempfile)


    def _result_key_merge(self, options, results):
        # add keys to file module results to mimic copy
        if 'path' in results.result and 'dest' not in results.result:
            results.result['dest'] = results.result['path']
            del results.result['path']
        return results
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue