diff --git a/extras/utilities/accelerate b/extras/utilities/accelerate deleted file mode 100644 index bd62471316c..00000000000 --- a/extras/utilities/accelerate +++ /dev/null @@ -1,727 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2013, James Cammarata -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: accelerate -short_description: Enable accelerated mode on remote node -description: - - This module launches an ephemeral I(accelerate) daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. - - Accelerated mode is AES encrypted -version_added: "1.3" -options: - port: - description: - - TCP port for the socket connection - required: false - default: 5099 - aliases: [] - timeout: - description: - - The number of seconds the socket will wait for data. If none is received when the timeout value is reached, the connection will be closed. - required: false - default: 300 - aliases: [] - minutes: - description: - - The I(accelerate) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 - ipv6: - description: - - The listener daemon on the remote host will bind to the ipv6 localhost socket - if this parameter is set to true. - required: false - default: false - multi_key: - description: - - When enabled, the daemon will open a local socket file which can be used by future daemon executions to - upload a new key to the already running daemon, so that multiple users can connect using different keys. - This access still requires an ssh connection as the uid for which the daemon is currently running. - required: false - default: no - version_added: "1.6" -notes: - - See the advanced playbooks chapter for more about using accelerated mode. -requirements: [ "python-keyczar" ] -author: James Cammarata -''' - -EXAMPLES = ''' -# To use accelerate mode, simply add "accelerate: true" to your play.
The initial -# key exchange and starting up of the daemon will occur over SSH, but all commands and -# subsequent actions will be conducted over the raw socket connection using AES encryption - -- hosts: devservers - accelerate: true - tasks: - - command: /usr/bin/anything -''' - -import base64 -import errno -import getpass -import json -import os -import os.path -import pwd -import signal -import socket -import struct -import sys -import syslog -import tempfile -import time -import traceback - -import SocketServer - -from datetime import datetime -from threading import Thread, Lock - -# import module snippets -# we must import this here at the top so we can use get_module_path() -from ansible.module_utils.basic import * - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) - -# the chunk size to read and send, assuming mtu 1500 and -# leaving room for base64 (+33%) encoding and header (100 bytes) -# 4 * (975/3) + 100 = 1400 -# which leaves room for the TCP/IP header -CHUNK_SIZE=10240 - -# FIXME: this all should be moved to module_common, as it's -# pretty much a copy from the callbacks/util code -DEBUG_LEVEL=0 -def log(msg, cap=0): - global DEBUG_LEVEL - if DEBUG_LEVEL >= cap: - syslog.syslog(syslog.LOG_NOTICE|syslog.LOG_DAEMON, msg) - -def v(msg): - log(msg, cap=1) - -def vv(msg): - log(msg, cap=2) - -def vvv(msg): - log(msg, cap=3) - -def vvvv(msg): - log(msg, cap=4) - - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -SOCKET_FILE = os.path.join(get_module_path(), '.ansible-accelerate', ".local.socket") - -def get_pid_location(module): - """ - Try to find a pid directory in the common locations, falling - back to the user's home directory if no others exist - """ - for dir in ['/var/run', '/var/lib/run', '/run', os.path.expanduser("~/")]: - try: - if os.path.isdir(dir) and os.access(dir, os.R_OK|os.W_OK): - return os.path.join(dir, '.accelerate.pid') - except: - pass - module.fail_json(msg="couldn't find any valid directory to use for the accelerate pid file") - - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes, pid_file): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - vvv("exiting pid %s" % pid) - # exit first parent - module.exit_json(msg="daemonized accelerate on port %s for %s minutes with pid %s" % (port, minutes, str(pid))) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, pid_file)) - pid_file = open(pid_file, "w") - pid_file.write("%s" % pid) - pid_file.close() - vvv("pid file written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful") - -class LocalSocketThread(Thread): - server = None - terminated = False - - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - self.server = kwargs.get('server') - 
Thread.__init__(self, group, target, name, args, kwargs, Verbose) - - def run(self): - try: - if os.path.exists(SOCKET_FILE): - os.remove(SOCKET_FILE) - else: - dir = os.path.dirname(SOCKET_FILE) - if os.path.exists(dir): - if not os.path.isdir(dir): - log("The socket file path (%s) exists, but is not a directory. No local connections will be available" % dir) - return - else: - # make sure the directory is accessible only to this - # user, as socket files derive their permissions from - # the directory that contains them - os.chmod(dir, 0700) - elif not os.path.exists(dir): - os.makedirs(dir, 0700) - except OSError: - pass - self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - self.s.bind(SOCKET_FILE) - self.s.listen(5) - while not self.terminated: - try: - conn, addr = self.s.accept() - vv("received local connection") - data = "" - while "\n" not in data: - data += conn.recv(2048) - try: - new_key = AesKey.Read(data.strip()) - found = False - for key in self.server.key_list: - try: - new_key.Decrypt(key.Encrypt("foo")) - found = True - break - except: - pass - if not found: - vv("adding new key to the key list") - self.server.key_list.append(new_key) - conn.sendall("OK\n") - else: - vv("key already exists in the key list, ignoring") - conn.sendall("EXISTS\n") - - # update the last event time so the server doesn't - # shutdown sooner than expected for new cliets - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - except Exception, e: - vv("key loaded locally was invalid, ignoring (%s)" % e) - conn.sendall("BADKEY\n") - finally: - try: - conn.close() - except: - pass - except: - pass - - def terminate(self): - self.terminated = True - self.s.shutdown(socket.SHUT_RDWR) - self.s.close() - -class ThreadWithReturnValue(Thread): - def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, Verbose=None): - Thread.__init__(self, group, target, name, args, kwargs, Verbose) - self._return = None - - def run(self): - if self._Thread__target is not None: - self._return = self._Thread__target(*self._Thread__args, - **self._Thread__kwargs) - - def join(self,timeout=None): - Thread.join(self, timeout=timeout) - return self._return - -class ThreadedTCPServer(SocketServer.ThreadingTCPServer): - key_list = [] - last_event = datetime.now() - last_event_lock = Lock() - def __init__(self, server_address, RequestHandlerClass, module, password, timeout, use_ipv6=False): - self.module = module - self.key_list.append(AesKey.Read(password)) - self.allow_reuse_address = True - self.timeout = timeout - - if use_ipv6: - self.address_family = socket.AF_INET6 - - if self.module.params.get('multi_key', False): - vv("starting thread to handle local connections for multiple keys") - self.local_thread = LocalSocketThread(kwargs=dict(server=self)) - self.local_thread.start() - - SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass) - - def shutdown(self): - self.local_thread.terminate() - self.running = False - SocketServer.ThreadingTCPServer.shutdown(self) - -class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler): - # the key to use for this connection - active_key = None - - def send_data(self, data): - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - packed_len = struct.pack('!Q', len(data)) - return self.request.sendall(packed_len + data) - - def recv_data(self): - header_len = 
8 # size of a packed unsigned long long - data = "" - vvvv("in recv_data(), waiting for the header") - while len(data) < header_len: - try: - d = self.request.recv(header_len - len(data)) - if not d: - vvv("received nothing, bailing out") - return None - data += d - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("in recv_data(), got the header, unpacking") - data_len = struct.unpack('!Q',data[:header_len])[0] - data = data[header_len:] - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - while len(data) < data_len: - try: - d = self.request.recv(data_len - len(data)) - if not d: - vvv("received nothing, bailing out") - return None - data += d - vvvv("data received so far (expecting %d): %d" % (data_len,len(data))) - except: - # probably got a connection reset - vvvv("exception received while waiting for recv(), returning None") - return None - vvvv("received all of the data, returning") - - try: - self.server.last_event_lock.acquire() - self.server.last_event = datetime.now() - finally: - self.server.last_event_lock.release() - - return data - - def handle(self): - try: - while True: - vvvv("waiting for data") - data = self.recv_data() - if not data: - vvvv("received nothing back from recv_data(), breaking out") - break - vvvv("got data, decrypting") - if not self.active_key: - for key in self.server.key_list: - try: - data = key.Decrypt(data) - self.active_key = key - break - except: - pass - else: - vv("bad decrypt, exiting the connection handler") - return - else: - try: - data = self.active_key.Decrypt(data) - except: - vv("bad decrypt, exiting the connection handler") - return - - vvvv("decryption done, loading json from the data") - data = json.loads(data) - - mode = data['mode'] - response = {} - last_pong = datetime.now() - if mode == 'command': - vvvv("received a command request, running it") - twrv = ThreadWithReturnValue(target=self.command, args=(data,)) - twrv.start() - response = None - while twrv.is_alive(): - if (datetime.now() - last_pong).seconds >= 15: - last_pong = datetime.now() - vvvv("command still running, sending keepalive packet") - data2 = json.dumps(dict(pong=True)) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - time.sleep(0.1) - response = twrv._return - vvvv("thread is done, response from join was %s" % response) - elif mode == 'put': - vvvv("received a put request, putting it") - response = self.put(data) - elif mode == 'fetch': - vvvv("received a fetch request, getting it") - response = self.fetch(data) - elif mode == 'validate_user': - vvvv("received a request to validate the user id") - response = self.validate_user(data) - - vvvv("response result is %s" % str(response)) - json_response = json.dumps(response) - vvvv("dumped json is %s" % json_response) - data2 = self.active_key.Encrypt(json_response) - vvvv("sending the response back to the controller") - self.send_data(data2) - vvvv("done sending the response") - - if mode == 'validate_user' and response.get('rc') == 1: - vvvv("detected a uid mismatch, shutting down") - self.server.shutdown() - except: - tb = traceback.format_exc() - log("encountered an unhandled exception in the handle() function") - log("error was:\n%s" % tb) - if self.active_key: - data2 = json.dumps(dict(rc=1, failed=True, msg="unhandled error in the handle() function")) - data2 = self.active_key.Encrypt(data2) - self.send_data(data2) - - def validate_user(self, data): - if 'username' not in data: - return 
dict(failed=True, msg='No username specified') - - vvvv("validating we're running as %s" % data['username']) - - # get the current uid - c_uid = os.getuid() - try: - # the target uid - t_uid = pwd.getpwnam(data['username']).pw_uid - except: - vvvv("could not find user %s" % data['username']) - return dict(failed=True, msg='could not find user %s' % data['username']) - - # and return rc=0 for success, rc=1 for failure - if c_uid == t_uid: - return dict(rc=0) - else: - return dict(rc=1) - - def command(self, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - - vvvv("executing: %s" % data['cmd']) - - use_unsafe_shell = False - executable = data.get('executable') - if executable: - use_unsafe_shell = True - - rc, stdout, stderr = self.server.module.run_command(data['cmd'], executable=executable, use_unsafe_shell=use_unsafe_shell, close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - vvvv("got stdout: %s" % stdout) - vvvv("got stderr: %s" % stderr) - - return dict(rc=rc, stdout=stdout, stderr=stderr) - - def fetch(self, data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - try: - fd = file(data['in_path'], 'rb') - fstat = os.stat(data['in_path']) - vvv("FETCH file is %d bytes" % fstat.st_size) - while fd.tell() < fstat.st_size: - data = fd.read(CHUNK_SIZE) - last = False - if fd.tell() >= fstat.st_size: - last = True - data = dict(data=base64.b64encode(data), last=last) - data = json.dumps(data) - data = self.active_key.Encrypt(data) - - if self.send_data(data): - return dict(failed=True, stderr="failed to send data") - - response = self.recv_data() - if not response: - log("failed to get a response, aborting") - return dict(failed=True, stderr="Failed to get a response from %s" % self.host) - response = self.active_key.Decrypt(response) - response = json.loads(response) - - if response.get('failed',False): - log("got a failed response from the master") - return dict(failed=True, stderr="Master reported failure, aborting transfer") - except Exception, e: - fd.close() - tb = traceback.format_exc() - log("failed to fetch the file: %s" % tb) - return dict(failed=True, stderr="Could not fetch the file: %s" % str(e)) - - fd.close() - return dict() - - def put(self, data): - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - final_path = None - if 'user' in data and data.get('user') != getpass.getuser(): - vvv("the target user doesn't match this user, we'll move the file into place via sudo") - tmp_path = os.path.expanduser('~/.ansible/tmp/') - if not os.path.exists(tmp_path): - try: - os.makedirs(tmp_path, 0700) - except: - return dict(failed=True, msg='could not create a temporary directory at %s' % tmp_path) - (fd,out_path) = tempfile.mkstemp(prefix='ansible.', dir=tmp_path) - out_fd = os.fdopen(fd, 'w', 0) - final_path = data['out_path'] - else: - out_path = data['out_path'] - out_fd = open(out_path, 'w') - - try: - bytes=0 - while True: - out = base64.b64decode(data['data']) - bytes += len(out) - out_fd.write(out) - response = json.dumps(dict()) - response = self.active_key.Encrypt(response) - self.send_data(response) - if data['last']: - break - data = self.recv_data() - if not data: - raise "" - data = 
self.active_key.Decrypt(data) - data = json.loads(data) - except: - out_fd.close() - tb = traceback.format_exc() - log("failed to put the file: %s" % tb) - return dict(failed=True, stdout="Could not write the file") - - vvvv("wrote %d bytes" % bytes) - out_fd.close() - - if final_path: - vvv("moving %s to %s" % (out_path, final_path)) - self.server.module.atomic_move(out_path, final_path) - return dict() - -def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file): - try: - daemonize_self(module, password, port, minutes, pid_file) - - def timer_handler(signum, _): - try: - server.last_event_lock.acquire() - td = datetime.now() - server.last_event - # older python timedelta objects don't have total_seconds(), - # so we use the formula from the docs to calculate it - total_seconds = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6 - if total_seconds >= minutes * 60: - log("server has been idle longer than the timeout, shutting down") - server.running = False - server.shutdown() - else: - # reschedule the check - vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60)) - signal.alarm(30) - except: - pass - finally: - server.last_event_lock.release() - - signal.signal(signal.SIGALRM, timer_handler) - signal.alarm(30) - - tries = 5 - while tries > 0: - try: - if use_ipv6: - address = ("::", port) - else: - address = ("0.0.0.0", port) - server = ThreadedTCPServer(address, ThreadedTCPRequestHandler, module, password, timeout, use_ipv6=use_ipv6) - server.allow_reuse_address = True - break - except Exception, e: - vv("Failed to create the TCP server (tries left = %d) (error: %s) " % (tries,e)) - tries -= 1 - time.sleep(0.2) - - if tries == 0: - vv("Maximum number of attempts to create the TCP server reached, bailing out") - raise Exception("max # of attempts to serve reached") - - # run the server in a separate thread to make signal handling work - server_thread = Thread(target=server.serve_forever, kwargs=dict(poll_interval=0.1)) - server_thread.start() - server.running = True - - v("serving!") - while server.running: - time.sleep(1) - - # wait for the thread to exit fully - server_thread.join() - - v("server thread terminated, exiting!") - sys.exit(0) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting accelerated mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - global DEBUG_LEVEL - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - ipv6=dict(required=False, default=False, type='bool'), - multi_key=dict(required=False, default=False, type='bool'), - timeout=dict(required=False, default=300), - password=dict(required=True), - minutes=dict(required=False, default=30), - debug=dict(required=False, default=0, type='int') - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = int(module.params['port']) - timeout = int(module.params['timeout']) - minutes = int(module.params['minutes']) - debug = int(module.params['debug']) - ipv6 = module.params['ipv6'] - multi_key = module.params['multi_key'] - - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed (on the remote side)") - - DEBUG_LEVEL=debug - pid_file = get_pid_location(module) - - daemon_pid = None - daemon_running = False - if os.path.exists(pid_file): - try: - daemon_pid = int(open(pid_file).read()) - try: - # sending signal 0 doesn't do anything to the - # process, other than tell the calling program - # whether other signals can be sent - 
os.kill(daemon_pid, 0) - except OSError, e: - if e.errno == errno.EPERM: - # no permissions means the pid is probably - # running, but as a different user, so fail - module.fail_json(msg="the accelerate daemon appears to be running as a different user that this user cannot access (pid=%d)" % daemon_pid) - else: - daemon_running = True - except ValueError: - # invalid pid file, unlink it - otherwise we don't care - try: - os.unlink(pid_file) - except: - pass - - if daemon_running and multi_key: - # try to connect to the file socket for the daemon if it exists - s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - try: - s.connect(SOCKET_FILE) - s.sendall(password + '\n') - data = "" - while '\n' not in data: - data += s.recv(2048) - res = data.strip() - except: - module.fail_json(msg="failed to connect to the local socket file") - finally: - try: - s.close() - except: - pass - - if res in ("OK", "EXISTS"): - module.exit_json(msg="transferred new key to the existing daemon") - else: - module.fail_json(msg="could not transfer new key: %s" % data.strip()) - else: - # try to start up the daemon - daemonize(module, password, port, timeout, minutes, ipv6, pid_file) - -main() diff --git a/extras/utilities/assert b/extras/utilities/assert deleted file mode 100644 index f5963d60cd7..00000000000 --- a/extras/utilities/assert +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: assert -short_description: Fail with custom message -description: - - This module asserts that a given expression is true and can be a simpler alternative to the 'fail' module in some cases. -version_added: "1.5" -options: - that: - description: - - "A string expression of the same form that can be passed to the 'when' statement" - - "Alternatively, a list of string expressions" - required: true -author: Michael DeHaan -''' - -EXAMPLES = ''' -- assert: { that: "ansible_os_family != 'RedHat'" } - -- assert: - that: - - "'foo' in some_command_result.stdout" - - "number_of_the_counting == 3" -''' diff --git a/extras/utilities/debug b/extras/utilities/debug deleted file mode 100644 index 2df68ca0830..00000000000 --- a/extras/utilities/debug +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: debug -short_description: Print statements during execution -description: - - This module prints statements during execution and can be useful - for debugging variables or expressions without necessarily halting - the playbook. Useful for debugging together with the 'when:' directive. - -version_added: "0.8" -options: - msg: - description: - - The customized message that is printed. If omitted, prints a generic - message. - required: false - default: "Hello world!" - var: - description: - - A variable name to debug. Mutually exclusive with the 'msg' option. -author: Dag Wieers, Michael DeHaan -''' - -EXAMPLES = ''' -# Example that prints the loopback address and gateway for each host -- debug: msg="System {{ inventory_hostname }} has uuid {{ ansible_product_uuid }}" - -- debug: msg="System {{ inventory_hostname }} has gateway {{ ansible_default_ipv4.gateway }}" - when: ansible_default_ipv4.gateway is defined - -- shell: /usr/bin/uptime - register: result - -- debug: var=result - -- name: Display all variables/facts known for a host - debug: var=hostvars[inventory_hostname] -''' diff --git a/extras/utilities/fail b/extras/utilities/fail deleted file mode 100644 index 23f5b83668c..00000000000 --- a/extras/utilities/fail +++ /dev/null @@ -1,44 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2012 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fail -short_description: Fail with custom message -description: - - This module fails the progress with a custom message. It can be - useful for bailing out when a certain condition is met using C(when). -version_added: "0.8" -options: - msg: - description: - - The customized message used for failing execution. If omitted, - fail will simply bail out with a generic message. - required: false - default: "'Failed as requested from task'" - -author: Dag Wieers -''' - -EXAMPLES = ''' -# Example playbook using fail and when together -- fail: msg="The system may not be provisioned according to the CMDB status." - when: cmdb_status != "to-be-staged" -''' diff --git a/extras/utilities/fireball b/extras/utilities/fireball deleted file mode 100644 index 43760969a89..00000000000 --- a/extras/utilities/fireball +++ /dev/null @@ -1,280 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Michael DeHaan -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -module: fireball -short_description: Enable fireball mode on remote node -description: - - This modules launches an ephemeral I(fireball) ZeroMQ message bus daemon on the remote node which - Ansible can use to communicate with nodes at high speed. - - The daemon listens on a configurable port for a configurable amount of time. - - Starting a new fireball as a given user terminates any existing user fireballs. - - Fireball mode is AES encrypted -version_added: "0.9" -options: - port: - description: - - TCP port for ZeroMQ - required: false - default: 5099 - aliases: [] - minutes: - description: - - The I(fireball) listener daemon is started on nodes and will stay around for - this number of minutes before turning itself off. - required: false - default: 30 -notes: - - See the advanced playbooks chapter for more about using fireball mode. -requirements: [ "zmq", "keyczar" ] -author: Michael DeHaan -''' - -EXAMPLES = ''' -# This example playbook has two plays: the first launches 'fireball' mode on all hosts via SSH, and -# the second actually starts using it for subsequent management over the fireball connection - -- hosts: devservers - gather_facts: false - connection: ssh - sudo: yes - tasks: - - action: fireball - -- hosts: devservers - connection: fireball - tasks: - - command: /usr/bin/anything -''' - -import os -import sys -import shutil -import time -import base64 -import syslog -import signal -import time -import signal -import traceback - -syslog.openlog('ansible-%s' % os.path.basename(__file__)) -PIDFILE = os.path.expanduser("~/.fireball.pid") - -def log(msg): - syslog.syslog(syslog.LOG_NOTICE, msg) - -if os.path.exists(PIDFILE): - try: - data = int(open(PIDFILE).read()) - try: - os.kill(data, signal.SIGKILL) - except OSError: - pass - except ValueError: - pass - os.unlink(PIDFILE) - -HAS_ZMQ = False -try: - import zmq - HAS_ZMQ = True -except ImportError: - pass - -HAS_KEYCZAR = False -try: - from keyczar.keys import AesKey - HAS_KEYCZAR = True -except ImportError: - pass - -# NOTE: this shares a fair amount of code in common with async_wrapper, if async_wrapper were a new module we could move -# this into utils.module_common and probably should anyway - -def daemonize_self(module, password, port, minutes): - # daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012 - try: - pid = os.fork() - if pid > 0: - log("exiting pid %s" % pid) - # exit first parent - module.exit_json(msg="daemonized fireball on port %s for %s minutes" % (port, minutes)) - except OSError, e: - log("fork #1 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - # decouple from parent environment - os.chdir("/") - os.setsid() - os.umask(022) - - # do second fork - try: - pid = os.fork() - if pid > 0: - log("daemon pid %s, writing %s" % (pid, PIDFILE)) - pid_file = open(PIDFILE, "w") - pid_file.write("%s" % pid) - pid_file.close() - log("pidfile written") - sys.exit(0) - except OSError, e: - log("fork #2 failed: %d (%s)" % (e.errno, e.strerror)) - sys.exit(1) - - dev_null = file('/dev/null','rw') - os.dup2(dev_null.fileno(), sys.stdin.fileno()) - os.dup2(dev_null.fileno(), sys.stdout.fileno()) - os.dup2(dev_null.fileno(), sys.stderr.fileno()) - log("daemonizing successful (%s,%s)" % (password, port)) - -def command(module, data): - if 'cmd' not in data: - return dict(failed=True, msg='internal 
error: cmd is required') - if 'tmp_path' not in data: - return dict(failed=True, msg='internal error: tmp_path is required') - if 'executable' not in data: - return dict(failed=True, msg='internal error: executable is required') - - log("executing: %s" % data['cmd']) - rc, stdout, stderr = module.run_command(data['cmd'], executable=data['executable'], close_fds=True) - if stdout is None: - stdout = '' - if stderr is None: - stderr = '' - log("got stdout: %s" % stdout) - - return dict(rc=rc, stdout=stdout, stderr=stderr) - -def fetch(data): - if 'in_path' not in data: - return dict(failed=True, msg='internal error: in_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. - - fh = open(data['in_path']) - data = base64.b64encode(fh.read()) - return dict(data=data) - -def put(data): - - if 'data' not in data: - return dict(failed=True, msg='internal error: data is required') - if 'out_path' not in data: - return dict(failed=True, msg='internal error: out_path is required') - - # FIXME: should probably support chunked file transfer for binary files - # at some point. For now, just base64 encodes the file - # so don't use it to move ISOs, use rsync. - - fh = open(data['out_path'], 'w') - fh.write(base64.b64decode(data['data'])) - fh.close() - - return dict() - -def serve(module, password, port, minutes): - - - log("serving") - context = zmq.Context() - socket = context.socket(zmq.REP) - addr = "tcp://*:%s" % port - log("zmq serving on %s" % addr) - socket.bind(addr) - - # password isn't so much a password but a serialized AesKey object that we xferred over SSH - # password as a variable in ansible is never logged though, so it serves well - - key = AesKey.Read(password) - - while True: - - data = socket.recv() - - try: - data = key.Decrypt(data) - except: - continue - - data = json.loads(data) - - mode = data['mode'] - response = {} - - if mode == 'command': - response = command(module, data) - elif mode == 'put': - response = put(data) - elif mode == 'fetch': - response = fetch(data) - - data2 = json.dumps(response) - data2 = key.Encrypt(data2) - socket.send(data2) - -def daemonize(module, password, port, minutes): - - try: - daemonize_self(module, password, port, minutes) - - def catcher(signum, _): - module.exit_json(msg='timer expired') - - signal.signal(signal.SIGALRM, catcher) - signal.setitimer(signal.ITIMER_REAL, 60 * minutes) - - - serve(module, password, port, minutes) - except Exception, e: - tb = traceback.format_exc() - log("exception caught, exiting fireball mode: %s\n%s" % (e, tb)) - sys.exit(0) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - port=dict(required=False, default=5099), - password=dict(required=True), - minutes=dict(required=False, default=30), - ), - supports_check_mode=True - ) - - password = base64.b64decode(module.params['password']) - port = module.params['port'] - minutes = int(module.params['minutes']) - - if not HAS_ZMQ: - module.fail_json(msg="zmq is not installed") - if not HAS_KEYCZAR: - module.fail_json(msg="keyczar is not installed") - - daemonize(module, password, port, minutes) - - -# import module snippets -from ansible.module_utils.basic import * -main() diff --git a/extras/utilities/include_vars b/extras/utilities/include_vars deleted file mode 100644 index 4c7c39d9035..00000000000 --- a/extras/utilities/include_vars +++ /dev/null @@ -1,39 +0,0 @@ -# -*- mode: python -*- - -# Ansible is 
distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Benno Joy -module: include_vars -short_description: Load variables from files, dynamically within a task. -description: - - Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from. -options: - free-form: - description: - - The file name from which variables should be loaded, if called from a role it will look for - the file in vars/ subdirectory of the role, otherwise the path would be relative to playbook. An absolute path can also be provided. - required: true -version_added: "1.4" -''' - -EXAMPLES = """ -# Conditionally decide to load in variables when x is 0, otherwise do not. -- include_vars: contingency_plan.yml - when: x == 0 - -# Load a variable file based on the OS type, or a default if not found. -- include_vars: "{{ item }}" - with_first_found: - - "{{ ansible_distribution }}.yml" - - "{{ ansible_os_family }}.yml" - - "default.yml" - -""" diff --git a/extras/utilities/pause b/extras/utilities/pause deleted file mode 100644 index 6e8a83afe61..00000000000 --- a/extras/utilities/pause +++ /dev/null @@ -1,40 +0,0 @@ -# -*- mode: python -*- - -DOCUMENTATION = ''' ---- -module: pause -short_description: Pause playbook execution -description: - - Pauses playbook execution for a set amount of time, or until a prompt is acknowledged. All parameters are optional. The default behavior is to pause with a prompt. - - "You can use C(ctrl+c) if you wish to advance a pause earlier than it is set to expire or if you need to abort a playbook run entirely. To continue early: press C(ctrl+c) and then C(c). To abort a playbook: press C(ctrl+c) and then C(a)." - - "The pause module integrates into async/parallelized playbooks without any special considerations (see also: Rolling Updates). When using pauses with the C(serial) playbook parameter (as in rolling updates) you are only prompted once for the current group of hosts." -version_added: "0.8" -options: - minutes: - description: - - Number of minutes to pause for. - required: false - default: null - seconds: - description: - - Number of seconds to pause for. - required: false - default: null - prompt: - description: - - Optional text to use for the prompt message. - required: false - default: null -author: Tim Bielawa -''' - -EXAMPLES = ''' -# Pause for 5 minutes to build app cache. -- pause: minutes=5 - -# Pause until you can verify updates to an application were successful. -- pause: - -# A helpful reminder of what to look out for post-update. -- pause: prompt="Make sure org.foo.FooOverload exception is not present" -''' diff --git a/extras/utilities/set_fact b/extras/utilities/set_fact deleted file mode 100644 index ea67cc43a3f..00000000000 --- a/extras/utilities/set_fact +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# Copyright 2013 Dag Wieers -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -DOCUMENTATION = ''' ---- -author: Dag Wieers -module: set_fact -short_description: Set host facts from a task -description: - - This module allows setting new variables. Variables are set on a host-by-host basis - just like facts discovered by the setup module. - - These variables will survive between plays. -options: - key_value: - description: - - The C(set_fact) module takes key=value pairs as variables to set - in the playbook scope. Or alternatively, accepts complex arguments - using the C(args:) statement. - required: true - default: null -version_added: "1.2" -''' - -EXAMPLES = ''' -# Example setting host facts using key=value pairs -- set_fact: one_fact="something" other_fact="{{ local_var * 2 }}" - -# Example setting host facts using complex arguments -- set_fact: - one_fact: something - other_fact: "{{ local_var * 2 }}" - -# As of 1.8, Ansible will convert boolean strings ('true', 'false', 'yes', 'no') -# to proper boolean values when using the key=value syntax, however it is still -# recommended that booleans be set using the complex argument style: -- set_fact: - one_fact: true - other_fact: false - -''' diff --git a/extras/utilities/wait_for b/extras/utilities/wait_for deleted file mode 100644 index 5e02712ddff..00000000000 --- a/extras/utilities/wait_for +++ /dev/null @@ -1,462 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -# (c) 2012, Jeroen Hoekx -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . - -import socket -import datetime -import time -import sys -import re -import binascii - -HAS_PSUTIL = False -try: - import psutil - HAS_PSUTIL = True - # just because we can import it on Linux doesn't mean we will use it -except ImportError: - pass - -DOCUMENTATION = ''' ---- -module: wait_for -short_description: Waits for a condition before continuing. -description: - - Waiting for a port to become available is useful for when services - are not immediately available after their init scripts return - - which is true of certain Java application servers. It is also - useful when starting guests with the M(virt) module and - needing to pause until they are ready. - - This module can also be used to wait for a regex match a string to be present in a file. - - In 1.6 and later, this module can - also be used to wait for a file to be available or absent on the - filesystem. - - In 1.8 and later, this module can also be used to wait for active - connections to be closed before continuing, useful if a node - is being rotated out of a load balancer pool. 
-version_added: "0.7" -options: - host: - description: - - hostname or IP address to wait for - required: false - default: "127.0.0.1" - aliases: [] - timeout: - description: - - maximum number of seconds to wait for - required: false - default: 300 - delay: - description: - - number of seconds to wait before starting to poll - required: false - default: 0 - port: - description: - - port number to poll - required: false - state: - description: - - one of C(present), C(started), C(stopped), C(absent), or C(drained) - - When checking a port, C(started) will ensure the port is open, C(stopped) will check that it is closed, and C(drained) will check for active connections - - When checking for a file or a search string, C(present) or C(started) will ensure that the file or string is present before continuing, and C(absent) will check that the file is absent or removed - choices: [ "present", "started", "stopped", "absent", "drained" ] - default: "started" - path: - version_added: "1.4" - required: false - description: - - path to a file on the filesystem that must exist before continuing - search_regex: - version_added: "1.4" - required: false - description: - - Can be used to match a string in either a file or a socket connection. Defaults to a multiline regex. - exclude_hosts: - version_added: "1.8" - required: false - description: - - list of hosts or IPs to ignore when looking for active TCP connections for C(drained) state -notes: - - The ability to use search_regex with a port connection was added in 1.7. -requirements: [] -author: Jeroen Hoekx, John Jarvis, Andrii Radyk -''' - -EXAMPLES = ''' - -# wait 300 seconds for port 8000 to become open on the host, don't start checking for 10 seconds -- wait_for: port=8000 delay=10 - -# wait 300 seconds for port 8000 of any IP to close active connections, don't start checking for 10 seconds -- wait_for: host=0.0.0.0 port=8000 delay=10 state=drained - -# wait 300 seconds for port 8000 of any IP to close active connections, ignoring connections for specified hosts -- wait_for: host=0.0.0.0 port=8000 state=drained exclude_hosts=10.2.1.2,10.2.1.3 - -# wait until the file /tmp/foo is present before continuing -- wait_for: path=/tmp/foo - -# wait until the string "completed" is in the file /tmp/foo before continuing -- wait_for: path=/tmp/foo search_regex=completed - -# wait until the lock file is removed -- wait_for: path=/var/lock/file.lock state=absent - -# wait until the process is finished and pid was destroyed -- wait_for: path=/proc/3466/status state=absent - -# Wait 300 seconds for port 22 to become open and contain "OpenSSH", don't start checking for 10 seconds -- local_action: wait_for port=22 host="{{ inventory_hostname }}" search_regex=OpenSSH delay=10 - -''' - -class TCPConnectionInfo(object): - """ - This is a generic TCP Connection Info strategy class that relies - on the psutil module, which is not ideal for targets, but necessary - for cross platform support. - - A subclass may wish to override some or all of these methods. - - _get_exclude_ips() - - get_active_connections() - - All subclasses MUST define platform and distribution (which may be None).
- """ - platform = 'Generic' - distribution = None - - match_all_ips = { - socket.AF_INET: '0.0.0.0', - socket.AF_INET6: '::', - } - connection_states = { - '01': 'ESTABLISHED', - '02': 'SYN_SENT', - '03': 'SYN_RECV', - '04': 'FIN_WAIT1', - '05': 'FIN_WAIT2', - '06': 'TIME_WAIT', - } - - def __new__(cls, *args, **kwargs): - return load_platform_subclass(TCPConnectionInfo, args, kwargs) - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_ip(self.module.params['host']) - self.port = int(self.module.params['port']) - self.exclude_ips = self._get_exclude_ips() - if not HAS_PSUTIL: - module.fail_json(msg="psutil module required for wait_for") - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h)[1] for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - for p in psutil.process_iter(): - connections = p.get_connections(kind='inet') - for conn in connections: - if conn.status not in self.connection_states.values(): - continue - (local_ip, local_port) = conn.local_address - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = conn.remote_address - if remote_ip not in self.exclude_ips: - active_connections += 1 - return active_connections - - -# =========================================== -# Subclass: Linux - -class LinuxTCPConnectionInfo(TCPConnectionInfo): - """ - This is a TCP Connection Info evaluation strategy class - that utilizes information from Linux's procfs. While less universal, - does allow Linux targets to not require an additional library. - """ - platform = 'Linux' - distribution = None - - source_file = { - socket.AF_INET: '/proc/net/tcp', - socket.AF_INET6: '/proc/net/tcp6' - } - match_all_ips = { - socket.AF_INET: '00000000', - socket.AF_INET6: '00000000000000000000000000000000', - } - local_address_field = 1 - remote_address_field = 2 - connection_state_field = 3 - - def __init__(self, module): - self.module = module - (self.family, self.ip) = _convert_host_to_hex(module.params['host']) - self.port = "%0.4X" % int(module.params['port']) - self.exclude_ips = self._get_exclude_ips() - - def _get_exclude_ips(self): - if self.module.params['exclude_hosts'] is None: - return [] - exclude_hosts = self.module.params['exclude_hosts'].split(',') - return [ _convert_host_to_hex(h) for h in exclude_hosts ] - - def get_active_connections_count(self): - active_connections = 0 - f = open(self.source_file[self.family]) - for tcp_connection in f.readlines(): - tcp_connection = tcp_connection.strip().split(' ') - if tcp_connection[self.local_address_field] == 'local_address': - continue - if tcp_connection[self.connection_state_field] not in self.connection_states: - continue - (local_ip, local_port) = tcp_connection[self.local_address_field].split(':') - if self.port == local_port and self.ip in [self.match_all_ips[self.family], local_ip]: - (remote_ip, remote_port) = tcp_connection[self.remote_address_field].split(':') - if remote_ip not in self.exclude_ips: - active_connections += 1 - f.close() - return active_connections - - -def _convert_host_to_ip(host): - """ - Perform forward DNS resolution on host, IP will give the same IP - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and IP - """ - addrinfo = socket.getaddrinfo(host, 80, 0, 0, 
socket.SOL_TCP)[0] - return (addrinfo[0], addrinfo[4][0]) - -def _convert_host_to_hex(host): - """ - Convert the provided host to the format in /proc/net/tcp* - - /proc/net/tcp uses little-endian four byte hex for ipv4 - /proc/net/tcp6 uses little-endian per 4B word for ipv6 - - Args: - host: String with either hostname, IPv4, or IPv6 address - - Returns: - Tuple containing address family and the little-endian converted host - """ - (family, ip) = _convert_host_to_ip(host) - hexed = binascii.hexlify(socket.inet_pton(family, ip)).upper() - if family == socket.AF_INET: - hexed = _little_endian_convert_32bit(hexed) - elif family == socket.AF_INET6: - # xrange loops through each 8 character (4B) set in the 128bit total - hexed = "".join([ _little_endian_convert_32bit(hexed[x:x+8]) for x in xrange(0, 32, 8) ]) - return (family, hexed) - -def _little_endian_convert_32bit(block): - """ - Convert to little-endian, effectively transposing - the order of the four byte word - 12345678 -> 78563412 - - Args: - block: String containing a 4 byte hex representation - - Returns: - String containing the little-endian converted block - """ - # xrange starts at 6, and increments by -2 until it reaches -2 - # which lets us start at the end of the string block and work to the beginning - return "".join([ block[x:x+2] for x in xrange(6, -2, -2) ]) - -def main(): - - module = AnsibleModule( - argument_spec = dict( - host=dict(default='127.0.0.1'), - timeout=dict(default=300), - connect_timeout=dict(default=5), - delay=dict(default=0), - port=dict(default=None), - path=dict(default=None), - search_regex=dict(default=None), - state=dict(default='started', choices=['started', 'stopped', 'present', 'absent', 'drained']), - exclude_hosts=dict(default=None, type='list') - ), - ) - - params = module.params - - host = params['host'] - timeout = int(params['timeout']) - connect_timeout = int(params['connect_timeout']) - delay = int(params['delay']) - if params['port']: - port = int(params['port']) - else: - port = None - state = params['state'] - path = params['path'] - search_regex = params['search_regex'] - if params['exclude_hosts']: - exclude_hosts = params['exclude_hosts'].split(',') - else: - exclude_hosts = [] - - if port and path: - module.fail_json(msg="port and path parameter can not both be passed to wait_for") - if path and state == 'stopped': - module.fail_json(msg="state=stopped should only be used for checking a port in the wait_for module") - if path and state == 'drained': - module.fail_json(msg="state=drained should only be used for checking a port in the wait_for module") - if exclude_hosts and state != 'drained': - module.fail_json(msg="exclude_hosts should only be used with state=drained") - - start = datetime.datetime.now() - - if delay: - time.sleep(delay) - - if state in [ 'stopped', 'absent' ]: - ### first wait for the stop condition - end = start + datetime.timedelta(seconds=timeout) - - while datetime.datetime.now() < end: - if path: - try: - f = open(path) - f.close() - time.sleep(1) - pass - except IOError: - break - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - s.shutdown(socket.SHUT_RDWR) - s.close() - time.sleep(1) - except: - break - else: - elapsed = datetime.datetime.now() - start - if port: - module.fail_json(msg="Timeout when waiting for %s:%s to stop." % (host, port), elapsed=elapsed.seconds) - elif path: - module.fail_json(msg="Timeout when waiting for %s to be absent."
% (path), elapsed=elapsed.seconds) - - elif state in ['started', 'present']: - ### wait for start condition - end = start + datetime.timedelta(seconds=timeout) - while datetime.datetime.now() < end: - if path: - try: - os.stat(path) - if search_regex: - try: - f = open(path) - try: - if re.search(search_regex, f.read(), re.MULTILINE): - break - else: - time.sleep(1) - finally: - f.close() - except IOError: - time.sleep(1) - pass - else: - break - except OSError, e: - # File not present - if e.errno == 2: - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds) - elif port: - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - s.settimeout(connect_timeout) - try: - s.connect( (host, port) ) - if search_regex: - data = '' - matched = False - while 1: - data += s.recv(1024) - if not data: - break - elif re.search(search_regex, data, re.MULTILINE): - matched = True - break - if matched: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - else: - s.shutdown(socket.SHUT_RDWR) - s.close() - break - except: - time.sleep(1) - pass - else: - elapsed = datetime.datetime.now() - start - if port: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s:%s" % (search_regex, host, port), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for %s:%s" % (host, port), elapsed=elapsed.seconds) - elif path: - if search_regex: - module.fail_json(msg="Timeout when waiting for search string %s in %s" % (search_regex, path), elapsed=elapsed.seconds) - else: - module.fail_json(msg="Timeout when waiting for file %s" % (path), elapsed=elapsed.seconds) - - elif state == 'drained': - ### wait until all active connections are gone - end = start + datetime.timedelta(seconds=timeout) - tcpconns = TCPConnectionInfo(module) - while datetime.datetime.now() < end: - try: - if tcpconns.get_active_connections_count() == 0: - break - except IOError: - pass - time.sleep(1) - else: - elapsed = datetime.datetime.now() - start - module.fail_json(msg="Timeout when waiting for %s:%s to drain" % (host, port), elapsed=elapsed.seconds) - - elapsed = datetime.datetime.now() - start - module.exit_json(state=state, port=port, search_regex=search_regex, path=path, elapsed=elapsed.seconds) - -# import module snippets -from ansible.module_utils.basic import * -main()
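
Note on the removed accelerate module: its send_data/recv_data methods frame every message on the TCP socket with an 8-byte big-endian unsigned length header (struct format '!Q') followed by the payload, and recv_data loops on recv() until the announced number of bytes has arrived. The following is a minimal standalone sketch of that framing for reference only; the helper names (send_framed, recv_framed, _recv_exact, HEADER_LEN) are illustrative and not part of the deleted code, and it uses Python 3 bytes handling whereas the original module was written for Python 2.

import struct

HEADER_LEN = 8  # struct.pack('!Q', ...) yields an 8-byte unsigned long long

def send_framed(sock, payload):
    # Prefix the payload with its length so the peer knows exactly
    # how many bytes to read (mirrors send_data in the removed module).
    sock.sendall(struct.pack('!Q', len(payload)) + payload)

def _recv_exact(sock, count):
    # Read exactly `count` bytes, or return None if the peer closes early.
    buf = b''
    while len(buf) < count:
        chunk = sock.recv(count - len(buf))
        if not chunk:
            return None
        buf += chunk
    return buf

def recv_framed(sock):
    # Read the 8-byte header, then the payload it announces
    # (mirrors recv_data in the removed module).
    header = _recv_exact(sock, HEADER_LEN)
    if header is None:
        return None
    (length,) = struct.unpack('!Q', header)
    return _recv_exact(sock, length)

In the deleted daemon the payload on either side of this framing was a keyczar AES-encrypted JSON document; the framing itself carries no encryption.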