@@ -22,6 +22,7 @@ __metaclass__ = type
 
 import time
 
+from ansible.errors import AnsibleError
 from ansible.plugins.strategies import StrategyBase
 from ansible.utils.debug import debug
 
@@ -42,66 +43,112 @@ class StrategyModule(StrategyBase):
 
         # the last host to be given a task
         last_host = 0
 
         result = True
 
         work_to_do = True
         while work_to_do and not self._tqm._terminated:
 
-            hosts_left = self.get_hosts_remaining()
+            hosts_left = self.get_hosts_remaining(iterator._play)
             if len(hosts_left) == 0:
-                self._callback.playbook_on_no_hosts_remaining()
+                self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
                 result = False
                 break
 
-            # using .qsize() is a best estimate anyway, due to the
-            # multiprocessing/threading concerns (per the python docs)
-            if 1: #if self._job_queue.qsize() < len(hosts_left):
-                work_to_do = False # assume we have no more work to do
-                starting_host = last_host # save current position so we know when we've
-                                          # looped back around and need to break
+            work_to_do = False # assume we have no more work to do
+            starting_host = last_host # save current position so we know when we've
+                                      # looped back around and need to break
+            host_results = []
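+            # note: results harvested from completed tasks accumulate here on each pass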
 
-                # try and find an unblocked host with a task to run
-                while True:
-                    host = hosts_left[last_host]
-                    host_name = host.get_name()
+            # try and find an unblocked host with a task to run
+            while True:
+                host = hosts_left[last_host]
+                debug("next free host: %s" % host)
+                host_name = host.get_name()
 
-                    # peek at the next task for the host, to see if there's
-                    # anything to do do for this host
-                    if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and iterator.get_next_task_for_host(host, peek=True):
+                # peek at the next task for the host, to see if there's
+                # anything to do for this host
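+                # (peek=True examines the task without advancing the host's position in the iterator)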
+                (state, task) = iterator.get_next_task_for_host(host, peek=True)
+                debug("free host state: %s" % state)
+                debug("free host task: %s" % task)
+                if host_name not in self._tqm._failed_hosts and host_name not in self._tqm._unreachable_hosts and task:
 
-                        # FIXME: check task tags, etc. here as we do in linear
-                        # FIXME: handle meta tasks here, which will require a tweak
-                        #        to run_handlers so that only the handlers on this host
-                        #        are flushed and not all
-
-                        # set the flag so the outer loop knows we've still found
-                        # some work which needs to be done
-                        work_to_do = True
+                    # set the flag so the outer loop knows we've still found
+                    # some work which needs to be done
+                    work_to_do = True
+
+                    debug("this host has work to do")
 
-                        # check to see if this host is blocked (still executing a previous task)
-                        if not host_name in self._blocked_hosts:
-                            # pop the task, mark the host blocked, and queue it
-                            self._blocked_hosts[host_name] = True
-                            task = iterator.get_next_task_for_host(host)
-                            #self._callback.playbook_on_task_start(task.get_name(), False)
-                            self._queue_task(iterator._play, host, task, connection_info)
+                    # check to see if this host is blocked (still executing a previous task)
+                    if host_name not in self._blocked_hosts:
+                        # pop the task, mark the host blocked, and queue it
+                        self._blocked_hosts[host_name] = True
+                        (state, task) = iterator.get_next_task_for_host(host)
+
+                        debug("getting variables")
+                        task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=task)
+                        debug("done getting variables")
+
+                        # check to see if this task should be skipped, due to it being a member of a
+                        # role which has already run (and whether that role allows duplicate execution)
+                        if task._role and task._role.has_run():
+                            # If there is no metadata, the default behavior is to not allow duplicates,
+                            # if there is metadata, check to see if the allow_duplicates flag was set to true
+                            if task._role._metadata is None or task._role._metadata and not task._role._metadata.allow_duplicates:
+                                debug("'%s' skipped because role has already run" % task)
+                                continue
+
+                        if not task.evaluate_tags(connection_info.only_tags, connection_info.skip_tags, task_vars) and task.action != 'setup':
+                            debug("'%s' failed tag evaluation" % task)
+                            continue
+
+                        if task.action == 'meta':
+                            # meta tasks store their args in the _raw_params field of args,
+                            # since they do not use k=v pairs, so get that
+                            meta_action = task.args.get('_raw_params')
+                            if meta_action == 'noop':
+                                # FIXME: issue a callback for the noop here?
+                                continue
+                            elif meta_action == 'flush_handlers':
+                                # FIXME: in the 'free' mode, flushing handlers should result in
+                                #        only those handlers notified for the host doing the flush
+                                self.run_handlers(iterator, connection_info)
+                            else:
+                                raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)
+
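+                            # note: meta tasks were handled inline above rather than queued to a
+                            # worker, so the host can be unblocked immediately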
+                            self._blocked_hosts[host_name] = False
+                        else:
+                            self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
+                            self._queue_task(host, task, task_vars, connection_info)
 
-                    # move on to the next host and make sure we
-                    # haven't gone past the end of our hosts list
-                    last_host += 1
-                    if last_host > len(hosts_left) - 1:
-                        last_host = 0
+                # move on to the next host and make sure we
+                # haven't gone past the end of our hosts list
+                last_host += 1
+                if last_host > len(hosts_left) - 1:
+                    last_host = 0
 
-                    # if we've looped around back to the start, break out
-                    if last_host == starting_host:
-                        break
+                # if we've looped around back to the start, break out
+                if last_host == starting_host:
+                    break
 
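+            # harvest any task results which are already available (this does not block)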
+            results = self._process_pending_results(iterator)
+            host_results.extend(results)
+
             # pause briefly so we don't spin lock
             time.sleep(0.05)
 
         try:
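+            # note: unlike the per-pass harvesting above, this blocks until all queued tasks complete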
-            self._wait_for_pending_results()
-        except:
+            results = self._wait_on_pending_results(iterator)
+            host_results.extend(results)
+        except Exception as e:
             # FIXME: ctrl+c can cause some failures here, so catch them
             # with the appropriate error type
+            print("wtf: %s" % e)
             pass
 
         # run the base class run() method, which executes the cleanup function