2011-09-26 09:45:19 +00:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
# -*- coding: utf-8 -*-
|
|
|
|
#
|
2015-03-05 14:02:26 +00:00
|
|
|
# 2010-2015 Nico Schottelius (nico-cdist at schottelius.org)
|
2017-07-01 21:59:51 +00:00
|
|
|
# 2016-2017 Darko Poljak (darko.poljak at gmail.com)
|
2011-09-26 09:45:19 +00:00
|
|
|
#
|
|
|
|
# This file is part of cdist.
|
|
|
|
#
|
|
|
|
# cdist is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# cdist is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with cdist. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#
|
|
|
|
#
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
import logging
|
|
|
|
import os
|
|
|
|
import sys
|
|
|
|
import time
|
2016-05-22 07:45:08 +00:00
|
|
|
import itertools
|
2016-07-23 14:13:59 +00:00
|
|
|
import tempfile
|
2016-08-10 21:56:56 +00:00
|
|
|
import socket
|
2016-12-08 16:36:57 +00:00
|
|
|
import multiprocessing
|
2016-12-08 20:48:59 +00:00
|
|
|
from cdist.mputil import mp_pool_run
|
2017-05-31 07:55:33 +00:00
|
|
|
import atexit
|
|
|
|
import shutil
|
2011-09-26 09:45:19 +00:00
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
import cdist
|
2016-12-03 17:12:38 +00:00
|
|
|
import cdist.hostsource
|
2013-08-29 19:56:53 +00:00
|
|
|
|
|
|
|
import cdist.exec.local
|
|
|
|
import cdist.exec.remote
|
2017-07-20 20:04:44 +00:00
|
|
|
|
|
|
|
from cdist import inventory
|
|
|
|
|
2016-12-03 09:46:49 +00:00
|
|
|
import cdist.util.ipaddr as ipaddr
|
2013-08-29 19:56:53 +00:00
|
|
|
|
|
|
|
from cdist import core
|
2016-12-03 09:46:49 +00:00
|
|
|
from cdist.util.remoteutil import inspect_ssh_mux_opts
|
2016-07-23 14:13:59 +00:00
|
|
|
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
class Config(object):
|
|
|
|
"""Cdist main class to hold arbitrary data"""
|
|
|
|
|
2016-08-11 21:54:31 +00:00
|
|
|
def __init__(self, local, remote, dry_run=False, jobs=None):
    """Initialize a Config run for one target host.

    :param local: local execution context (cdist.exec.local.Local)
    :param remote: remote execution context (cdist.exec.remote.Remote)
    :param dry_run: when True, code is generated but not executed
    :param jobs: number of parallel jobs; falsy means sequential mode
    """
    self.local = local
    self.remote = remote
    # Per-host logger must exist before anything else logs through it.
    self._open_logger()
    self.dry_run = dry_run
    self.jobs = jobs

    # Collaborators: explorers gather facts from the target, the
    # manifest declares objects, and code generates/runs the scripts.
    self.explorer = core.Explorer(self.local.target_host, self.local,
                                  self.remote, jobs=self.jobs)
    self.manifest = core.Manifest(self.local.target_host, self.local)
    self.code = core.Code(self.local.target_host, self.local, self.remote)
|
2013-08-29 19:56:53 +00:00
|
|
|
|
|
|
|
def _init_files_dirs(self):
    """Prepare files and directories for the run"""
    # Create the local (working/cache) layout first, then the
    # directory layout on the remote target host.
    self.local.create_files_dirs()
    self.remote.create_files_dirs()
|
|
|
|
|
2016-05-22 07:22:39 +00:00
|
|
|
@staticmethod
def hosts(source):
    """Yield target hosts read from *source*.

    *source* is handed to cdist.hostsource.HostSource (a file path,
    '-' for stdin, or an iterable of host names). Read failures are
    re-raised as cdist.Error.
    """
    try:
        host_source = cdist.hostsource.HostSource(source)
        for host in host_source():
            yield host
    except (IOError, OSError, UnicodeError) as e:
        raise cdist.Error(
            "Error reading hosts from '{}': {}".format(source, e))
|
2016-05-22 07:22:39 +00:00
|
|
|
|
2017-07-01 22:32:43 +00:00
|
|
|
@staticmethod
|
|
|
|
def construct_remote_exec_copy_patterns(args):
|
|
|
|
# default remote cmd patterns
|
|
|
|
args.remote_exec_pattern = None
|
|
|
|
args.remote_copy_pattern = None
|
|
|
|
|
|
|
|
args_dict = vars(args)
|
|
|
|
# if remote-exec and/or remote-copy args are None then user
|
|
|
|
# didn't specify command line options nor env vars:
|
|
|
|
# inspect multiplexing options for default cdist.REMOTE_COPY/EXEC
|
|
|
|
if (args_dict['remote_copy'] is None or
|
|
|
|
args_dict['remote_exec'] is None):
|
|
|
|
mux_opts = inspect_ssh_mux_opts()
|
|
|
|
if args_dict['remote_exec'] is None:
|
|
|
|
args.remote_exec_pattern = cdist.REMOTE_EXEC + mux_opts
|
|
|
|
if args_dict['remote_copy'] is None:
|
|
|
|
args.remote_copy_pattern = cdist.REMOTE_COPY + mux_opts
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
@classmethod
def _check_and_prepare_args(cls, args):
    """Validate argument combinations and normalize host/manifest input.

    - Rejects reading both the initial manifest and the host file
      from stdin (only one consumer of stdin is possible).
    - Defaults to reading hosts from stdin when neither a host list
      nor a host file was given.
    - When the manifest is '-', spools stdin into a temporary file
      (removed at interpreter exit) and points args.manifest at it.

    :raises cdist.Error: on conflicting stdin usage or tempfile failure
    """
    if args.manifest == '-' and args.hostfile == '-':
        raise cdist.Error(("Cannot read both, manifest and host file, "
                           "from stdin"))

    # if no host source is specified then read hosts from stdin
    if not (args.hostfile or args.host):
        args.hostfile = '-'

    # NOTE: removed unused local `initial_manifest_tempfile = None`;
    # only the temp path is needed below.
    if args.manifest == '-':
        # read initial manifest from stdin
        try:
            handle, initial_manifest_temp_path = tempfile.mkstemp(
                prefix='cdist.stdin.')
            with os.fdopen(handle, 'w') as fd:
                fd.write(sys.stdin.read())
        except (IOError, OSError) as e:
            raise cdist.Error(("Creating tempfile for stdin data "
                               "failed: %s" % e))

        args.manifest = initial_manifest_temp_path
        # Best-effort cleanup of the spooled manifest at exit.
        atexit.register(lambda: os.remove(initial_manifest_temp_path))
|
2016-07-05 18:44:24 +00:00
|
|
|
|
2016-12-03 17:12:38 +00:00
|
|
|
@classmethod
def commandline(cls, args):
    """Configure remote system"""

    # FIXME: Refactor relict - remove later
    log = logging.getLogger("cdist")

    # Validate/normalize args (stdin manifest spooling, host defaults).
    cls._check_and_prepare_args(args)

    # host -> multiprocessing.Process, used only in parallel mode.
    process = {}
    failed_hosts = []
    time_start = time.time()

    # Build default remote exec/copy patterns and the base output dir
    # shared by all hosts of this run.
    cls.construct_remote_exec_copy_patterns(args)
    base_root_path = cls.create_base_root_path(args.out_path)

    hostcnt = 0

    # Select the host iterator: either from the inventory (by tags),
    # or from the explicit host list / host file.
    if args.tag or args.all_tagged_hosts:
        inventory.determine_default_inventory_dir(args)
        if args.all_tagged_hosts:
            inv_list = inventory.InventoryList(
                hosts=None, istag=True, hostfile=None,
                db_basedir=args.inventory_dir)
        else:
            inv_list = inventory.InventoryList(
                hosts=args.host, istag=True, hostfile=args.hostfile,
                db_basedir=args.inventory_dir,
                has_all_tags=args.has_all_tags)
        it = inv_list.entries()
    else:
        it = itertools.chain(cls.hosts(args.host),
                             cls.hosts(args.hostfile))

    for entry in it:
        # Inventory entries are (host, tags) tuples; plain host names
        # need an inventory lookup to discover their tags.
        if isinstance(entry, tuple):
            # if configuring by specified tags
            host = entry[0]
            host_tags = entry[1]
        else:
            # if configuring by host then check inventory for tags
            host = entry
            inventory.determine_default_inventory_dir(args)
            inv_list = inventory.InventoryList(
                hosts=(host,), db_basedir=args.inventory_dir)
            inv = tuple(inv_list.entries())
            if inv:
                # host is present in inventory and has tags
                host_tags = inv[0][1]
            else:
                # host is not present in inventory or has no tags
                host_tags = None
        # Per-host working directory under the shared base path.
        host_base_path, hostdir = cls.create_host_base_dirs(
            host, base_root_path)
        log.debug("Base root path for target host \"{}\" is \"{}\"".format(
            host, host_base_path))

        hostcnt += 1
        if args.parallel:
            # One child process per host; failures are collected when
            # joining below.
            log.trace("Creating child process for %s", host)
            process[host] = multiprocessing.Process(
                target=cls.onehost,
                args=(host, host_tags, host_base_path, hostdir, args,
                      True))
            process[host].start()
        else:
            try:
                cls.onehost(host, host_tags, host_base_path, hostdir,
                            args, parallel=False)
            except cdist.Error as e:
                # Remember the failure but keep configuring the rest.
                failed_hosts.append(host)

    # Catch errors in parallel mode when joining
    if args.parallel:
        for host in process.keys():
            log.trace("Joining process %s", host)
            process[host].join()

            # Non-zero exit code of the child marks the host failed.
            if not process[host].exitcode == 0:
                failed_hosts.append(host)

    time_end = time.time()
    log.verbose("Total processing time for %s host(s): %s", hostcnt,
                (time_end - time_start))

    if len(failed_hosts) > 0:
        raise cdist.Error("Failed to configure the following hosts: " +
                          " ".join(failed_hosts))
|
|
|
|
|
2016-12-03 17:12:38 +00:00
|
|
|
@classmethod
def _resolve_ssh_control_path(cls):
    """Return a unique ssh ControlPath inside a fresh temp directory.

    The directory is scheduled for removal at interpreter exit so
    stale control sockets do not accumulate.
    """
    tmpdir = tempfile.mkdtemp()
    atexit.register(lambda: shutil.rmtree(tmpdir))
    return os.path.join(tmpdir, "s")
|
|
|
|
|
|
|
|
@classmethod
def _resolve_remote_cmds(cls, args):
    """Return the (remote_exec, remote_copy) commands for one host.

    If command patterns were constructed (the user specified no
    explicit commands), substitute a per-host unique ssh ControlPath
    into them; otherwise use args.remote_exec/remote_copy unchanged.
    """
    control_path = cls._resolve_ssh_control_path()
    remote_exec = (args.remote_exec_pattern.format(control_path)
                   if args.remote_exec_pattern
                   else args.remote_exec)
    remote_copy = (args.remote_copy_pattern.format(control_path)
                   if args.remote_copy_pattern
                   else args.remote_copy)
    return (remote_exec, remote_copy, )
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
@classmethod
def onehost(cls, host, host_tags, host_base_path, host_dir_name, args,
            parallel):
    """Configure ONE system"""

    # Per-host logger so parallel output can be attributed.
    log = logging.getLogger(host)

    try:
        # Resolve the remote exec/copy commands (with a per-host ssh
        # ControlPath when using the default patterns).
        remote_exec, remote_copy = cls._resolve_remote_cmds(args)
        log.debug("remote_exec for host \"{}\": {}".format(
            host, remote_exec))
        log.debug("remote_copy for host \"{}\": {}".format(
            host, remote_copy))

        # Resolve host name/addresses before building contexts.
        target_host = ipaddr.resolve_target_addresses(host)
        log.debug("target_host: {}".format(target_host))

        local = cdist.exec.local.Local(
            target_host=target_host,
            target_host_tags=host_tags,
            base_root_path=host_base_path,
            host_dir_name=host_dir_name,
            initial_manifest=args.manifest,
            add_conf_dirs=args.conf_dir,
            cache_path_pattern=args.cache_path_pattern,
            quiet_mode=args.quiet)

        remote = cdist.exec.remote.Remote(
            target_host=target_host,
            remote_exec=remote_exec,
            remote_copy=remote_copy,
            base_path=args.remote_out_path,
            quiet_mode=args.quiet)

        # Run the full deploy cycle for this single host.
        c = cls(local, remote, dry_run=args.dry_run, jobs=args.jobs)
        c.run()

    except cdist.Error as e:
        log.error(e)
        if parallel:
            # We are running in our own process here, need to sys.exit!
            sys.exit(1)
        else:
            raise

    except KeyboardInterrupt:
        # Ignore in parallel mode, we are exiting anyway
        if parallel:
            sys.exit(0)
        # Pass back to controlling code in sequential mode
        else:
            raise
|
|
|
|
|
2017-07-01 22:32:43 +00:00
|
|
|
@staticmethod
|
|
|
|
def create_base_root_path(out_path=None):
|
|
|
|
if out_path:
|
|
|
|
base_root_path = out_path
|
|
|
|
else:
|
|
|
|
base_root_path = tempfile.mkdtemp()
|
|
|
|
|
|
|
|
return base_root_path
|
|
|
|
|
|
|
|
@staticmethod
def create_host_base_dirs(host, base_root_path):
    """Return (host_base_path, hostdir) for *host*.

    hostdir is a filesystem-safe hash of the host name; the base
    path is that directory joined under *base_root_path*.
    """
    hostdir = cdist.str_hash(host)
    return (os.path.join(base_root_path, hostdir), hostdir)
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
def run(self):
    """Do what is most often done: deploy & cleanup"""
    start_time = time.time()

    self.log.info("Starting configuration run")

    # Create local and remote directory layout before any step runs.
    self._init_files_dirs()

    # Order matters: global explorers feed the initial manifest,
    # which creates the objects that are then iterated to completion.
    self.explorer.run_global_explorers(self.local.global_explorer_out_path)
    self.manifest.run_initial_manifest(self.local.initial_manifest)
    self.iterate_until_finished()

    # Persist this run's artifacts to the cache, keyed by start time.
    self.local.save_cache(start_time)
    self.log.info("Finished successful run in {:.2f} seconds".format(
        time.time() - start_time))
|
2013-08-29 19:56:53 +00:00
|
|
|
|
|
|
|
def object_list(self):
    """Short name for object list retrieval"""
    # Install-type objects are skipped: this class runs in config
    # mode only.
    all_objects = core.CdistObject.list_objects(
        self.local.object_path, self.local.type_path,
        self.local.object_marker_name)
    for cdist_object in all_objects:
        if not cdist_object.cdist_type.is_install:
            yield cdist_object
        else:
            self.log.debug(("Running in config mode, ignoring install "
                            "object: {0}").format(cdist_object))
|
|
|
|
|
2016-12-07 18:06:51 +00:00
|
|
|
def iterate_once(self):
    """
    Iterate over the objects once - helper method for
    iterate_until_finished
    """
    # Dispatch on the configured job count: any truthy value selects
    # the parallel implementation.
    if self.jobs:
        return self._iterate_once_parallel()
    return self._iterate_once_sequential()
|
|
|
|
|
|
|
|
def _iterate_once_sequential(self):
    """Run one sequential pass over all objects.

    Prepares objects that are still undefined and runs objects that
    are prepared; returns True when any object changed state, so the
    caller knows another pass may make further progress.
    """
    self.log.debug("Iteration in sequential mode")
    changed = False

    for obj in self.object_list():
        # Requirements not yet satisfied - nothing we can do now.
        if obj.requirements_unfinished(obj.requirements):
            continue

        if obj.state == core.CdistObject.STATE_UNDEF:
            # Prepare the virgin object (explorers + type manifest).
            self.object_prepare(obj)
            changed = True

        # Preparation may have created autorequired objects that we
        # now depend on - wait for them before running.
        if obj.requirements_unfinished(obj.autorequire):
            continue

        if obj.state == core.CdistObject.STATE_PREPARED:
            self.object_run(obj)
            changed = True

    return changed
|
|
|
|
|
|
|
|
def _iterate_once_parallel(self):
    # One parallel pass: phase 1 prepares all undefined objects (in a
    # multiprocessing pool when more than one), phase 2 runs prepared
    # objects in chunks that never contain two objects of the same
    # nonparallel type. Returns True when any object changed state.
    self.log.debug("Iteration in parallel mode in {} jobs".format(
        self.jobs))
    objects_changed = False

    # Phase 1: collect undefined objects whose requirements are done.
    cargo = []
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            """We cannot do anything for this poor object"""
            continue

        if cdist_object.state == core.CdistObject.STATE_UNDEF:
            """Prepare the virgin object"""

            # self.object_prepare(cdist_object)
            # objects_changed = True
            cargo.append(cdist_object)

    n = len(cargo)
    if n == 1:
        # Pool overhead not worth it for a single object.
        self.log.debug("Only one object, preparing sequentially")
        self.object_prepare(cargo[0])
        objects_changed = True
    elif cargo:
        self.log.trace("Multiprocessing start method is {}".format(
            multiprocessing.get_start_method()))

        self.log.trace("Multiprocessing cargo: %s", cargo)

        # Transfer each distinct type's explorers exactly once,
        # before the pooled preparation below (prepare is then called
        # with transfer_type_explorers=False).
        cargo_types = set()
        for c in cargo:
            cargo_types.add(c.cdist_type)
        self.log.trace("Multiprocessing cargo_types: %s", cargo_types)
        nt = len(cargo_types)
        if nt == 1:
            self.log.debug(("Only one type, transfering explorers "
                            "sequentially"))
            self.explorer.transfer_type_explorers(cargo_types.pop())
        else:
            self.log.trace(("Starting multiprocessing Pool for {} "
                            "parallel transfering types' explorers".format(
                                nt)))
            args = [
                (ct, ) for ct in cargo_types
            ]
            mp_pool_run(self.explorer.transfer_type_explorers, args,
                        jobs=self.jobs)
            self.log.trace(("Multiprocessing for parallel transfering "
                            "types' explorers finished"))

        self.log.trace(("Starting multiprocessing Pool for {} parallel "
                        "objects preparation".format(n)))
        # False => explorers were already transferred above.
        args = [
            (c, False, ) for c in cargo
        ]
        mp_pool_run(self.object_prepare, args, jobs=self.jobs)
        self.log.trace(("Multiprocessing for parallel object "
                        "preparation finished"))
        objects_changed = True

    # Phase 2: reuse cargo as a list of chunks (lists of objects).
    del cargo[:]
    for cdist_object in self.object_list():
        if cdist_object.requirements_unfinished(cdist_object.requirements):
            """We cannot do anything for this poor object"""
            continue

        if cdist_object.state == core.CdistObject.STATE_PREPARED:
            if cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                """The previous step created objects we depend on -
                wait for them
                """
                continue

            # self.object_run(cdist_object)
            # objects_changed = True

            # put objects in chunks of distinct types
            # so that there is no more than one object
            # of the same type in one chunk because there is a
            # possibility of object's process locking which
            # prevents parallel execution at remote
            # and do this only for nonparallel marked types
            for chunk in cargo:
                for obj in chunk:
                    if (obj.cdist_type == cdist_object.cdist_type and
                        cdist_object.cdist_type.is_nonparallel):
                        break
                else:
                    # No conflicting object in this chunk - add here.
                    chunk.append(cdist_object)
                    break
            else:
                # No chunk could take the object - start a new one.
                chunk = [cdist_object, ]
                cargo.append(chunk)

    for chunk in cargo:
        self.log.trace("Running chunk: %s", chunk)
        n = len(chunk)
        if n == 1:
            self.log.debug("Only one object, running sequentially")
            self.object_run(chunk[0])
            objects_changed = True
        elif chunk:
            self.log.trace("Multiprocessing start method is {}".format(
                multiprocessing.get_start_method()))
            self.log.trace(("Starting multiprocessing Pool for {} "
                            "parallel object run".format(n)))
            args = [
                (c, ) for c in chunk
            ]
            mp_pool_run(self.object_run, args, jobs=self.jobs)
            self.log.trace(("Multiprocessing for parallel object "
                            "run finished"))
            objects_changed = True

    return objects_changed
|
|
|
|
|
2016-12-08 16:36:57 +00:00
|
|
|
def _open_logger(self):
    # Log under the target host name (target_host[0]) so output is
    # attributable per host in parallel runs.
    self.log = logging.getLogger(self.local.target_host[0])
|
|
|
|
|
|
|
|
# logger is not pickable, so remove it when we pickle
|
|
|
|
def __getstate__(self):
    """Return a picklable copy of the instance state.

    The logger is dropped because logger objects cannot be pickled;
    __setstate__ recreates it on the other side.
    """
    state = dict(self.__dict__)
    state.pop('log', None)
    return state
|
|
|
|
|
|
|
|
# recreate logger when we unpickle
|
|
|
|
def __setstate__(self, state):
    # Restore pickled attributes, then recreate the logger that
    # __getstate__ had to drop (loggers are not picklable).
    self.__dict__.update(state)
    self._open_logger()
|
|
|
|
|
2013-08-29 19:56:53 +00:00
|
|
|
def iterate_until_finished(self):
    """
    Go through all objects and solve them
    one after another
    """

    objects_changed = True

    # Keep iterating while passes still make progress; a pass that
    # changes nothing means the remaining objects are stuck.
    while objects_changed:
        objects_changed = self.iterate_once()

    # Check whether all objects have been finished
    unfinished_objects = []
    for cdist_object in self.object_list():
        if not cdist_object.state == cdist_object.STATE_DONE:
            unfinished_objects.append(cdist_object)

    if unfinished_objects:
        # Build a human-readable report of each stuck object's
        # unfinished requirements and autorequirements.
        info_string = []

        for cdist_object in unfinished_objects:

            requirement_names = []
            autorequire_names = []

            for requirement in cdist_object.requirements_unfinished(
                    cdist_object.requirements):
                requirement_names.append(requirement.name)

            for requirement in cdist_object.requirements_unfinished(
                    cdist_object.autorequire):
                autorequire_names.append(requirement.name)

            requirements = "\n ".join(requirement_names)
            autorequire = "\n ".join(autorequire_names)
            info_string.append(("%s requires:\n"
                                " %s\n"
                                "%s ""autorequires:\n"
                                " %s" % (
                                    cdist_object.name,
                                    requirements, cdist_object.name,
                                    autorequire)))

        raise cdist.UnresolvableRequirementsError(
            ("The requirements of the following objects could not be "
             "resolved:\n%s") % ("\n".join(info_string)))
|
2013-08-29 19:56:53 +00:00
|
|
|
|
2017-07-14 19:47:30 +00:00
|
|
|
def object_prepare(self, cdist_object, transfer_type_explorers=True):
    """Prepare object: Run type explorer + manifest"""
    name = cdist_object.name
    self.log.verbose("Preparing object {}".format(name))
    self.log.verbose(
        "Running manifest and explorers for " + name)
    # Explorers run before the type manifest; only after both does
    # the object count as prepared.
    self.explorer.run_type_explorers(cdist_object, transfer_type_explorers)
    self.manifest.run_type_manifest(cdist_object)
    cdist_object.state = core.CdistObject.STATE_PREPARED
|
|
|
|
|
|
|
|
def object_run(self, cdist_object):
    """Run gencode and code for an object.

    Generates local and remote code for the object, marks the object
    changed if any code was produced, executes the code (unless this
    is a dry run) and finally marks the object done.

    :raises cdist.Error: when the object has already been finished
    """
    self.log.verbose("Running object " + cdist_object.name)
    if cdist_object.state == core.CdistObject.STATE_DONE:
        # Fix: the message was previously passed as two separate
        # exception args ("...%s", obj), so %s was never substituted;
        # format it here instead.
        raise cdist.Error("Attempting to run an already finished "
                          "object: %s" % cdist_object)

    # Generate
    self.log.debug("Generating code for %s" % (cdist_object.name))
    cdist_object.code_local = self.code.run_gencode_local(cdist_object)
    cdist_object.code_remote = self.code.run_gencode_remote(cdist_object)
    # Any generated code means the object changes the target system.
    if cdist_object.code_local or cdist_object.code_remote:
        cdist_object.changed = True

    # Execute
    if not self.dry_run:
        if cdist_object.code_local or cdist_object.code_remote:
            self.log.info("Processing %s" % (cdist_object.name))
        if cdist_object.code_local:
            self.log.trace("Executing local code for %s"
                           % (cdist_object.name))
            self.code.run_code_local(cdist_object)
        if cdist_object.code_remote:
            self.log.trace("Executing remote code for %s"
                           % (cdist_object.name))
            # Remote code must be transferred before it can run.
            self.code.transfer_code_remote(cdist_object)
            self.code.run_code_remote(cdist_object)
    else:
        self.log.verbose("Skipping code execution due to DRY RUN")

    # Mark this object as done
    self.log.trace("Finishing run of " + cdist_object.name)
    cdist_object.state = core.CdistObject.STATE_DONE