From f984a918b959a46e49fe8456a327eca8d44313de Mon Sep 17 00:00:00 2001 From: Darko Poljak Date: Tue, 30 Mar 2021 07:56:38 +0200 Subject: [PATCH] Fix log message string formatting Use logging message format with args, instead of direct `%` or `str.format`. Resolve #855. --- cdist/argparse.py | 8 +-- cdist/config.py | 87 ++++++++++++-------------- cdist/core/cdist_type.py | 6 +- cdist/core/explorer.py | 26 ++++---- cdist/core/manifest.py | 2 +- cdist/emulator.py | 37 +++++------ cdist/exec/local.py | 16 +++-- cdist/exec/remote.py | 14 ++--- cdist/install.py | 2 +- cdist/inventory.py | 36 +++++------ cdist/preos.py | 6 +- cdist/preos/debootstrap/debootstrap.py | 41 ++++++------ cdist/scan/scan.py | 4 +- cdist/util/ipaddr.py | 6 +- 14 files changed, 140 insertions(+), 151 deletions(-) diff --git a/cdist/argparse.py b/cdist/argparse.py index 88759d7b..cadac39a 100644 --- a/cdist/argparse.py +++ b/cdist/argparse.py @@ -533,10 +533,10 @@ def parse_and_configure(argv, singleton=True): log = logging.getLogger("cdist") - log.verbose("version %s" % cdist.VERSION) - log.trace('command line args: {}'.format(cfg.command_line_args)) - log.trace('configuration: {}'.format(cfg.get_config())) - log.trace('configured args: {}'.format(args)) + log.verbose("version %s", cdist.VERSION) + log.trace('command line args: %s', cfg.command_line_args) + log.trace('configuration: %s', cfg.get_config()) + log.trace('configured args: %s', args) check_beta(vars(args)) diff --git a/cdist/config.py b/cdist/config.py index 19d5bd70..d6fec55f 100644 --- a/cdist/config.py +++ b/cdist/config.py @@ -273,15 +273,15 @@ class Config: host_tags = None host_base_path, hostdir = cls.create_host_base_dirs( host, base_root_path) - log.debug("Base root path for target host \"{}\" is \"{}\"".format( - host, host_base_path)) + log.debug("Base root path for target host \"%s\" is \"%s\"", + host, host_base_path) hostcnt += 1 if args.parallel: pargs = (host, host_tags, host_base_path, hostdir, args, True, configuration) - log.trace(("Args for multiprocessing operation " - "for host {}: {}".format(host, pargs))) + log.trace("Args for multiprocessing operation for host %s: %s", + host, pargs) process_args.append(pargs) else: try: @@ -298,10 +298,10 @@ class Config: except cdist.Error: failed_hosts.append(host) elif args.parallel: - log.trace("Multiprocessing start method is {}".format( - multiprocessing.get_start_method())) - log.trace(("Starting multiprocessing Pool for {} " - "parallel host operation".format(args.parallel))) + log.trace("Multiprocessing start method is %s", + multiprocessing.get_start_method()) + log.trace("Starting multiprocessing Pool for %d parallel host" + " operation", args.parallel) results = mp_pool_run(cls.onehost, process_args, @@ -396,16 +396,13 @@ class Config: remote_exec, remote_copy, cleanup_cmd = cls._resolve_remote_cmds( args) - log.debug("remote_exec for host \"{}\": {}".format( - host, remote_exec)) - log.debug("remote_copy for host \"{}\": {}".format( - host, remote_copy)) + log.debug("remote_exec for host \"%s\": %s", host, remote_exec) + log.debug("remote_copy for host \"%s\": %s", host, remote_copy) family = cls._address_family(args) - log.debug("address family: {}".format(family)) + log.debug("address family: %s", family) target_host = cls.resolve_target_addresses(host, family) - log.debug("target_host for host \"{}\": {}".format( - host, target_host)) + log.debug("target_host for host \"%s\": %s", host, target_host) local = cdist.exec.local.Local( target_host=target_host, @@ -474,8 +471,8 @@ class 
Config: """Do what is most often done: deploy & cleanup""" start_time = time.time() - self.log.info("Starting {} run".format( - 'dry' if self.dry_run else 'configuration')) + self.log.info("Starting %s run", + 'dry' if self.dry_run else 'configuration') self._init_files_dirs() @@ -493,9 +490,9 @@ class Config: self._remove_files_dirs() self.local.save_cache(start_time) - self.log.info("Finished {} run in {:.2f} seconds".format( + self.log.info("Finished %s run in %.2f seconds", 'dry' if self.dry_run else 'successful', - time.time() - start_time)) + time.time() - start_time) def cleanup(self): self.log.debug("Running cleanup commands") @@ -519,8 +516,8 @@ class Config: self.local.object_path, self.local.type_path, self.local.object_marker_name): if cdist_object.cdist_type.is_install: - self.log.debug(("Running in config mode, ignoring install " - "object: {0}").format(cdist_object)) + self.log.debug("Running in config mode, ignoring install " + "object: %s", cdist_object) else: yield cdist_object @@ -565,8 +562,7 @@ class Config: return objects_changed def _iterate_once_parallel(self): - self.log.debug("Iteration in parallel mode in {} jobs".format( - self.jobs)) + self.log.debug("Iteration in parallel mode in %d jobs", self.jobs) objects_changed = False cargo = [] @@ -588,8 +584,8 @@ class Config: self.object_prepare(cargo[0]) objects_changed = True elif cargo: - self.log.trace("Multiprocessing start method is {}".format( - multiprocessing.get_start_method())) + self.log.trace("Multiprocessing start method is %s", + multiprocessing.get_start_method()) self.log.trace("Multiprocessing cargo: %s", cargo) @@ -603,9 +599,8 @@ class Config: "sequentially")) self.explorer.transfer_type_explorers(cargo_types.pop()) else: - self.log.trace(("Starting multiprocessing Pool for {} " - "parallel types explorers transferring".format( - nt))) + self.log.trace("Starting multiprocessing Pool for %d " + "parallel types explorers transferring", nt) args = [ (ct, ) for ct in cargo_types ] @@ -614,8 +609,8 @@ class Config: self.log.trace(("Multiprocessing for parallel transferring " "types' explorers finished")) - self.log.trace(("Starting multiprocessing Pool for {} parallel " - "objects preparation".format(n))) + self.log.trace("Starting multiprocessing Pool for %d parallel " + "objects preparation", n) args = [ (c, False, ) for c in cargo ] @@ -667,10 +662,10 @@ class Config: self.object_run(chunk[0]) objects_changed = True elif chunk: - self.log.trace("Multiprocessing start method is {}".format( - multiprocessing.get_start_method())) - self.log.trace(("Starting multiprocessing Pool for {} " - "parallel object run".format(n))) + self.log.trace("Multiprocessing start method is %s", + multiprocessing.get_start_method()) + self.log.trace("Starting multiprocessing Pool for %d " + "parallel object run", n) args = [ (c, ) for c in chunk ] @@ -794,9 +789,9 @@ class Config: def object_prepare(self, cdist_object, transfer_type_explorers=True): """Prepare object: Run type explorer + manifest""" self._handle_deprecation(cdist_object) - self.log.verbose("Preparing object {}".format(cdist_object.name)) - self.log.verbose( - "Running manifest and explorers for " + cdist_object.name) + self.log.verbose("Preparing object %s", cdist_object.name) + self.log.verbose("Running manifest and explorers for %s", + cdist_object.name) self.explorer.run_type_explorers(cdist_object, transfer_type_explorers) try: self.manifest.run_type_manifest(cdist_object) @@ -810,13 +805,13 @@ class Config: def object_run(self, cdist_object): """Run 
gencode and code for an object""" try: - self.log.verbose("Running object " + cdist_object.name) + self.log.verbose("Running object %s", cdist_object.name) if cdist_object.state == core.CdistObject.STATE_DONE: raise cdist.Error(("Attempting to run an already finished " - "object: %s"), cdist_object) + "object: {}").format(cdist_object)) # Generate - self.log.debug("Generating code for %s" % (cdist_object.name)) + self.log.debug("Generating code for %s", cdist_object.name) cdist_object.code_local = self.code.run_gencode_local(cdist_object) cdist_object.code_remote = self.code.run_gencode_remote( cdist_object) @@ -825,20 +820,20 @@ class Config: # Execute if cdist_object.code_local or cdist_object.code_remote: - self.log.info("Processing %s" % (cdist_object.name)) + self.log.info("Processing %s", cdist_object.name) if not self.dry_run: if cdist_object.code_local: - self.log.trace("Executing local code for %s" - % (cdist_object.name)) + self.log.trace("Executing local code for %s", + cdist_object.name) self.code.run_code_local(cdist_object) if cdist_object.code_remote: - self.log.trace("Executing remote code for %s" - % (cdist_object.name)) + self.log.trace("Executing remote code for %s", + cdist_object.name) self.code.transfer_code_remote(cdist_object) self.code.run_code_remote(cdist_object) # Mark this object as done - self.log.trace("Finishing run of " + cdist_object.name) + self.log.trace("Finishing run of %s", cdist_object.name) cdist_object.state = core.CdistObject.STATE_DONE except cdist.Error as e: raise cdist.CdistObjectError(cdist_object, e) diff --git a/cdist/core/cdist_type.py b/cdist/core/cdist_type.py index c0329c8a..274de989 100644 --- a/cdist/core/cdist_type.py +++ b/cdist/core/cdist_type.py @@ -82,9 +82,9 @@ class CdistType: yield cls(base_path, name) except InvalidTypeError as e: # ignore invalid type, log warning and continue - msg = "Ignoring invalid type '%s' at '%s' defined at '%s'" % ( - e.type_path, e.type_absolute_path, e.source_path) - cls.log.warning(msg) + cls.log.warning("Ignoring invalid type '%s' at '%s' defined" + " at '%s'", e.type_path, e.type_absolute_path, + e.source_path) # remove invalid from runtime conf dir os.remove(e.type_absolute_path) diff --git a/cdist/core/explorer.py b/cdist/core/explorer.py index a3baa959..caa12a7d 100644 --- a/cdist/core/explorer.py +++ b/cdist/core/explorer.py @@ -131,18 +131,17 @@ class Explorer: self._run_global_explorer(explorer, out_path) def _run_global_explorers_parallel(self, out_path): - self.log.debug("Running global explorers in {} parallel jobs".format( - self.jobs)) - self.log.trace("Multiprocessing start method is {}".format( - multiprocessing.get_start_method())) - self.log.trace(("Starting multiprocessing Pool for global " - "explorers run")) + self.log.debug("Running global explorers in %s parallel jobs", + self.jobs) + self.log.trace("Multiprocessing start method is %s", + multiprocessing.get_start_method()) + self.log.trace("Starting multiprocessing Pool for global explorers" + " run") args = [ (e, out_path, ) for e in self.list_global_explorer_names() ] mp_pool_run(self._run_global_explorer, args, jobs=self.jobs) - self.log.trace(("Multiprocessing run for global explorers " - "finished")) + self.log.trace("Multiprocessing run for global explorers finished") # logger is not pickable, so remove it when we pickle def __getstate__(self): @@ -184,15 +183,14 @@ class Explorer: in the object. 
""" - self.log.verbose("Running type explorers for {}".format( - cdist_object.cdist_type)) + self.log.verbose("Running type explorers for %s", + cdist_object.cdist_type) if transfer_type_explorers: self.log.trace("Transferring type explorers for type: %s", cdist_object.cdist_type) self.transfer_type_explorers(cdist_object.cdist_type) else: - self.log.trace(("No need for transferring type explorers for " - "type: %s"), + self.log.trace("No need for transferring type explorers for %s", cdist_object.cdist_type) self.log.trace("Transferring object parameters for object: %s", cdist_object.name) @@ -236,8 +234,8 @@ class Explorer: remote side.""" if cdist_type.explorers: if cdist_type.name in self._type_explorers_transferred: - self.log.trace(("Skipping retransfer of type explorers " - "for: %s"), cdist_type) + self.log.trace("Skipping retransfer of type explorers for: %s", + cdist_type) else: source = os.path.join(self.local.type_path, cdist_type.explorer_path) diff --git a/cdist/core/manifest.py b/cdist/core/manifest.py index 390340d4..3148d66c 100644 --- a/cdist/core/manifest.py +++ b/cdist/core/manifest.py @@ -161,7 +161,7 @@ class Manifest: raise NoInitialManifestError(initial_manifest, user_supplied) message_prefix = "initialmanifest" - self.log.verbose("Running initial manifest " + initial_manifest) + self.log.verbose("Running initial manifest %s", initial_manifest) which = "init" if self.local.save_output_streams: stderr_path = os.path.join(self.local.stderr_base_path, which) diff --git a/cdist/emulator.py b/cdist/emulator.py index f1db862e..f09c282d 100644 --- a/cdist/emulator.py +++ b/cdist/emulator.py @@ -107,8 +107,8 @@ class Emulator: self.record_requirements() self.record_auto_requirements() self.record_parent_child_relationships() - self.log.trace("Finished %s %s" % ( - self.cdist_object.path, self.parameters)) + self.log.trace("Finished %s %s", self.cdist_object.path, + self.parameters) def __init_log(self): """Setup logging facility""" @@ -170,7 +170,7 @@ class Emulator: # And finally parse/verify parameter self.args = parser.parse_args(self.argv[1:]) - self.log.trace('Args: %s' % self.args) + self.log.trace('Args: %s', self.args) def init_object(self): # Initialize object - and ensure it is not in args @@ -241,8 +241,8 @@ class Emulator: raise cdist.Error(errmsg) else: if self.cdist_object.exists: - self.log.debug(('Object %s override forced with ' - 'CDIST_OVERRIDE'), self.cdist_object.name) + self.log.debug('Object %s override forced with CDIST_OVERRIDE', + self.cdist_object.name) self.cdist_object.create(True) else: self.cdist_object.create() @@ -260,8 +260,8 @@ class Emulator: parent = self.cdist_object.object_from_name(__object_name) parent.typeorder.append(self.cdist_object.name) if self._order_dep_on(): - self.log.trace(('[ORDER_DEP] Adding %s to typeorder dep' - ' for %s'), depname, parent.name) + self.log.trace('[ORDER_DEP] Adding %s to typeorder dep for %s', + depname, parent.name) parent.typeorder_dep.append(depname) elif self._order_dep_on(): self.log.trace('[ORDER_DEP] Adding %s to global typeorder dep', @@ -301,16 +301,14 @@ class Emulator: try: cdist_object = self.cdist_object.object_from_name(requirement) except core.cdist_type.InvalidTypeError as e: - self.log.error(("%s requires object %s, but type %s does not" - " exist. Defined at %s" % ( - self.cdist_object.name, - requirement, e.name, self.object_source))) + self.log.error("%s requires object %s, but type %s does not" + " exist. 
Defined at %s", self.cdist_object.name, + requirement, e.name, self.object_source) raise except core.cdist_object.MissingObjectIdError: - self.log.error(("%s requires object %s without object id." - " Defined at %s" % (self.cdist_object.name, - requirement, - self.object_source))) + self.log.error("%s requires object %s without object id." + " Defined at %s", self.cdist_object.name, + requirement, self.object_source) raise self.log.debug("Recording requirement %s for %s", @@ -380,10 +378,9 @@ class Emulator: self.env['require'] += " " + lastcreatedtype else: self.env['require'] = lastcreatedtype - self.log.debug(("Injecting require for " - "CDIST_ORDER_DEPENDENCY: %s for %s"), - lastcreatedtype, - self.cdist_object.name) + self.log.debug("Injecting require for" + " CDIST_ORDER_DEPENDENCY: %s for %s", + lastcreatedtype, self.cdist_object.name) except IndexError: # if no second last line, we are on the first type, # so do not set a requirement @@ -391,7 +388,7 @@ class Emulator: if "require" in self.env: requirements = self.env['require'] - self.log.debug("reqs = " + requirements) + self.log.debug("reqs = %s", requirements) for requirement in self._parse_require(requirements): # Ignore empty fields - probably the only field anyway if len(requirement) == 0: diff --git a/cdist/exec/local.py b/cdist/exec/local.py index e0aab190..6713cd13 100644 --- a/cdist/exec/local.py +++ b/cdist/exec/local.py @@ -154,8 +154,8 @@ class Local: with open(self.object_marker_file, 'w') as fd: fd.write("%s\n" % self.object_marker_name) - self.log.trace("Object marker %s saved in %s" % ( - self.object_marker_name, self.object_marker_file)) + self.log.trace("Object marker %s saved in %s", + self.object_marker_name, self.object_marker_file) def _init_cache_dir(self, cache_dir): home_dir = cdist.home_dir() @@ -289,14 +289,12 @@ class Local: return cache_subpath def save_cache(self, start_time=time.time()): - self.log.trace("cache subpath pattern: {}".format( - self.cache_path_pattern)) + self.log.trace("cache subpath pattern: %s", self.cache_path_pattern) cache_subpath = self._cache_subpath(start_time, self.cache_path_pattern) - self.log.debug("cache subpath: {}".format(cache_subpath)) + self.log.debug("cache subpath: %s", cache_subpath) destination = os.path.join(self.cache_path, cache_subpath) - self.log.trace(("Saving cache: " + self.base_path + " to " + - destination)) + self.log.trace("Saving cache %s to %s", self.base_path, destination) if not os.path.exists(destination): shutil.move(self.base_path, destination) @@ -332,7 +330,7 @@ class Local: # Iterate over all directories and link the to the output dir for conf_dir in self.conf_dirs: - self.log.debug("Checking conf_dir %s ..." % (conf_dir)) + self.log.debug("Checking conf_dir %s ...", conf_dir) for sub_dir in CONF_SUBDIRS_LINKED: current_dir = os.path.join(conf_dir, sub_dir) @@ -350,7 +348,7 @@ class Local: if os.path.exists(dst): os.unlink(dst) - self.log.trace("Linking %s to %s ..." 
% (src, dst)) + self.log.trace("Linking %s to %s ...", src, dst) try: os.symlink(src, dst) except OSError as e: diff --git a/cdist/exec/remote.py b/cdist/exec/remote.py index e5af2f34..ea85da4c 100644 --- a/cdist/exec/remote.py +++ b/cdist/exec/remote.py @@ -176,19 +176,19 @@ class Remote: # create archive tarpath, fcnt = autil.tar(source, self.archiving_mode) if tarpath is None: - self.log.trace(("Files count {} is lower than {} limit, " - "skipping archiving").format( - fcnt, autil.FILES_LIMIT)) + self.log.trace("Files count %d is lower than %d limit, " + "skipping archiving", + fcnt, autil.FILES_LIMIT) else: - self.log.trace(("Archiving mode, tarpath: %s, file count: " - "%s"), tarpath, fcnt) + self.log.trace("Archiving mode, tarpath: %s, file count: " + "%s", tarpath, fcnt) # get archive name tarname = os.path.basename(tarpath) self.log.trace("Archiving mode tarname: %s", tarname) # archive path at the remote desttarpath = os.path.join(destination, tarname) - self.log.trace( - "Archiving mode desttarpath: %s", desttarpath) + self.log.trace("Archiving mode desttarpath: %s", + desttarpath) # transfer archive to the remote side self.log.trace("Archiving mode: transferring") self._transfer_file(tarpath, desttarpath) diff --git a/cdist/install.py b/cdist/install.py index 7c894fe5..d077baef 100644 --- a/cdist/install.py +++ b/cdist/install.py @@ -47,4 +47,4 @@ class Install(cdist.config.Config): yield cdist_object else: self.log.debug("Running in install mode, ignoring non install" - "object: {0}".format(cdist_object)) + "object: %s", cdist_object) diff --git a/cdist/inventory.py b/cdist/inventory.py index 0387f326..106052a2 100644 --- a/cdist/inventory.py +++ b/cdist/inventory.py @@ -92,7 +92,7 @@ class Inventory: self.init_db() def init_db(self): - self.log.trace("Init db: {}".format(self.db_basedir)) + self.log.trace("Init db: %s", self.db_basedir) if not os.path.exists(self.db_basedir): os.makedirs(self.db_basedir, exist_ok=True) elif not os.path.isdir(self.db_basedir): @@ -182,9 +182,9 @@ class Inventory: configuration = cfg.get_config(section='GLOBAL') determine_default_inventory_dir(args, configuration) - log.debug("Using inventory: {}".format(args.inventory_dir)) - log.trace("Inventory args: {}".format(vars(args))) - log.trace("Inventory command: {}".format(args.subcommand)) + log.debug("Using inventory: %s", args.inventory_dir) + log.trace("Inventory args: %s", vars(args)) + log.trace("Inventory command: %s", args.subcommand) if args.subcommand == "list": c = InventoryList(hosts=args.host, istag=args.tag, @@ -237,16 +237,16 @@ class InventoryList(Inventory): def _do_list(self, it_tags, it_hosts, check_func): if (it_tags is not None): param_tags = set(it_tags) - self.log.trace("param_tags: {}".format(param_tags)) + self.log.trace("param_tags: %s", param_tags) else: param_tags = set() for host in it_hosts: - self.log.trace("host: {}".format(host)) + self.log.trace("host: %s", host) tags = self._get_host_tags(host) if tags is None: - self.log.debug("Host \'{}\' not found, skipped".format(host)) + self.log.debug("Host \'%s\' not found, skipped", host) continue - self.log.trace("tags: {}".format(tags)) + self.log.trace("tags: %s", tags) if check_func(tags, param_tags): yield host, tags @@ -308,11 +308,11 @@ class InventoryHost(Inventory): def _action(self, host): if self.action == "add": - self.log.debug("Adding host \'{}\'".format(host)) + self.log.debug("Adding host \'%s\'", host) elif self.action == "del": - self.log.debug("Deleting host \'{}\'".format(host)) + 
self.log.debug("Deleting host \'%s\'", host) hostpath = self._host_path(host) - self.log.trace("hostpath: {}".format(hostpath)) + self.log.trace("hostpath: %s", hostpath) if self.action == "add" and not os.path.exists(hostpath): self._new_hostpath(hostpath) else: @@ -372,23 +372,23 @@ class InventoryTag(Inventory): print("Host \'{}\' does not exist, skipping".format(host), file=sys.stderr) return - self.log.trace("existing host_tags: {}".format(host_tags)) + self.log.trace("existing host_tags: %s", host_tags) if self.action == "del" and self.all: host_tags = set() else: for tag in self.input_tags: if self.action == "add": - self.log.debug("Adding tag \'{}\' for host \'{}\'".format( - tag, host)) + self.log.debug("Adding tag \'%s\' for host \'%s\'", + tag, host) host_tags.add(tag) elif self.action == "del": - self.log.debug("Deleting tag \'{}\' for host " - "\'{}\'".format(tag, host)) + self.log.debug("Deleting tag \'%s\' for host \'%s\'", + tag, host) if tag in host_tags: host_tags.remove(tag) - self.log.trace("new host tags: {}".format(host_tags)) + self.log.trace("new host tags: %s", host_tags) if not self._write_host_tags(host, host_tags): - self.log.trace("{} does not exist, skipped".format(host)) + self.log.trace("%s does not exist, skipped", host) def run(self): if self.allhosts: diff --git a/cdist/preos.py b/cdist/preos.py index f8a5dd67..45711a41 100644 --- a/cdist/preos.py +++ b/cdist/preos.py @@ -49,7 +49,7 @@ def scan_preos_dir_plugins(dir): c = cm[1] yield from preos_plugin(c) except ImportError as e: - log.warning("Cannot import '{}': {}".format(module_name, e)) + log.warning("Cannot import '%s': %s", module_name, e) def find_preos_plugins(): @@ -102,7 +102,7 @@ class PreOS: parser.add_argument('remainder_args', nargs=argparse.REMAINDER) args = parser.parse_args(argv[1:]) cdist.argparse.handle_loglevel(args) - log.debug("preos args : {}".format(args)) + log.debug("preos args : %s", args) conf_dirs = util.resolve_conf_dirs_from_config_and_args(args) @@ -122,7 +122,7 @@ class PreOS: func_args = [preos, args.remainder_args, ] else: func_args = [args.remainder_args, ] - log.info("Running preos : {}".format(preos_name)) + log.info("Running preos : %s", preos_name) func(*func_args) else: raise cdist.Error( diff --git a/cdist/preos/debootstrap/debootstrap.py b/cdist/preos/debootstrap/debootstrap.py index a20cdb9c..f1f750ee 100644 --- a/cdist/preos/debootstrap/debootstrap.py +++ b/cdist/preos/debootstrap/debootstrap.py @@ -166,7 +166,7 @@ class Debian: args.pxe_boot_dir = os.path.realpath(args.pxe_boot_dir) cdist.argparse.handle_loglevel(args) - log.debug("preos: {}, args: {}".format(cls._preos_name, args)) + log.debug("preos: %s, args: %s", cls._preos_name, args) try: env = vars(args) new_env = {} @@ -190,27 +190,30 @@ class Debian: env = new_env env.update(os.environ) cls.update_env(env) - log.debug("preos: {} env: {}".format(cls._preos_name, env)) + log.debug("preos: %s env: %s", cls._preos_name, env) + + if log.getEffectiveLevel() <= logging.INFO: + info_msg = ["Running preos: {}, suite: {}, arch: {}".format( + cls._preos_name, args.suite, args.arch), ] + if args.mirror: + info_msg.append("mirror: {}".format(args.mirror)) + if args.script: + info_msg.append("script: {}".format(args.script)) + if args.bootstrap: + info_msg.append("bootstrapping") + if args.configure: + info_msg.append("configuring") + if args.pxe_boot_dir: + info_msg.append("creating PXE") + if args.drive: + info_msg.append("creating bootable drive") + log.info(info_msg) + cmd = os.path.join(cls._files_dir, "code") 
- info_msg = ["Running preos: {}, suite: {}, arch: {}".format( - cls._preos_name, args.suite, args.arch), ] - if args.mirror: - info_msg.append("mirror: {}".format(args.mirror)) - if args.script: - info_msg.append("script: {}".format(args.script)) - if args.bootstrap: - info_msg.append("bootstrapping") - if args.configure: - info_msg.append("configuring") - if args.pxe_boot_dir: - info_msg.append("creating PXE") - if args.drive: - info_msg.append("creating bootable drive") - log.info(info_msg) - log.debug("cmd={}".format(cmd)) + log.debug("cmd=%s", cmd) subprocess.check_call(cmd, env=env, shell=True) except subprocess.CalledProcessError as e: - log.error("preos {} failed: {}".format(cls._preos_name, e)) + log.error("preos %s failed: %s", cls._preos_name, e) class Ubuntu(Debian): diff --git a/cdist/scan/scan.py b/cdist/scan/scan.py index b1d0e9e1..9a0bcc52 100644 --- a/cdist/scan/scan.py +++ b/cdist/scan/scan.py @@ -94,7 +94,7 @@ class Trigger(object): def trigger(self, interface): packet = IPv6(dst=f"ff02::1%{interface}") / ICMPv6EchoRequest() - log.debug(f"Sending request on {interface}") + log.debug("Sending request on %s", interface) send(packet, verbose=self.verbose) @@ -114,7 +114,7 @@ class Scanner(object): def handle_pkg(self, pkg): if ICMPv6EchoReply in pkg: host = pkg['IPv6'].src - log.verbose(f"Host {host} is alive") + log.verbose("Host %s is alive", host) dir = os.path.join(self.outdir, host) fname = os.path.join(dir, "last_seen") diff --git a/cdist/util/ipaddr.py b/cdist/util/ipaddr.py index 95ca74ee..d9e5f498 100644 --- a/cdist/util/ipaddr.py +++ b/cdist/util/ipaddr.py @@ -42,8 +42,7 @@ def resolve_target_host_name(host, family=0): # gethostbyaddr returns triple # (hostname, aliaslist, ipaddrlist) host_name = socket.gethostbyaddr(ip_addr)[0] - log.debug("derived host_name for host \"{}\": {}".format( - host, host_name)) + log.debug("derived host_name for host \"%s\": %s", host, host_name) except (socket.gaierror, socket.herror) as e: # in case of error provide empty value host_name = '' @@ -54,8 +53,7 @@ def resolve_target_fqdn(host): log = logging.getLogger(host) try: host_fqdn = socket.getfqdn(host) - log.debug("derived host_fqdn for host \"{}\": {}".format( - host, host_fqdn)) + log.debug("derived host_fqdn for host \"%s\": %s", host, host_fqdn) except socket.herror as e: # in case of error provide empty value host_fqdn = ''
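
Note for reviewers (not part of the patch): the sketch below illustrates why the change matters. The logger name "demo" and the Expensive class are made up for illustration; cdist's custom verbose/trace levels are assumed to behave like the stock levels here, since they presumably route through the same logging machinery. With str.format the message is built before logging checks whether the level is enabled; with args, msg % args is only evaluated if a handler actually emits the record, so disabled debug/trace calls cost almost nothing.

import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger("demo")


class Expensive:
    """Stand-in for a value whose repr() is costly to compute."""

    def __repr__(self):
        print("repr() evaluated")
        return "<expensive>"


value = Expensive()

# Eager formatting: str.format() builds the full message before
# logging ever checks whether DEBUG is enabled, so repr() runs and
# the result is thrown away.
log.debug("value: {}".format(value))

# Lazy formatting: the format string and its args are stored on the
# LogRecord and merged with msg % args only when a handler emits the
# record; at INFO level repr() is never called.
log.debug("value: %s", value)

# With an enabled level both styles produce the same output.
log.info("value: %s", value)

The getEffectiveLevel() guard added around the info_msg list in debootstrap.py follows the same reasoning: the list is only assembled when INFO-level output would actually be emitted.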