Enables pylint W1201

This pylint check (logging-not-lazy) makes sure the log message is not
built inside the log statement itself, but is instead passed as arguments
to the logger call so that the formatting is done lazily.
Signed-off-by: Caio Carrara <ccarrara@redhat.com>
Parent 4f3fbcda
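For reference, a minimal sketch of the pattern this change addresses (the names below are illustrative, not taken from the Avocado code): building the message eagerly with "+" or "%" inside the call formats the string even when the record ends up filtered out, while passing the values as extra arguments lets the logging module interpolate them only if the record is actually emitted.

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger(__name__)
    user = "avocado"

    # Eager forms: the message string is built before the call,
    # even though INFO records are filtered out here.
    log.info("hello " + user)
    log.info("hello %s" % user)

    # Lazy form: the format arguments are handed to the logger,
    # which interpolates them only when the record is emitted.
    log.info("hello %s", user)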
@@ -402,10 +402,10 @@ class Job(object):
     def _log_avocado_datadir(self):
         LOG_JOB.info('Avocado Data Directories:')
         LOG_JOB.info('')
-        LOG_JOB.info('base ' + data_dir.get_base_dir())
-        LOG_JOB.info('tests ' + data_dir.get_test_dir())
-        LOG_JOB.info('data ' + data_dir.get_data_dir())
-        LOG_JOB.info('logs ' + self.logdir)
+        LOG_JOB.info('base %s', data_dir.get_base_dir())
+        LOG_JOB.info('tests %s', data_dir.get_test_dir())
+        LOG_JOB.info('data %s', data_dir.get_data_dir())
+        LOG_JOB.info('logs %s', self.logdir)
         LOG_JOB.info('')

     @staticmethod
@@ -540,7 +540,7 @@ class Job(object):
             self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
             return self.exitcode
         except exceptions.OptionValidationError as details:
-            self.log.error('\n' + str(details))
+            self.log.error('\n%s', str(details))
             self.exitcode |= exit_codes.AVOCADO_JOB_FAIL
             return self.exitcode
...
@@ -326,8 +326,7 @@ class StdOutput(object):
         except RuntimeError as details:
             # Paginator not available
             logging.getLogger('avocado.app.debug').error("Failed to enable "
-                                                          "paginator: %s"
-                                                          % details)
+                                                          "paginator: %s", details)
             return
         self.stdout = self.stderr = paginator
...
@@ -378,13 +378,14 @@ class TestRunner(object):
                 return
             with sigtstp:
                 msg = "ctrl+z pressed, %%s test (%s)" % proc.pid
+                app_log_msg = '\n%s' % msg
                 if self.sigstopped:
-                    APP_LOG.info("\n" + msg, "resumming")
+                    APP_LOG.info(app_log_msg, "resumming")
                     TEST_LOG.info(msg, "resumming")
                     process.kill_process_tree(proc.pid, signal.SIGCONT, False)
                     self.sigstopped = False
                 else:
-                    APP_LOG.info("\n" + msg, "stopping")
+                    APP_LOG.info(app_log_msg, "stopping")
                     TEST_LOG.info(msg, "stopping")
                     process.kill_process_tree(proc.pid, signal.SIGSTOP, False)
                     self.sigstopped = True
...
@@ -213,7 +213,7 @@ class Daemon(Command):
                                         stderr=subprocess.STDOUT,
                                         shell=False, env=env)
         except OSError:
-            log.debug("Not logging %s (command could not be run)" % self.cmd)
+            log.debug("Not logging %s (command could not be run)", self.cmd)

     def stop(self):
         """
...
@@ -39,11 +39,11 @@ class Config(CLICmd):
     def run(self, args):
         LOG_UI.info('Config files read (in order):')
         for cfg_path in settings.config_paths:
-            LOG_UI.debug(' %s' % cfg_path)
+            LOG_UI.debug(' %s', cfg_path)
         if settings.config_paths_failed:
             LOG_UI.error('\nConfig files that failed to read:')
             for cfg_path in settings.config_paths_failed:
-                LOG_UI.error(' %s' % cfg_path)
+                LOG_UI.error(' %s', cfg_path)
         LOG_UI.debug("")
         if not args.datadir:
             blength = 0
@@ -66,8 +66,8 @@ class Config(CLICmd):
             LOG_UI.debug("file to customize values")
         LOG_UI.debug('')
         LOG_UI.info('Avocado Data Directories:')
-        LOG_UI.debug(' base ' + data_dir.get_base_dir())
-        LOG_UI.debug(' tests ' + data_dir.get_test_dir())
-        LOG_UI.debug(' data ' + data_dir.get_data_dir())
-        LOG_UI.debug(' logs ' + data_dir.get_logs_dir())
-        LOG_UI.debug(' cache ' + ", ".join(data_dir.get_cache_dirs()))
+        LOG_UI.debug(' base %s', data_dir.get_base_dir())
+        LOG_UI.debug(' tests %s', data_dir.get_test_dir())
+        LOG_UI.debug(' data %s', data_dir.get_data_dir())
+        LOG_UI.debug(' logs %s', data_dir.get_logs_dir())
+        LOG_UI.debug(' cache %s', ", ".join(data_dir.get_cache_dirs()))
@@ -159,7 +159,7 @@ class Asset(object):
                     return asset_file
                 except:
                     exc_type, exc_value = sys.exc_info()[:2]
-                    log.error('%s: %s' % (exc_type.__name__, exc_value))
+                    log.error('%s: %s', exc_type.__name__, exc_value)

         # If we get to this point, we have to download it from a location.
         # A writable cache directory is then needed. The first available
@@ -190,7 +190,7 @@ class Asset(object):
                     return asset_file
                 except:
                     exc_type, exc_value = sys.exc_info()[:2]
-                    log.error('%s: %s' % (exc_type.__name__, exc_value))
+                    log.error('%s: %s', exc_type.__name__, exc_value)

         raise EnvironmentError("Failed to fetch %s." % basename)
...
@@ -82,8 +82,8 @@ def make_dir_and_populate(basedir='/tmp'):
             os.write(fd, generate_random_string(str_length))
             os.close(fd)
     except OSError as details:
-        log.error("Failed to generate dir in '%s' and populate: %s" %
-                  (basedir, details))
+        log_msg = "Failed to generate dir in '%s' and populate: %s"
+        log.error(log_msg, basedir, details)
         return None
     return path
@@ -171,7 +171,7 @@ def get_file(src, dst, permissions=None, hash_expected=None,
         while not hash_file == hash_expected:
             hash_file = _verify_hash(_get_file(src, dst, permissions))
             if hash_file != hash_expected:
-                log.error("It seems that dst %s is corrupted" % dst)
+                log.error("It seems that dst %s is corrupted", dst)
                 download_failures += 1
             if download_failures > download_retries:
                 raise EnvironmentError("Failed to retrieve %s. "
...
@@ -99,7 +99,7 @@ def ask(question, auto=False):
     :rtype: str
     """
     if auto:
-        log.info("%s (y/n) y" % question)
+        log.info("%s (y/n) y", question)
         return "y"
     return input("%s (y/n) " % question)
...
@@ -448,8 +448,8 @@ def lv_take_snapshot(vg_name, lv_name,
                 re.search(re.escape(lv_snapshot_name + " [active]"),
                           process.run("lvdisplay", sudo=True).stdout)):
             # the above conditions detect if merge of snapshot was postponed
-            LOGGER.debug(("Logical volume %s is still active! " +
-                          "Attempting to deactivate..."), lv_name)
+            log_msg = "Logical volume %s is still active! Attempting to deactivate..."
+            LOGGER.debug(log_msg, lv_name)
             lv_reactivate(vg_name, lv_name)
             process.run(cmd, sudo=True)
         else:
@@ -491,8 +491,8 @@ def lv_revert(vg_name, lv_name, lv_snapshot_name):
         if ('Snapshot could not be found' in ex.result.stderr and
                 re.search(active_lv_pattern, lvdisplay_output) or
                 "The Logical volume %s is still active" % lv_name in ex.result.stderr):
-            LOGGER.debug(("Logical volume %s is still active! " +
-                          "Attempting to deactivate..."), lv_name)
+            log_msg = "Logical volume %s is still active! Attempting to deactivate..."
+            LOGGER.debug(log_msg, lv_name)
             lv_reactivate(vg_name, lv_name)
             LOGGER.error("Continuing after reactivation")
         elif 'Snapshot could not be found' in ex.result.stderr:
@@ -533,8 +533,8 @@ def lv_reactivate(vg_name, lv_name, timeout=10):
         process.run("lvchange -ay /dev/%s/%s" % (vg_name, lv_name), sudo=True)
         time.sleep(timeout)
     except process.CmdError:
-        LOGGER.error(("Failed to reactivate %s - please, " +
-                      "nuke the process that uses it first."), lv_name)
+        log_msg = "Failed to reactivate %s - please, nuke the process that uses it first."
+        LOGGER.error(log_msg, lv_name)
         raise LVException("The Logical volume %s is still active" % lv_name)
...
@@ -394,7 +394,7 @@ def get_buddy_info(chunk_sizes, nodes="all", zones="all"):
         re_buddyinfo += "(%s)" % "|".join(nodes.split())
     if not re.findall(re_buddyinfo, buddy_info_content):
-        logging.warn("Can not find Nodes %s" % nodes)
+        logging.warn("Can not find Nodes %s", nodes)
         return None
     re_buddyinfo += r".*?zone\s+"
     if zones == "all":
@@ -402,7 +402,7 @@ def get_buddy_info(chunk_sizes, nodes="all", zones="all"):
     else:
         re_buddyinfo += "(%s)" % "|".join(zones.split())
     if not re.findall(re_buddyinfo, buddy_info_content):
-        logging.warn("Can not find zones %s" % zones)
+        logging.warn("Can not find zones %s", zones)
         return None
     re_buddyinfo += r"\s+([\s\d]+)"
...
@@ -662,7 +662,7 @@ class ZypperBackend(RpmBackend):
         except IndexError:
             ver = out
         self.pm_version = ver
-        log.debug('Zypper version: %s' % self.pm_version)
+        log.debug('Zypper version: %s', self.pm_version)

     def install(self, name):
         """
...
@@ -253,7 +253,7 @@ class HTMLResult(Result):
             report_contents = v.render('utf8')
         except UnicodeDecodeError as details:
             # FIXME: Remove me when UnicodeDecodeError problem is fixed
-            LOG_UI.critical("\n" + ("-" * 80))
+            LOG_UI.critical("\n%s", ("-" * 80))
             LOG_UI.critical("HTML failed to render the template: %s\n\n",
                             template)
             LOG_UI.critical("-" * 80)
...
@@ -162,8 +162,8 @@ class ResultsdbResult(Result):
     def render(self, result, job):
         if (getattr(job.args, 'resultsdb_logs', None) is not None and
                 getattr(job.args, 'stdout_claimed_by', None) is None):
-            LOG_UI.info("JOB URL : %s/%s" % (job.args.resultsdb_logs,
-                                             os.path.basename(job.logdir)))
+            log_msg = "JOB URL : %s/%s"
+            LOG_UI.info(log_msg, job.args.resultsdb_logs, os.path.basename(job.logdir))


 class ResultsdbCLI(CLI):
...
@@ -169,7 +169,7 @@ results_dir_content() {
 [ "$SKIP_RESULTSDIR_CHECK" ] || RESULTS_DIR_CONTENT="$(ls $RESULTS_DIR 2> /dev/null)"

 LINT_CMD="inspekt lint --exclude=.git"
-PYLINT_ENABLE="--enable R0401,W0101,W0102,W0104,W0105,W0106,W0107,W0108,W0109,W0111,W0120,W0122,W0123,W0124,W0125,W0150,W0199,W0211,W0222,W0232,W0233,W0301,W0312,W0401,W0404,W0406,W0410,W0601,W0602,W0603,W0604,W0611,W0612,W0614,W0622,W0623,W0640,W0711,W1202,W1300,W1301,W1302,W1303,W1304,W1305,W1306,W1307,W1401,W1402,W1501,W1503,W1645"
+PYLINT_ENABLE="--enable R0401,W0101,W0102,W0104,W0105,W0106,W0107,W0108,W0109,W0111,W0120,W0122,W0123,W0124,W0125,W0150,W0199,W0211,W0222,W0232,W0233,W0301,W0312,W0401,W0404,W0406,W0410,W0601,W0602,W0603,W0604,W0611,W0612,W0614,W0622,W0623,W0640,W0711,W1201,W1202,W1300,W1301,W1302,W1303,W1304,W1305,W1306,W1307,W1401,W1402,W1501,W1503,W1645"
 if [ "$AVOCADO_PARALLEL_LINT" ]; then
     LINT_CMD="$LINT_CMD --parallel=$AVOCADO_PARALLEL_LINT"
 fi
...