Review-changes 4: fix score when use-local-configs=y
Aleksey Petryankin committed Apr 14, 2024
1 parent 655ef69 commit e69f6f4
Showing 2 changed files with 86 additions and 18 deletions.
pylint/lint/pylinter.py: 44 changes (27 additions, 17 deletions)
@@ -69,7 +69,7 @@
     ModuleDescriptionDict,
     Options,
 )
-from pylint.utils import ASTWalker, FileState, LinterStats, utils
+from pylint.utils import ASTWalker, FileState, LinterStats, merge_stats, utils

 MANAGER = astroid.MANAGER

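The newly imported merge_stats is the same helper pylint's parallel (--jobs) path uses to fold per-worker stats into one LinterStats. A minimal sketch of its contract, assuming (as the name suggests) field-wise summation of the numeric counters; the real implementation lives in pylint/utils/linterstats.py:

```python
# Hedged sketch of merge_stats' contract: numeric counters are summed field-wise.
from pylint.utils import LinterStats, merge_stats

a = LinterStats()
a.statement, a.warning = 2, 1
b = LinterStats()
b.statement, b.warning = 4, 2

merged = merge_stats([a, b])
assert merged.statement == 6
assert merged.warning == 3
```
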
@@ -320,6 +320,7 @@ def __init__(

         # Attributes related to stats
         self.stats = LinterStats()
+        self.all_stats: list[LinterStats] = []

         # Attributes related to (command-line) options and their parsing
         self.options: Options = options + _make_linter_options(self)
@@ -951,12 +952,17 @@ def _expand_files(
     def set_current_module(self, modname: str, filepath: str | None = None) -> None:
         """Set the name of the currently analyzed module and
         init statistics for it.
+
+        Save current stats before init to make sure no counters for
+        error, statement, etc. are missed.
         """
         if not modname and filepath is None:
             return
         self.reporter.on_set_current_module(modname or "", filepath)
         self.current_name = modname
         self.current_file = filepath or modname
+        self.all_stats.append(self.stats)
+        self.stats = LinterStats()
         self.stats.init_single_module(modname or "")

         # If there is an actual filepath we might need to update the config attribute
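To make the save-before-reset dance concrete, here is a toy model of what set_current_module() now does with the two attributes (plain dataclasses, not pylint's real classes):

```python
# Toy model of the save-before-reset pattern above (not pylint's real classes).
from dataclasses import dataclass, field

@dataclass
class Stats:
    warning: int = 0

@dataclass
class Linter:
    stats: Stats = field(default_factory=Stats)
    all_stats: list[Stats] = field(default_factory=list)

    def set_current_module(self, modname: str) -> None:
        self.all_stats.append(self.stats)  # stash the finished module's counters
        self.stats = Stats()               # fresh counters for the new module

linter = Linter()
linter.set_current_module("pkg.a")
linter.stats.warning += 1                  # one warning found in pkg.a
linter.set_current_module("pkg.b")         # pkg.a's counter is preserved
assert sum(s.warning for s in [linter.stats, *linter.all_stats]) == 1
```
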
@@ -1013,7 +1019,7 @@ def _astroid_module_checker(
             rawcheckers=rawcheckers,
         )

-        # notify global end
+        # notify end of module if jobs>1 or use-local-configs=y, global end otherwise
         self.stats.statement = walker.nbstatements
         for checker in reversed(_checkers):
             checker.close()
@@ -1147,14 +1153,18 @@ def generate_reports(self, verbose: bool = False) -> int | None:
         if persistent run, pickle results for later comparison
         """
         self.config = self._base_config
         # Display whatever messages are left on the reporter.
         self.reporter.display_messages(report_nodes.Section())
+
+        # the current self.stats is needed in the merge - it contains stats from the last module
+        # separate variable to avoid modifying any stats during reporting
+        self.finished_run_stats = merge_stats([self.stats, *self.all_stats])
         if not self.file_state._is_base_filestate:
             # load previous results if any
             previous_stats = load_results(self.file_state.base_name)
-            self.reporter.on_close(self.stats, previous_stats)
+            self.reporter.on_close(self.finished_run_stats, previous_stats)
             if self.config.reports:
-                sect = self.make_reports(self.stats, previous_stats)
+                sect = self.make_reports(self.finished_run_stats, previous_stats)
             else:
                 sect = report_nodes.Section()

@@ -1163,9 +1173,9 @@ def generate_reports(self, verbose: bool = False) -> int | None:
             score_value = self._report_evaluation(verbose)
             # save results if persistent run
             if self.config.persistent:
-                save_results(self.stats, self.file_state.base_name)
+                save_results(self.finished_run_stats, self.file_state.base_name)
         else:
-            self.reporter.on_close(self.stats, LinterStats())
+            self.reporter.on_close(self.finished_run_stats, LinterStats())
             score_value = None
         return score_value

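The merge in generate_reports() deliberately includes the live self.stats: all_stats only receives a module's counters once the next set_current_module() call stashes them, so at report time the final module's numbers are still sitting in self.stats. A hedged illustration:

```python
# Hedged illustration: the last-checked module's stats are not yet in all_stats,
# so they must be merged in explicitly or the final module would go uncounted.
from pylint.utils import LinterStats, merge_stats

all_stats = [LinterStats(), LinterStats()]  # earlier modules (empty here)
stats = LinterStats()                       # the module checked last
stats.statement = 2

total = merge_stats([stats, *all_stats])
assert total.statement == 2  # merge_stats(all_stats) alone would give 0
```
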
@@ -1175,35 +1185,35 @@ def _report_evaluation(self, verbose: bool = False) -> int | None:
         # syntax error preventing pylint from further processing)
         note = None
         previous_stats = load_results(self.file_state.base_name)
-        if self.stats.statement == 0:
+        if self.finished_run_stats.statement == 0:
             return note

         # get a global note for the code
         evaluation = self.config.evaluation
         try:
             stats_dict = {
-                "fatal": self.stats.fatal,
-                "error": self.stats.error,
-                "warning": self.stats.warning,
-                "refactor": self.stats.refactor,
-                "convention": self.stats.convention,
-                "statement": self.stats.statement,
-                "info": self.stats.info,
+                "fatal": self.finished_run_stats.fatal,
+                "error": self.finished_run_stats.error,
+                "warning": self.finished_run_stats.warning,
+                "refactor": self.finished_run_stats.refactor,
+                "convention": self.finished_run_stats.convention,
+                "statement": self.finished_run_stats.statement,
+                "info": self.finished_run_stats.info,
             }
             note = eval(evaluation, {}, stats_dict)  # pylint: disable=eval-used
         except Exception as ex:  # pylint: disable=broad-except
             msg = f"An exception occurred while rating: {ex}"
         else:
-            self.stats.global_note = note
+            self.finished_run_stats.global_note = note
             msg = f"Your code has been rated at {note:.2f}/10"
             if previous_stats:
                 pnote = previous_stats.global_note
                 if pnote is not None:
                     msg += f" (previous run: {pnote:.2f}/10, {note - pnote:+.2f})"

         if verbose:
-            checked_files_count = self.stats.node_count["module"]
-            unchecked_files_count = self.stats.undocumented["module"]
+            checked_files_count = self.finished_run_stats.node_count["module"]
+            unchecked_files_count = self.finished_run_stats.undocumented["module"]
             msg += f"\nChecked {checked_files_count} files, skipped {unchecked_files_count} files"

         if self.config.score:
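For reference, applying what should be pylint's stock default --evaluation expression by hand to the totals produced by the new test fixture (6 statements, 3 warnings, nothing else) rates the code at exactly 5.0, which is what the new test below banks on. A sketch, assuming the pylint 3.x default formula:

```python
# Hedged: pylint's default --evaluation expression, applied by hand to the
# fixture totals asserted in the new test below (6 statements, 3 warnings).
fatal = error = refactor = convention = 0
warning, statement = 3, 6
note = max(
    0,
    0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10),
)
assert note == 5.0
```
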
tests/config/test_per_directory_config.py: 60 changes (59 additions, 1 deletion)
@@ -6,13 +6,17 @@

 import os
 import os.path
+from argparse import Namespace
 from io import StringIO
 from pathlib import Path
+from typing import Any
+from unittest.mock import patch

 import pytest
 from pytest import CaptureFixture

 from pylint.lint import Run as LintRun
+from pylint.lint.pylinter import PyLinter
 from pylint.testutils._run import _Run as Run
 from pylint.testutils.utils import _patch_streams, _test_cwd
@@ -52,7 +56,7 @@ def _create_subconfig_test_fs(tmp_path: Path) -> tuple[Path, ...]:
     level1_init.touch()
     level1_init2.touch()
     level2_init.touch()
-    test_file_text = "#LEVEL1\n#LEVEL2\n#ALL_LEVELS\n#TODO\n"
+    test_file_text = "#LEVEL1\n#LEVEL2\n#ALL_LEVELS\n#TODO\nassert (1, None)\ns = 'statement without warnings'\n"
     test_file1.write_text(test_file_text)
     test_file2.write_text(test_file_text)
     test_file3.write_text(test_file_text)
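The two appended statements are what give every generated module exactly one warning and one clean statement: asserting on a non-empty tuple is always true, which pylint reports as assert-on-tuple (W0199), while the plain assignment emits no message.

```python
# The two statements appended to every generated test file:
assert (1, None)                  # W0199 assert-on-tuple: always-true assertion
s = 'statement without warnings'  # counts as a statement, emits no message
```
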
@@ -250,3 +254,57 @@ def test_local_config_verbose(
     LintRun(["--verbose", "--use-local-configs=y", str(tmp_files[1])], exit=False)
     output = capsys.readouterr()
     assert f"Using config from {level1_dir / 'sub'}" in output.err
+
+
+def ns_diff(ns1: Namespace, ns2: Namespace) -> str:
+    msg = "Namespaces not equal\n"
+    for k, v in ns1.__dict__.items():
+        if v != ns2.__dict__[k]:
+            msg += f"{v} != {ns2.__dict__[k]}\n"
+    return msg
+
+
+generate_reports_orig = PyLinter.generate_reports
+
+
+def generate_reports_spy(self: PyLinter, *args: Any, **kwargs: Any) -> int:
+    score = generate_reports_orig(self, *args, **kwargs)
+    # check that generate_reports() worked with the base config, not the config from the most recent module
+    assert self.config == self._base_config, ns_diff(self.config, self._base_config)
+    # level1_dir.a, level1_dir.z and level1_dir.sub.b from _create_subconfig_test_fs
+    # each has 2 statements, one of which is a warning => the score should be 5
+    assert score is not None
+    assert 0 < score < 10
+    return score
+
+
+@pytest.mark.parametrize(
+    "local_config_args",
+    [["--use-local-configs=y"], ["--use-local-configs=y", "--jobs=2"]],
+)
+def test_subconfigs_score(
+    _create_subconfig_test_fs: tuple[Path, ...],
+    local_config_args: list[str],
+) -> None:
+    """Check that statements from all checked modules are accounted for in the score:
+    given stats from many modules such that
+    the total number of messages > statements in the last module,
+    check that the score is >0 and <10.
+    """
+    level1_dir, *_ = _create_subconfig_test_fs
+    out = StringIO()
+    with (
+        patch(
+            "pylint.lint.run.Run.LinterClass.generate_reports",
+            side_effect=generate_reports_spy,
+            autospec=True,
+        ) as reports_patch,
+        _patch_streams(out),
+    ):
+        linter = LintRun([*local_config_args, str(level1_dir)], exit=False).linter
+    reports_patch.assert_called_once()
+
+    # level1_dir.a, level1_dir.z and level1_dir.sub.b from _create_subconfig_test_fs
+    # each has 2 statements, one of which is a warning, so 6 statements and 3 warnings in total
+    assert linter.finished_run_stats.statement == 6
+    assert linter.finished_run_stats.warning == 3
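For a quick manual check outside pytest, a sketch along these lines exercises the same code path. It is runnable only against this branch (--use-local-configs does not exist in released pylint), and "some_package/" is a placeholder for any package directory:

```python
# Hedged sketch: drive the linter directly and inspect the merged run totals.
# "some_package/" is a placeholder; exit=False keeps Run from calling sys.exit().
from io import StringIO

from pylint.lint import Run
from pylint.testutils.utils import _patch_streams

out = StringIO()
with _patch_streams(out):  # capture the textual report
    linter = Run(["--use-local-configs=y", "some_package/"], exit=False).linter

# finished_run_stats is the merged, whole-run view introduced by this commit
print(linter.finished_run_stats.statement, linter.finished_run_stats.warning)
```
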
