Fix handling of unexpected success results
This commit addresses a bug in how unexpected success results are
handled when a test suite produces them. stestr was relying on the
wasSuccessful() method of the StreamResult class from testtools, which
doesn't properly account for unexpected success. So when stestr was run
on a test suite, it wouldn't properly handle tests with an unexpected
success result and would thus treat the run as successful. This
addresses the issue by adjusting our testtools API usage to manually
query the results object for unexpected success results instead of
relying on the existing wasSuccessful() method from the StreamResult
class. This is basically the same as the wasSuccessful() method from
the testtools.TestResult class, which exhibits the correct behavior,
but isn't something we're using.

An issue has been opened in testtools (testing-cabal/testtools#273)
regarding the accounting error in StreamResult.wasSuccessful(). If and
when a fix is introduced into a released testtools, we can investigate
switching these calls back to that method and deleting our local helper
function. But for the time being, this mitigates the issue.

Fixes #189
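
To make the discrepancy concrete, here is a minimal sketch (not part of
the commit) of the behavior described above, assuming a testtools
release in which StreamSummary.wasSuccessful() only inspects errors and
failures. The final check mirrors the wasSuccessful() helper this
commit adds to stestr/results.py.

import testtools

summary = testtools.StreamSummary()
summary.startTestRun()
# Feed a single final status event for a test that was expected to fail
# but passed; 'uxsuccess' is the subunit/testtools status for an
# unexpected success.
summary.status(test_id='test_unexpected_pass', test_status='uxsuccess')
summary.stopTestRun()

# StreamSummary.wasSuccessful() only considers errors and failures, so
# the unexpected success is ignored and the run still looks successful.
print(summary.wasSuccessful())  # True

# Checking unexpectedSuccesses as well, like the new stestr helper does,
# correctly reports the run as unsuccessful.
print(not (summary.errors or summary.failures or
           summary.unexpectedSuccesses))  # False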
mtreinish committed Aug 3, 2018
1 parent 2b1b62a commit eb97f84
Showing 8 changed files with 26 additions and 5 deletions.
2 changes: 1 addition & 1 deletion stestr/commands/failing.py
@@ -113,7 +113,7 @@ def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False,
         case.run(result)
     finally:
         result.stopTestRun()
-    failed = not summary.wasSuccessful()
+    failed = not results.wasSuccessful(summary)
     if failed:
         result = 1
     else:
2 changes: 1 addition & 1 deletion stestr/commands/load.py
@@ -219,7 +219,7 @@ def make_tests():
     if pretty_out and not subunit_out:
         subunit_trace.print_fails(stdout)
         subunit_trace.print_summary(stdout, elapsed_time)
-    if not summary_result.wasSuccessful():
+    if not results.wasSuccessful(summary_result):
         return 1
     else:
         return 0
5 changes: 3 additions & 2 deletions stestr/commands/run.py
@@ -29,6 +29,7 @@
 from stestr import output
 from stestr.repository import abstract as repository
 from stestr.repository import util
+from stestr import results
 from stestr.testlist import parse_list
 from stestr import user_config

@@ -346,7 +347,7 @@ def run_tests():
         stream.run(summary)
     finally:
         summary.stopTestRun()
-    if not summary.wasSuccessful():
+    if not results.wasSuccessful(summary):
         result = 1
     if result:
         return result
@@ -495,7 +496,7 @@ def run_tests():
         stream.run(summary)
     finally:
         summary.stopTestRun()
-    if not summary.wasSuccessful():
+    if not results.wasSuccessful(summary):
         result = 1
     if result:
         return result
5 changes: 5 additions & 0 deletions stestr/results.py
@@ -17,6 +17,11 @@
 from stestr import output


+def wasSuccessful(summary):
+    return not (summary.errors or summary.failures or
+                summary.unexpectedSuccesses)
+
+
 class SummarizingResult(testtools.StreamSummary):

     def __init__(self):
3 changes: 2 additions & 1 deletion stestr/subunit_trace.py
@@ -29,6 +29,7 @@
 import testtools

 from stestr import colorizer
+from stestr import results

 # NOTE(mtreinish) on python3 anydbm was renamed dbm and the python2 dbm module
 # was renamed to dbm.ndbm, this block takes that into account
@@ -388,7 +389,7 @@ def trace(stdin, stdout, print_failures=False, failonly=False,
     if count_tests('status', '^success$') == 0:
         print("\nNo tests were successful during the run")
         return 1
-    return 0 if summary.wasSuccessful() else 1
+    return 0 if results.wasSuccessful(summary) else 1


 def main():
4 changes: 4 additions & 0 deletions stestr/tests/files/failing-tests
@@ -21,3 +21,7 @@ class FakeTestClass(testtools.TestCase):
     def test_pass_list(self):
         test_list = ['test', 'a', 'b']
         self.assertIn('fail', test_list)
+
+    def test_unexpected_pass(self):
+        self.expectFailure("we are sad",
+                           self.assertEqual, 1, 1)
4 changes: 4 additions & 0 deletions stestr/tests/files/passing-tests
@@ -21,3 +21,7 @@ class FakeTestClass(testtools.TestCase):
     def test_pass_list(self):
         test_list = ['test', 'a', 'b']
         self.assertIn('test', test_list)
+
+    def test_xfail(self):
+        self.expectFailure("we are sad",
+                           self.assertEqual, 1, 0)
6 changes: 6 additions & 0 deletions stestr/tests/test_return_codes.py
@@ -127,6 +127,12 @@ def test_parallel_passing_bad_regex(self):
     def test_parallel_fails(self):
         self.assertRunExit('stestr run', 1)

+    def test_parallel_passing_xfail(self):
+        self.assertRunExit('stestr run xfail', 0)
+
+    def test_parallel_fails_unxsuccess(self):
+        self.assertRunExit('stestr run unexpected', 1)
+
     def test_parallel_blacklist(self):
         fd, path = tempfile.mkstemp()
         self.addCleanup(os.remove, path)
