Fix handling of unexpected success results
This commit addresses a bug in how unexpected success results from a
test suite are handled. stestr was relying on the wasSuccessful() method
of the StreamResult class from testtools, which doesn't account for
unexpected successes, so when stestr ran a suite containing a test that
unexpectedly succeeded it would still treat the run as successful. This
commit fixes that by adjusting our testtools API usage to call the
wasSuccessful() method from the TestResult class instead, which has the
correct behavior.

An issue has been opened against testtools (testing-cabal/testtools#273)
regarding the accounting error in StreamResult.wasSuccessful(). If/when
a fix lands in a released testtools we can investigate switching these
calls back to that method; for the time being this change mitigates the
issue.
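
As an editor's sketch (not part of the commit), the workaround calls
TestResult's implementation unbound on the summary object. This assumes
Python 3, where an unbound method is a plain function, and that the
summary objects are testtools.StreamSummary instances as they are
elsewhere in stestr; the single status event below is hypothetical:

import testtools

summary = testtools.StreamSummary()
summary.startTestRun()
# Record one test that unexpectedly succeeded.
summary.status(test_id='test_demo', test_status='uxsuccess')
summary.stopTestRun()

# Buggy accounting: StreamSummary's own wasSuccessful() only consults
# errors and failures, so the unexpected success is ignored -> True.
print(summary.wasSuccessful())

# Workaround used by this commit: TestResult's implementation also
# counts unexpected successes against the run -> False.
print(testtools.TestResult.wasSuccessful(summary))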

Fixes #189
mtreinish committed Aug 3, 2018
1 parent 2b1b62a commit ec18f05
Showing 7 changed files with 19 additions and 5 deletions.
2 changes: 1 addition & 1 deletion stestr/commands/failing.py
@@ -113,7 +113,7 @@ def failing(repo_type='file', repo_url=None, list_tests=False, subunit=False,
case.run(result)
finally:
result.stopTestRun()
- failed = not summary.wasSuccessful()
+ failed = not testtools.TestResult.wasSuccessful(summary)
if failed:
result = 1
else:
2 changes: 1 addition & 1 deletion stestr/commands/load.py
@@ -219,7 +219,7 @@ def make_tests():
if pretty_out and not subunit_out:
subunit_trace.print_fails(stdout)
subunit_trace.print_summary(stdout, elapsed_time)
- if not summary_result.wasSuccessful():
+ if not testtools.TestResult.wasSuccessful(summary_result):
return 1
else:
return 0
4 changes: 2 additions & 2 deletions stestr/commands/run.py
@@ -346,7 +346,7 @@ def run_tests():
stream.run(summary)
finally:
summary.stopTestRun()
- if not summary.wasSuccessful():
+ if not testtools.TestResult.wasSuccessful(summary):
result = 1
if result:
return result
@@ -495,7 +495,7 @@ def run_tests():
stream.run(summary)
finally:
summary.stopTestRun()
- if not summary.wasSuccessful():
+ if not testtools.TestResult.wasSuccessful(summary):
result = 1
if result:
return result
2 changes: 1 addition & 1 deletion stestr/subunit_trace.py
@@ -388,7 +388,7 @@ def trace(stdin, stdout, print_failures=False, failonly=False,
if count_tests('status', '^success$') == 0:
print("\nNo tests were successful during the run")
return 1
- return 0 if summary.wasSuccessful() else 1
+ return 0 if testtools.TestResult.wasSuccessful(summary) else 1


def main():
4 changes: 4 additions & 0 deletions stestr/tests/files/failing-tests
@@ -21,3 +21,7 @@ class FakeTestClass(testtools.TestCase):
def test_pass_list(self):
test_list = ['test', 'a', 'b']
self.assertIn('fail', test_list)

def test_unexpected_pass(self):
self.expectFailure("we are sad",
self.assertEqual, 1, 1)
4 changes: 4 additions & 0 deletions stestr/tests/files/passing-tests
@@ -21,3 +21,7 @@ class FakeTestClass(testtools.TestCase):
def test_pass_list(self):
test_list = ['test', 'a', 'b']
self.assertIn('test', test_list)

def test_xfail(self):
self.expectFailure("we are sad",
self.assertEqual, 1, 0)
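
For context, here is an editor's sketch (not part of the commit) of how
testtools classifies the two new test styles: expectFailure() records an
expected failure when the wrapped assertion raises, and an unexpected
success when it passes, which the corrected wasSuccessful() accounting
turns into a failed run. The Demo class name is illustrative:

import unittest
import testtools

class Demo(testtools.TestCase):
    def test_xfail(self):
        # assertEqual(1, 0) raises, so this is an expected failure.
        self.expectFailure("we are sad", self.assertEqual, 1, 0)

    def test_unexpected_pass(self):
        # assertEqual(1, 1) passes, so this is an unexpected success.
        self.expectFailure("we are sad", self.assertEqual, 1, 1)

result = testtools.TestResult()
suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
suite.run(result)
print(len(result.expectedFailures))     # 1
print(len(result.unexpectedSuccesses))  # 1
print(result.wasSuccessful())           # False: unexpected success fails the run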
6 changes: 6 additions & 0 deletions stestr/tests/test_return_codes.py
@@ -127,6 +127,12 @@ def test_parallel_passing_bad_regex(self):
def test_parallel_fails(self):
self.assertRunExit('stestr run', 1)

def test_parallel_passing_xfail(self):
self.assertRunExit('stestr run xfail', 0)

def test_parallel_fails_unxsuccess(self):
self.assertRunExit('stestr run unexpected', 1)

def test_parallel_blacklist(self):
fd, path = tempfile.mkstemp()
self.addCleanup(os.remove, path)
