diff --git a/.gitignore b/.gitignore index 6e58d14c32a1bc..11c39db88944c7 100644 --- a/.gitignore +++ b/.gitignore @@ -117,6 +117,7 @@ tools/*/*.i.tmp # === Rules for test artifacts === /*.tap /*.xml +/v8*-tap.json /node_trace.*.log # coverage related /gcovr diff --git a/Makefile b/Makefile index 9c01f8f244ee19..05fdc1509844ad 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,27 @@ ifdef ENABLE_V8_TAP TAP_V8 := --junitout $(PWD)/v8-tap.xml TAP_V8_INTL := --junitout $(PWD)/v8-intl-tap.xml TAP_V8_BENCHMARKS := --junitout $(PWD)/v8-benchmarks-tap.xml +define convert_to_junit + @true +endef +endif + +ifdef ENABLE_CONVERT_V8_JSON_TO_XML + TAP_V8_JSON := $(PWD)/v8-tap.json + TAP_V8_INTL_JSON := $(PWD)/v8-intl-tap.json + TAP_V8_BENCHMARKS_JSON := $(PWD)/v8-benchmarks-tap.json + + # By default, the V8's JSON test output only includes the tests which have + # failed. We use --slow-tests-cutoff to ensure that all tests are present + # in the output, including those which pass. + TAP_V8 := --json-test-results $(TAP_V8_JSON) --slow-tests-cutoff 1000000 + TAP_V8_INTL := --json-test-results $(TAP_V8_INTL_JSON) --slow-tests-cutoff 1000000 + TAP_V8_BENCHMARKS := --json-test-results $(TAP_V8_BENCHMARKS_JSON) --slow-tests-cutoff 1000000 + +define convert_to_junit + export PATH="$(NO_BIN_OVERRIDE_PATH)" && \ + $(PYTHON) tools/v8-json-to-junit.py < $(1) > $(1:.json=.xml) +endef endif V8_TEST_OPTIONS = $(V8_EXTRA_TEST_OPTIONS) @@ -683,6 +704,7 @@ test-v8: v8 ## Runs the V8 test suite on deps/v8. 
$(PYTHON) deps/v8/tools/run-tests.py --gn --arch=$(V8_ARCH) $(V8_TEST_OPTIONS) \ mjsunit cctest debugger inspector message preparser \ $(TAP_V8) + $(call convert_to_junit,$(TAP_V8_JSON)) $(info Testing hash seed) $(MAKE) test-hash-seed @@ -691,12 +713,14 @@ test-v8-intl: v8 $(PYTHON) deps/v8/tools/run-tests.py --gn --arch=$(V8_ARCH) \ intl \ $(TAP_V8_INTL) + $(call convert_to_junit,$(TAP_V8_INTL_JSON)) test-v8-benchmarks: v8 export PATH="$(NO_BIN_OVERRIDE_PATH)" && \ $(PYTHON) deps/v8/tools/run-tests.py --gn --arch=$(V8_ARCH) \ benchmarks \ $(TAP_V8_BENCHMARKS) + $(call convert_to_junit,$(TAP_V8_BENCHMARKS_JSON)) test-v8-updates: $(PYTHON) tools/test.py $(PARALLEL_ARGS) --mode=$(BUILDTYPE_LOWER) v8-updates diff --git a/tools/test-v8.bat b/tools/test-v8.bat index d322c31a38d3cc..64265157f00d80 100644 --- a/tools/test-v8.bat +++ b/tools/test-v8.bat @@ -20,18 +20,21 @@ if errorlevel 1 set ERROR_STATUS=1&goto test-v8-exit set path=%savedpath% if not defined test_v8 goto test-v8-intl -echo running 'python tools\run-tests.py %common_v8_test_options% %v8_test_options% --junitout ./v8-tap.xml' -call python tools\run-tests.py %common_v8_test_options% %v8_test_options% --junitout ./v8-tap.xml +echo running 'python tools\run-tests.py %common_v8_test_options% %v8_test_options% --slow-tests-cutoff 1000000 --json-test-results v8-tap.json' +call python tools\run-tests.py %common_v8_test_options% %v8_test_options% --slow-tests-cutoff 1000000 --json-test-results v8-tap.json +call python ..\..\tools\v8-json-to-junit.py < v8-tap.json > v8-tap.xml :test-v8-intl if not defined test_v8_intl goto test-v8-benchmarks -echo running 'python tools\run-tests.py %common_v8_test_options% intl --junitout ./v8-intl-tap.xml' -call python tools\run-tests.py %common_v8_test_options% intl --junitout ./v8-intl-tap.xml +echo running 'python tools\run-tests.py %common_v8_test_options% intl --slow-tests-cutoff 1000000 --json-test-results v8-intl-tap.json' +call python tools\run-tests.py 
%common_v8_test_options% intl --slow-tests-cutoff 1000000 --json-test-results v8-intl-tap.json +call python ..\..\tools\v8-json-to-junit.py < v8-intl-tap.json > v8-intl-tap.xml :test-v8-benchmarks if not defined test_v8_benchmarks goto test-v8-exit -echo running 'python tools\run-tests.py %common_v8_test_options% benchmarks --junitout ./v8-benchmarks-tap.xml' -call python tools\run-tests.py %common_v8_test_options% benchmarks --junitout ./v8-benchmarks-tap.xml +echo running 'python tools\run-tests.py %common_v8_test_options% benchmarks --slow-tests-cutoff 1000000 --json-test-results v8-benchmarks-tap.json' +call python tools\run-tests.py %common_v8_test_options% benchmarks --slow-tests-cutoff 1000000 --json-test-results v8-benchmarks-tap.json +call python ..\..\tools\v8-json-to-junit.py < v8-benchmarks-tap.json > v8-benchmarks-tap.xml goto test-v8-exit :test-v8-exit diff --git a/tools/v8-json-to-junit.py b/tools/v8-json-to-junit.py new file mode 100755 index 00000000000000..3d94df580876e6 --- /dev/null +++ b/tools/v8-json-to-junit.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python +# Large parts of this file are modified from +# deps/v8/tools/testrunner/local/junit_output.py, which no longer exists in +# latest V8. +# +# Copyright 2013 the V8 project authors. All rights reserved. +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +import json +import utils +import signal +import sys +import xml.etree.ElementTree as xml + +def IsExitCodeCrashing(exit_code): + if utils.IsWindows(): + return 0x80000000 & exit_code and not (0x3FFFFF00 & exit_code) + return exit_code < 0 and exit_code != -signal.SIGABRT + + +class JUnitTestOutput: + def __init__(self, test_suite_name): + self.root = xml.Element("testsuite") + self.root.attrib["name"] = test_suite_name + + def HasRunTest(self, test_name, test_cmd, test_duration, test_failure): + test_case_element = xml.Element("testcase") + test_case_element.attrib["name"] = test_name + test_case_element.attrib["cmd"] = test_cmd + test_case_element.attrib["time"] = str(round(test_duration, 3)) + if test_failure is not None: + failure_element = xml.Element("failure") + failure_element.text = test_failure + test_case_element.append(failure_element) + self.root.append(test_case_element) + + def FinishAndWrite(self, f): + xml.ElementTree(self.root).write(f, "UTF-8") + + +def Main(): + test_results = json.load(sys.stdin) + + # V8's JSON test runner only logs failing and flaky tests under "results". 
We + # assume the caller has put a large number for --slow-tests-cutoff, to ensure + # that all the tests appear under "slowest_tests". + + failing_tests = {result["name"]: result for result in test_results["results"]} + all_tests = {result["name"]: result for result in test_results["slowest_tests"]} + passing_tests = { + name: result for name, result in all_tests.items() if name not in failing_tests + } + + # These check that --slow-tests-cutoff was passed correctly. + assert len(failing_tests) + len(passing_tests) == len(all_tests) + assert len(all_tests) == len(test_results["slowest_tests"]) + + output = JUnitTestOutput("v8tests") + + for name, failing_test in failing_tests.items(): + failing_output = [] + + stdout = failing_test["stdout"].strip() + if len(stdout): + failing_output.append("stdout:") + failing_output.append(stdout) + + stderr = failing_test["stderr"].strip() + if len(stderr): + failing_output.append("stderr:") + failing_output.append(stderr) + + failing_output.append("Command: " + failing_test["command"]) + + exit_code = failing_test["exit_code"] + if failing_test["result"] == "TIMEOUT": + failing_output.append("--- TIMEOUT ---") + elif IsExitCodeCrashing(exit_code): + failing_output.append("exit code: " + str(exit_code)) + failing_output.append("--- CRASHED ---") + + output.HasRunTest( + test_name=name, + test_cmd=failing_test["command"], + test_duration=failing_test["duration"], + test_failure="\n".join(failing_output), + ) + + for name, passing_test in passing_tests.items(): + output.HasRunTest( + test_name=name, + test_cmd=passing_test["command"], + test_duration=passing_test["duration"], + test_failure=None, + ) + + output.FinishAndWrite(sys.stdout.buffer) + +if __name__ == '__main__': + Main()