From b10cce1b8796dac59cb9b844026cc6847b8cafe0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Sat, 17 Apr 2021 16:28:42 +0200
Subject: [PATCH] deps: V8: cherry-pick d724820c1d5d

Original commit message:

    Merged: Squashed multiple commits.

    Merged: [test] Make finding build directory more flexible
    Revision: 4f015e85faf1d64466eafd897d1d59b1d77071f3

    Merged: [test] Use the correct precedence for choosing the build directory
    Revision: 7b24b13981e411602fc77db1305d0ae034a92fd8

    Merged: [test] Add fallback to legacy output directory
    Revision: bf3adea58aab3d21e36e23c60e1e0bbc994cd5b8

    Merged: [gcmole] Fix gcmole after property change
    Revision: c87bdbcf0d1d8f8bcc927f6b364d27e72c22736d

    Merged: [test] Overhaul mode processing in test runner
    Revision: 608b732d141689e8e10ee918afc8ed1fae1ab80c

    Merged: [test] Switch to flattened json output
    Revision: 373a9a8cfc8db3ef65fcdca0ec0c4ded9e4acc89

    BUG=chromium:1132088,v8:10893
    NOTRY=true
    NOTREECHECKS=true
    R=liviurau@chromium.org

    Change-Id: I3c1de04ca4fe62e36da29e706a20daec0b3d4d98
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2461745
    Reviewed-by: Liviu Rau
    Commit-Queue: Michael Achenbach
    Cr-Commit-Position: refs/branch-heads/8.6@{#20}
    Cr-Branched-From: a64aed2333abf49e494d2a5ce24bbd14fff19f60-refs/heads/8.6.395@{#1}
    Cr-Branched-From: a626bc036236c9bf92ac7b87dc40c9e538b087e3-refs/heads/master@{#69472}

Refs: https://github.com/v8/v8/commit/d724820c1d5d84a523fc02344401765bcaeaf372

PR-URL: https://github.com/nodejs/node/pull/38275
Reviewed-By: Matteo Collina
Reviewed-By: Jiawen Geng
Reviewed-By: Shelley Vohr
---
 common.gypi                                   |   2 +-
 deps/v8/tools/gcmole/gcmole.lua               |   2 +-
 deps/v8/tools/gcmole/run-gcmole.py            |   6 +-
 deps/v8/tools/run_perf.py                     |  41 +++--
 deps/v8/tools/testrunner/base_runner.py       | 170 ++++++------------
 deps/v8/tools/testrunner/standard_runner.py   |   4 +-
 deps/v8/tools/testrunner/testproc/progress.py |  18 +-
 deps/v8/tools/unittests/run_perf_test.py      |  42 ++++-
 deps/v8/tools/unittests/run_tests_test.py     |  47 +----
 .../testdata/builddirs/dir1/out/build/d8      |   1 +
 .../testdata/expected_test_results1.json      |  14 +-
 .../testdata/expected_test_results2.json      |  10 +-
 12 files changed, 153 insertions(+), 204 deletions(-)
 create mode 100644 deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8

diff --git a/common.gypi b/common.gypi
index 6529871fbffe1f..faf79d8b1efd0d 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.33',
+    'v8_embedder_string': '-node.34',
 
     ##### V8 defaults for Node.js #####
diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua
index a09c3b61ad5161..5ccf2e1b4c9d4d 100644
--- a/deps/v8/tools/gcmole/gcmole.lua
+++ b/deps/v8/tools/gcmole/gcmole.lua
@@ -116,7 +116,7 @@ local function MakeClangCommandLine(
       .. " -DV8_INTL_SUPPORT"
       .. " -I./"
       .. " -Iinclude/"
-      .. " -Iout/Release/gen"
+      .. " -Iout/build/gen"
       .. " -Ithird_party/icu/source/common"
       .. " -Ithird_party/icu/source/i18n"
       .. " " .. arch_options
diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py
index 6f2a091c3c7e62..40e2be9699b706 100755
--- a/deps/v8/tools/gcmole/run-gcmole.py
+++ b/deps/v8/tools/gcmole/run-gcmole.py
@@ -21,9 +21,9 @@
 
 assert len(sys.argv) == 2
 
-if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"):
-  print("Expected generated headers in out/Release/gen.")
-  print("Either build v8 in out/Release or change gcmole.lua:115")
+if not os.path.isfile("out/build/gen/torque-generated/builtin-definitions-tq.h"):
+  print("Expected generated headers in out/build/gen.")
+  print("Either build v8 in out/build or change gcmole.lua:115")
   sys.exit(-1)
 
 proc = subprocess.Popen(
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index d7255a94d35b94..80ea1f956592bd 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -575,6 +575,32 @@ def FlattenRunnables(node, node_cb):
     raise Exception('Invalid suite configuration.')
 
 
+def find_build_directory(base_path, arch):
+  """Returns the location of d8 or node in the build output directory.
+
+  This supports a seamless transition between legacy build location
+  (out/Release) and new build location (out/build).
+  """
+  def is_build(path):
+    # We support d8 or node as executables. We don't support testing on
+    # Windows.
+    return (os.path.isfile(os.path.join(path, 'd8')) or
+            os.path.isfile(os.path.join(path, 'node')))
+  possible_paths = [
+    # Location developer wrapper scripts is using.
+    '%s.release' % arch,
+    # Current build location on bots.
+    'build',
+    # Legacy build location on bots.
+    'Release',
+  ]
+  possible_paths = [os.path.join(base_path, p) for p in possible_paths]
+  actual_paths = filter(is_build, possible_paths)
+  assert actual_paths, 'No build directory found.'
+  assert len(actual_paths) == 1, 'Found ambiguous build directories.'
+  return actual_paths[0]
+
+
 class Platform(object):
   def __init__(self, args):
     self.shell_dir = args.shell_dir
@@ -881,8 +907,7 @@ def Main(argv):
                       'to auto-detect.', default='x64',
                       choices=SUPPORTED_ARCHS + ['auto'])
   parser.add_argument('--buildbot',
-                      help='Adapt to path structure used on buildbots and adds '
-                      'timestamps/level to all logged status messages',
+                      help='Deprecated',
                       default=False, action='store_true')
   parser.add_argument('-d', '--device',
                       help='The device ID to run Android tests on. If not '
@@ -978,13 +1003,9 @@ def Main(argv):
 
   workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
 
-  if args.buildbot:
-    build_config = 'Release'
-  else:
-    build_config = '%s.release' % args.arch
-
   if args.binary_override_path == None:
-    args.shell_dir = os.path.join(workspace, args.outdir, build_config)
+    args.shell_dir = find_build_directory(
+        os.path.join(workspace, args.outdir), args.arch)
     default_binary_name = 'd8'
   else:
     if not os.path.isfile(args.binary_override_path):
@@ -998,8 +1019,8 @@ def Main(argv):
     default_binary_name = os.path.basename(args.binary_override_path)
 
   if args.outdir_secondary:
-    args.shell_dir_secondary = os.path.join(
-        workspace, args.outdir_secondary, build_config)
+    args.shell_dir_secondary = find_build_directory(
+        os.path.join(workspace, args.outdir_secondary), args.arch)
   else:
     args.shell_dir_secondary = None
 
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index c4036bb918e3a3..cc731189f73243 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -6,7 +6,7 @@
 from __future__ import print_function
 from functools import reduce
 
-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
 import json
 import multiprocessing
 import optparse
@@ -115,52 +115,35 @@
 ]
 
 
-class ModeConfig(object):
-  def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):
-    self.flags = flags
-    self.timeout_scalefactor = timeout_scalefactor
-    self.status_mode = status_mode
-    self.execution_mode = execution_mode
-
+ModeConfig = namedtuple(
+    'ModeConfig', 'label flags timeout_scalefactor status_mode')
 
 DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
 RELEASE_FLAGS = ["--nohard-abort"]
-MODES = {
-  "debug": ModeConfig(
-    flags=DEBUG_FLAGS,
-    timeout_scalefactor=4,
-    status_mode="debug",
-    execution_mode="debug",
-  ),
-  "optdebug": ModeConfig(
+
+DEBUG_MODE = ModeConfig(
+    label='debug',
     flags=DEBUG_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="debug",
-  ),
-  "release": ModeConfig(
+)
+
+RELEASE_MODE = ModeConfig(
+    label='release',
     flags=RELEASE_FLAGS,
     timeout_scalefactor=1,
     status_mode="release",
-    execution_mode="release",
-  ),
-  # Normal trybot release configuration. There, dchecks are always on which
-  # implies debug is set. Hence, the status file needs to assume debug-like
-  # behavior/timeouts.
-  "tryrelease": ModeConfig(
+)
+
+# Normal trybot release configuration. There, dchecks are always on which
+# implies debug is set. Hence, the status file needs to assume debug-like
+# behavior/timeouts.
+TRY_RELEASE_MODE = ModeConfig(
+    label='release+dchecks',
     flags=RELEASE_FLAGS,
-    timeout_scalefactor=1,
-    status_mode="debug",
-    execution_mode="release",
-  ),
-  # This mode requires v8 to be compiled with dchecks and slow dchecks.
-  "slowrelease": ModeConfig(
-    flags=RELEASE_FLAGS + ["--enable-slow-asserts"],
-    timeout_scalefactor=2,
+    timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="release",
-  ),
-}
+)
 
 PROGRESS_INDICATORS = {
   'verbose': progress.VerboseProgressIndicator,
@@ -240,12 +223,29 @@ def __str__(self):
     return '\n'.join(detected_options)
 
 
+def _do_load_build_config(outdir, verbose=False):
+  build_config_path = os.path.join(outdir, "v8_build_config.json")
+  if not os.path.exists(build_config_path):
+    if verbose:
+      print("Didn't find build config: %s" % build_config_path)
+    raise TestRunnerError()
+
+  with open(build_config_path) as f:
+    try:
+      build_config_json = json.load(f)
+    except Exception:  # pragma: no cover
+      print("%s exists but contains invalid json. Is your build up-to-date?"
+            % build_config_path)
+      raise TestRunnerError()
+
+  return BuildConfig(build_config_json)
+
+
 class BaseTestRunner(object):
   def __init__(self, basedir=None):
     self.basedir = basedir or BASE_DIR
     self.outdir = None
     self.build_config = None
-    self.mode_name = None
     self.mode_options = None
     self.target_os = None
 
@@ -279,7 +279,7 @@ def execute(self, sys_args=None):
       tests = self._load_testsuite_generators(args, options)
       self._setup_env()
       print(">>> Running tests for %s.%s" % (self.build_config.arch,
-                                             self.mode_name))
+                                             self.mode_options.label))
       exit_code = self._do_execute(tests, args, options)
       if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results:
         print("Force exit code 0 after failures. Json test results file "
@@ -313,9 +313,6 @@ def _add_parser_default_options(self, parser):
                       default="out")
     parser.add_option("--arch",
                       help="The architecture to run tests for")
-    parser.add_option("-m", "--mode",
-                      help="The test mode in which to run (uppercase for builds"
-                           " in CI): %s" % MODES.keys())
     parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                       "directory will be used")
     parser.add_option("--test-root", help="Root directory of the test suites",
@@ -400,9 +397,8 @@ def _add_parser_options(self, parser):
   def _parse_args(self, parser, sys_args):
     options, args = parser.parse_args(sys_args)
 
-    if any(map(lambda v: v and ',' in v,
-               [options.arch, options.mode])):  # pragma: no cover
-      print('Multiple arch/mode are deprecated')
+    if options.arch and ',' in options.arch:  # pragma: no cover
+      print('Multiple architectures are deprecated')
       raise TestRunnerError()
 
     return options, args
@@ -410,7 +406,12 @@ def _parse_args(self, parser, sys_args):
   def _load_build_config(self, options):
     for outdir in self._possible_outdirs(options):
       try:
-        self.build_config = self._do_load_build_config(outdir, options.verbose)
+        self.build_config = _do_load_build_config(outdir, options.verbose)
+
+        # In auto-detect mode the outdir is always where we found the build config.
+        # This ensures that we'll also take the build products from there.
+        self.outdir = outdir
+
         break
       except TestRunnerError:
         pass
@@ -433,8 +434,7 @@ def _load_build_config(self, options):
   # Returns possible build paths in order:
   # gn
   # outdir
-  # outdir/arch.mode
-  # Each path is provided in two versions: <path> and <path>/mode for bots.
+  # outdir on bots
   def _possible_outdirs(self, options):
     def outdirs():
       if options.gn:
@@ -442,17 +442,13 @@ def outdirs():
         return
 
       yield options.outdir
-      if options.arch and options.mode:
-        yield os.path.join(options.outdir,
-                           '%s.%s' % (options.arch, options.mode))
+
+      if os.path.basename(options.outdir) != 'build':
+        yield os.path.join(options.outdir, 'build')
 
     for outdir in outdirs():
       yield os.path.join(self.basedir, outdir)
 
-    # bot option
-    if options.mode:
-      yield os.path.join(self.basedir, outdir, options.mode)
-
   def _get_gn_outdir(self):
     gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
     latest_timestamp = -1
@@ -468,51 +464,13 @@ def _get_gn_outdir(self):
       print(">>> Latest GN build found: %s" % latest_config)
       return os.path.join(DEFAULT_OUT_GN, latest_config)
 
-  def _do_load_build_config(self, outdir, verbose=False):
-    build_config_path = os.path.join(outdir, "v8_build_config.json")
-    if not os.path.exists(build_config_path):
-      if verbose:
-        print("Didn't find build config: %s" % build_config_path)
-      raise TestRunnerError()
-
-    with open(build_config_path) as f:
-      try:
-        build_config_json = json.load(f)
-      except Exception:  # pragma: no cover
-        print("%s exists but contains invalid json. Is your build up-to-date?"
-              % build_config_path)
-        raise TestRunnerError()
-
-    # In auto-detect mode the outdir is always where we found the build config.
-    # This ensures that we'll also take the build products from there.
-    self.outdir = os.path.dirname(build_config_path)
-
-    return BuildConfig(build_config_json)
-
   def _process_default_options(self, options):
-    # We don't use the mode for more path-magic.
-    # Therefore transform the bot mode here to fix build_config value.
-    if options.mode:
-      options.mode = self._bot_to_v8_mode(options.mode)
-
-    build_config_mode = 'debug' if self.build_config.is_debug else 'release'
-    if options.mode:
-      if options.mode not in MODES:  # pragma: no cover
-        print('%s mode is invalid' % options.mode)
-        raise TestRunnerError()
-      if MODES[options.mode].execution_mode != build_config_mode:
-        print ('execution mode (%s) for %s is inconsistent with build config '
-               '(%s)' % (
-          MODES[options.mode].execution_mode,
-          options.mode,
-          build_config_mode))
-        raise TestRunnerError()
-
-      self.mode_name = options.mode
+    if self.build_config.is_debug:
+      self.mode_options = DEBUG_MODE
+    elif self.build_config.dcheck_always_on:
+      self.mode_options = TRY_RELEASE_MODE
     else:
-      self.mode_name = build_config_mode
-
-    self.mode_options = MODES[self.mode_name]
+      self.mode_options = RELEASE_MODE
 
     if options.arch and options.arch != self.build_config.arch:
       print('--arch value (%s) inconsistent with build config (%s).' % (
@@ -533,15 +491,6 @@ def _process_default_options(self, options):
       options.command_prefix = shlex.split(options.command_prefix)
     options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
 
-  def _bot_to_v8_mode(self, config):
-    """Convert build configs from bots to configs understood by the v8 runner.
-
-    V8 configs are always lower case and without the additional _x64 suffix
-    for 64 bit builds on windows with ninja.
-    """
-    mode = config[:-4] if config.endswith('_x64') else config
-    return mode.lower()
-
   def _process_options(self, options):
     pass
 
@@ -689,9 +638,7 @@ def _get_statusfile_variables(self, options):
       "is_clang": self.build_config.is_clang,
       "is_full_debug": self.build_config.is_full_debug,
       "mips_arch_variant": mips_arch_variant,
-      "mode": self.mode_options.status_mode
-              if not self.build_config.dcheck_always_on
-              else "debug",
+      "mode": self.mode_options.status_mode,
       "msan": self.build_config.msan,
       "no_harness": options.no_harness,
       "no_i18n": self.build_config.no_i18n,
@@ -804,10 +751,7 @@ def _create_progress_indicators(self, test_count, options):
       procs.append(progress.JUnitTestProgressIndicator(options.junitout,
                                                        options.junittestsuite))
     if options.json_test_results:
-      procs.append(progress.JsonTestProgressIndicator(
-        self.framework_name,
-        self.build_config.arch,
-        self.mode_options.execution_mode))
+      procs.append(progress.JsonTestProgressIndicator(self.framework_name))
 
     for proc in procs:
       proc.configure(options)
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 10545fa5f2417b..f6c61466ba00eb 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -379,10 +379,8 @@ def _duration_results_text(test):
     ]
 
     assert os.path.exists(options.json_test_results)
-    complete_results = []
     with open(options.json_test_results, "r") as f:
-      complete_results = json.loads(f.read())
-      output = complete_results[0]
+      output = json.load(f)
     lines = []
     for test in output['slowest_tests']:
       suffix = ''
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index a993fc18a372cb..1c1b163e37cf13 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -358,7 +358,7 @@ def finished(self):
 
 
 class JsonTestProgressIndicator(ProgressIndicator):
-  def __init__(self, framework_name, arch, mode):
+  def __init__(self, framework_name):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -367,8 +367,6 @@ def __init__(self, framework_name, arch, mode):
     self._requirement = base.DROP_PASS_STDOUT
 
     self.framework_name = framework_name
-    self.arch = arch
-    self.mode = mode
     self.results = []
     self.duration_sum = 0
     self.test_count = 0
@@ -438,24 +436,16 @@ def _test_record(self, test, result, output, run):
     }
 
   def finished(self):
-    complete_results = []
-    if os.path.exists(self.options.json_test_results):
-      with open(self.options.json_test_results, "r") as f:
-        # On bots we might start out with an empty file.
-        complete_results = json.loads(f.read() or "[]")
-
     duration_mean = None
     if self.test_count:
       duration_mean = self.duration_sum / self.test_count
 
-    complete_results.append({
-      "arch": self.arch,
-      "mode": self.mode,
+    result = {
       "results": self.results,
       "slowest_tests": self.tests.as_list(),
      "duration_mean": duration_mean,
       "test_total": self.test_count,
-    })
+    }
 
     with open(self.options.json_test_results, "w") as f:
-      f.write(json.dumps(complete_results))
+      json.dump(result, f)
diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py
index 6cd63ac2b66798..28f71b2b339115 100755
--- a/deps/v8/tools/unittests/run_perf_test.py
+++ b/deps/v8/tools/unittests/run_perf_test.py
@@ -90,6 +90,21 @@
   'units': 'ms',
 }
 
+
+class UnitTest(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    sys.path.insert(0, BASE_DIR)
+    import run_perf
+    global run_perf
+
+  def testBuildDirectory(self):
+    base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out')
+    expected_path = os.path.join(base_path, 'build')
+    self.assertEquals(
+        expected_path, run_perf.find_build_directory(base_path, 'x64'))
+
+
 class PerfTest(unittest.TestCase):
   @classmethod
   def setUpClass(cls):
@@ -125,6 +140,7 @@ def _WriteTestInput(self, json_content):
     f.write(json.dumps(json_content))
 
   def _MockCommand(self, *args, **kwargs):
+    on_bots = kwargs.pop('on_bots', False)
     # Fake output for each test run.
     test_outputs = [Output(stdout=arg,
                            timed_out=kwargs.get('timed_out', False),
@@ -142,6 +158,16 @@ def execute(*args, **kwargs):
         run_perf.command, 'PosixCommand',
         mock.MagicMock(side_effect=create_cmd)).start()
 
+    build_dir = 'Release' if on_bots else 'x64.release'
+    out_dirs = ['out', 'out-secondary']
+    return_values = [
+      os.path.join(os.path.dirname(BASE_DIR), out, build_dir)
+      for out in out_dirs
+    ]
+    mock.patch.object(
+        run_perf, 'find_build_directory',
+        mock.MagicMock(side_effect=return_values)).start()
+
     # Check that d8 is called from the correct cwd for each test run.
     dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]]
     def chdir(*args, **kwargs):
@@ -394,11 +420,12 @@ def testTwoRunsStdDevRegExp(self):
 
   def testBuildbot(self):
     self._WriteTestInput(V8_JSON)
-    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
+                      on_bots=True)
     mock.patch.object(
         run_perf.Platform, 'ReadBuildConfig',
         mock.MagicMock(return_value={'is_android': False})).start()
-    self.assertEqual(0, self._CallMain('--buildbot'))
+    self.assertEqual(0, self._CallMain())
     self._VerifyResults('test', 'score', [
       {'name': 'Richards', 'results': [1.234], 'stddev': ''},
       {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
@@ -410,11 +437,12 @@ def testBuildbotWithTotal(self):
     test_input = dict(V8_JSON)
     test_input['total'] = True
     self._WriteTestInput(test_input)
-    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'])
+    self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'],
+                      on_bots=True)
     mock.patch.object(
         run_perf.Platform, 'ReadBuildConfig',
         mock.MagicMock(return_value={'is_android': False})).start()
-    self.assertEqual(0, self._CallMain('--buildbot'))
+    self.assertEqual(0, self._CallMain())
     self._VerifyResults('test', 'score', [
       {'name': 'Richards', 'results': [1.234], 'stddev': ''},
       {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
@@ -427,11 +455,12 @@ def testBuildbotWithTotalAndErrors(self):
     test_input = dict(V8_JSON)
     test_input['total'] = True
     self._WriteTestInput(test_input)
-    self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'])
+    self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'],
+                      on_bots=True)
     mock.patch.object(
         run_perf.Platform, 'ReadBuildConfig',
         mock.MagicMock(return_value={'is_android': False})).start()
-    self.assertEqual(1, self._CallMain('--buildbot'))
+    self.assertEqual(1, self._CallMain())
     self._VerifyResults('test', 'score', [
       {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''},
     ])
@@ -484,6 +513,7 @@ def testAndroid(self):
     mock.patch('run_perf.AndroidPlatform.PreExecution').start()
    mock.patch('run_perf.AndroidPlatform.PostExecution').start()
     mock.patch('run_perf.AndroidPlatform.PreTests').start()
+    mock.patch('run_perf.find_build_directory').start()
     mock.patch(
         'run_perf.AndroidPlatform.Run',
         return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'),
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index 3fc91b8e90f023..8b3275172d55a4 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -67,7 +67,7 @@ def temp_base(baseroot='testroot1'):
   """
   basedir = os.path.join(TEST_DATA_ROOT, baseroot)
   with temp_dir() as tempbase:
-    builddir = os.path.join(tempbase, 'out', 'Release')
+    builddir = os.path.join(tempbase, 'out', 'build')
     testroot = os.path.join(tempbase, 'test')
     os.makedirs(builddir)
     shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
@@ -112,7 +112,7 @@ def run_tests(basedir, *args, **kwargs):
 
 def override_build_config(basedir, **kwargs):
   """Override the build config with new values provided as kwargs."""
-  path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
+  path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
   with open(path) as f:
     config = json.load(f)
     config.update(kwargs)
@@ -171,7 +171,6 @@ def testPass(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
          '--progress=verbose',
           '--variants=default,stress',
           '--time',
@@ -189,7 +188,6 @@ def testShardedProc(self):
       for shard in [1, 2]:
         result = run_tests(
             basedir,
-            '--mode=Release',
             '--progress=verbose',
             '--variants=default,stress',
             '--shard-count=2',
@@ -220,7 +218,6 @@ def testSharded(self):
       for shard in [1, 2]:
         result = run_tests(
             basedir,
-            '--mode=Release',
             '--progress=verbose',
             '--variants=default,stress',
             '--shard-count=2',
@@ -239,7 +236,6 @@ def testFail(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default,stress',
           'sweet/strawberries',
@@ -252,7 +248,7 @@ def check_cleaned_json_output(
       self, expected_results_name, actual_json, basedir):
     # Check relevant properties of the json output.
     with open(actual_json) as f:
-      json_output = json.load(f)[0]
+      json_output = json.load(f)
 
     # Replace duration in actual output as it's non-deterministic. Also
     # replace the python executable prefix as it has a different absolute
@@ -285,7 +281,6 @@ def testFailWithRerunAndJSON(self):
       json_path = os.path.join(basedir, 'out.json')
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           '--rerun-failures-count=2',
@@ -314,7 +309,6 @@ def testFlakeWithRerunAndJSON(self):
       json_path = os.path.join(basedir, 'out.json')
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           '--rerun-failures-count=2',
@@ -346,7 +340,6 @@ def testAutoDetect(self):
           v8_enable_pointer_compression=False)
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           'sweet/bananas',
@@ -371,7 +364,6 @@ def testSkips(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=nooptimization',
           'sweet/strawberries',
@@ -385,7 +377,6 @@ def testRunSkips(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=nooptimization',
           '--run-skipped',
@@ -402,7 +393,6 @@ def testDefault(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           infra_staging=False,
       )
       self.assertIn('0 tests ran', result.stdout, result)
@@ -410,24 +400,15 @@ def testDefault(self):
 
   def testNoBuildConfig(self):
     """Test failing run when build config is not found."""
-    with temp_base() as basedir:
+    with temp_dir() as basedir:
       result = run_tests(basedir)
       self.assertIn('Failed to load build config', result.stdout, result)
       self.assertEqual(5, result.returncode, result)
 
-  def testInconsistentMode(self):
-    """Test failing run when attempting to wrongly override the mode."""
-    with temp_base() as basedir:
-      override_build_config(basedir, is_debug=True)
-      result = run_tests(basedir, '--mode=Release')
-      self.assertIn('execution mode (release) for release is inconsistent '
-                    'with build config (debug)', result.stdout, result)
-      self.assertEqual(5, result.returncode, result)
-
   def testInconsistentArch(self):
     """Test failing run when attempting to wrongly override the arch."""
     with temp_base() as basedir:
-      result = run_tests(basedir, '--mode=Release', '--arch=ia32')
+      result = run_tests(basedir, '--arch=ia32')
       self.assertIn(
           '--arch value (ia32) inconsistent with build config (x64).',
           result.stdout, result)
@@ -436,13 +417,13 @@ def testInconsistentArch(self):
   def testWrongVariant(self):
     """Test using a bogus variant."""
     with temp_base() as basedir:
-      result = run_tests(basedir, '--mode=Release', '--variants=meh')
+      result = run_tests(basedir, '--variants=meh')
       self.assertEqual(5, result.returncode, result)
 
   def testModeFromBuildConfig(self):
     """Test auto-detection of mode from build config."""
     with temp_base() as basedir:
-      result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
+      result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas')
       self.assertIn('Running tests for x64.release', result.stdout, result)
       self.assertEqual(0, result.returncode, result)
 
@@ -455,7 +436,6 @@ def testReport(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--variants=default',
           'sweet',
           '--report',
@@ -471,7 +451,6 @@ def testWarnUnusedRules(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
          '--variants=default,nooptimization',
           'sweet',
           '--warn-unused',
@@ -486,7 +465,6 @@ def testCatNoSources(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--variants=default',
           'sweet/bananas',
           '--cat',
@@ -505,7 +483,6 @@ def testPredictable(self):
       override_build_config(basedir, v8_enable_verify_predictable=True)
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           'sweet/bananas',
@@ -524,7 +501,6 @@ def testSlowArch(self):
       override_build_config(basedir, v8_target_cpu='arm64')
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           'sweet/bananas',
@@ -538,7 +514,6 @@ def testRandomSeedStressWithDefault(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           '--random-seed-stress-count=2',
@@ -553,7 +528,6 @@ def testRandomSeedStressWithSeed(self):
     with temp_base() as basedir:
      result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           '--random-seed-stress-count=2',
@@ -577,7 +551,6 @@ def testSpecificVariants(self):
       override_build_config(basedir, is_asan=True)
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default,stress',
           'sweet/bananas',
@@ -599,7 +572,6 @@ def testDotsProgress(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=dots',
           'sweet/cherries',
           'sweet/bananas',
@@ -620,7 +592,6 @@ def _testCompactProgress(self, name):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=%s' % name,
           'sweet/cherries',
           'sweet/bananas',
@@ -641,7 +612,6 @@ def testExitAfterNFailures(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--exit-after-n-failures=2',
           '-j1',
@@ -660,7 +630,7 @@ def testExitAfterNFailures(self):
     self.assertEqual(1, result.returncode, result)
 
   def testNumFuzzer(self):
-    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release']
+    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build']
 
     with temp_base() as basedir:
       with capture() as (stdout, stderr):
@@ -674,7 +644,6 @@ def testRunnerFlags(self):
     with temp_base() as basedir:
       result = run_tests(
           basedir,
-          '--mode=Release',
           '--progress=verbose',
           '--variants=default',
           '--random-seed=42',
diff --git a/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8
new file mode 100644
index 00000000000000..9daeafb9864cf4
--- /dev/null
+++ b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8
@@ -0,0 +1 @@
+test
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json
index d1fdb49525d8d8..08ac623cd734b2 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results1.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json
@@ -1,10 +1,8 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -29,7 +27,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -54,7 +52,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -81,7 +79,7 @@
   ],
   "slowest_tests": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -105,7 +103,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -129,7 +127,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json
index ac9ab9cc595845..dc353f687553e5 100644
--- a/deps/v8/tools/unittests/testdata/expected_test_results2.json
+++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json
@@ -1,10 +1,8 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [
@@ -28,7 +26,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 0,
       "expected": [
@@ -54,7 +52,7 @@
   ],
   "slowest_tests": [
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 0,
       "expected": [
@@ -77,7 +75,7 @@
       "variant_flags": []
     },
     {
-      "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
+      "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
       "duration": 1,
       "exit_code": 1,
       "expected": [