diff --git a/common.gypi b/common.gypi index 96d7ebe61acfeb..ed0d11a619c357 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.20', + 'v8_embedder_string': '-node.7', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.vpython b/deps/v8/.vpython deleted file mode 100644 index d4a07677ca9a22..00000000000000 --- a/deps/v8/.vpython +++ /dev/null @@ -1,91 +0,0 @@ -# This is a vpython "spec" file. -# -# It describes patterns for python wheel dependencies of the python scripts in -# the V8 repo, particularly for dependencies that have compiled components -# (since pure-python dependencies can be easily vendored into third_party). -# -# When vpython is invoked, it finds this file and builds a python VirtualEnv, -# containing all of the dependencies described in this file, fetching them from -# CIPD (the "Chrome Infrastructure Package Deployer" service). Unlike `pip`, -# this never requires the end-user machine to have a working python extension -# compilation environment. All of these packages are built using: -# https://chromium.googlesource.com/infra/infra/+/master/infra/tools/dockerbuild/ -# -# All python scripts in the repo share this same spec, to avoid dependency -# fragmentation. -# -# If you have depot_tools installed in your $PATH, you can invoke python scripts -# in this repo by running them as you normally would run them, except -# substituting `vpython` instead of `python` on the command line, e.g.: -# vpython path/to/script.py some --arguments -# -# Read more about `vpython` and how to modify this file here: -# https://chromium.googlesource.com/infra/infra/+/master/doc/users/vpython.md - -python_version: "2.7" - -# The default set of platforms vpython checks does not yet include mac-arm64. -# Setting `verify_pep425_tag` to the list of platforms we explicitly must support -# allows us to ensure that vpython specs stay mac-arm64-friendly -verify_pep425_tag: [ - {python: "cp27", abi: "cp27mu", platform: "manylinux1_x86_64"}, - {python: "cp27", abi: "cp27mu", platform: "linux_arm64"}, - {python: "cp27", abi: "cp27mu", platform: "linux_armv6l"}, - - {python: "cp27", abi: "cp27m", platform: "macosx_10_10_intel"}, - {python: "cp27", abi: "cp27m", platform: "macosx_11_0_arm64"}, - - {python: "cp27", abi: "cp27m", platform: "win32"}, - {python: "cp27", abi: "cp27m", platform: "win_amd64"} -] - -# Needed by third_party/catapult/devil/devil, which is imported by -# build/android/test_runner.py when running performance tests. 
-wheel: < - name: "infra/python/wheels/psutil/${vpython_platform}" - version: "version:5.2.2" -> - -# Used by: -# build/toolchain/win -wheel: < - name: "infra/python/wheels/pypiwin32/${vpython_platform}" - version: "version:219" - match_tag: < - platform: "win32" - > - match_tag: < - platform: "win_amd64" - > -> - -# Used by: -# tools/unittests/run_perf_test.py -wheel: < - name: "infra/python/wheels/coverage/${vpython_platform}" - version: "version:4.3.4" -> -wheel: < - name: "infra/python/wheels/six-py2_py3" - version: "version:1.10.0" -> -wheel: < - name: "infra/python/wheels/pbr-py2_py3" - version: "version:3.0.0" -> -wheel: < - name: "infra/python/wheels/funcsigs-py2_py3" - version: "version:1.0.2" -> -wheel: < - name: "infra/python/wheels/mock-py2_py3" - version: "version:2.0.0" -> - -# Used by: -# tools/run_perf.py -# tools/unittests/run_perf_test.py -wheel: < - name: "infra/python/wheels/numpy/${vpython_platform}" - version: "version:1.11.3" -> diff --git a/deps/v8/.vpython3 b/deps/v8/.vpython3 index 50fab3bb519735..1187542f5e19a1 100644 --- a/deps/v8/.vpython3 +++ b/deps/v8/.vpython3 @@ -47,7 +47,7 @@ wheel: < wheel: < name: "infra/python/wheels/coverage/${vpython_platform}" - version: "version:5.5.chromium.2" + version: "version:5.5.chromium.3" > wheel: < @@ -74,3 +74,8 @@ wheel: < name: "infra/python/wheels/protobuf-py3" version: "version:3.19.3" > + +wheel: < + name: "infra/python/wheels/requests-py2_py3" + version: "version:2.13.0" +> diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 74d6f3b07ff643..af37f8db25121e 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -137,6 +137,7 @@ Ingvar Stepanyan Ioseb Dzmanashvili Isiah Meadows Jaime Bernardo +Jake Hughes James M Snell James Pike Jan Krems diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index 4e89f90e7e31e1..f216a1811da852 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -891,6 +891,7 @@ filegroup( "src/builtins/typed-array-sort.tq", "src/builtins/typed-array-subarray.tq", "src/builtins/typed-array-to-reversed.tq", + "src/builtins/typed-array-to-sorted.tq", "src/builtins/typed-array-values.tq", "src/builtins/typed-array-with.tq", "src/builtins/typed-array.tq", @@ -925,6 +926,7 @@ filegroup( "src/objects/js-objects.tq", "src/objects/js-promise.tq", "src/objects/js-proxy.tq", + "src/objects/js-raw-json.tq", "src/objects/js-regexp-string-iterator.tq", "src/objects/js-regexp.tq", "src/objects/js-shadow-realm.tq", @@ -980,6 +982,7 @@ filegroup( "src/objects/js-collator.tq", "src/objects/js-date-time-format.tq", "src/objects/js-display-names.tq", + "src/objects/js-duration-format.tq", "src/objects/js-list-format.tq", "src/objects/js-locale.tq", "src/objects/js-number-format.tq", @@ -1267,8 +1270,6 @@ filegroup( "src/debug/debug-scopes.h", "src/debug/debug-stack-trace-iterator.cc", "src/debug/debug-stack-trace-iterator.h", - "src/debug/debug-type-profile.cc", - "src/debug/debug-type-profile.h", "src/debug/debug.cc", "src/debug/debug.h", "src/debug/interface-types.h", @@ -1435,6 +1436,9 @@ filegroup( "src/heap/embedder-tracing.cc", "src/heap/embedder-tracing.h", "src/heap/embedder-tracing-inl.h", + "src/heap/evacuation-verifier.cc", + "src/heap/evacuation-verifier.h", + "src/heap/evacuation-verifier-inl.h", "src/heap/factory-base.cc", "src/heap/factory-base.h", "src/heap/factory-base-inl.h", @@ -1497,6 +1501,8 @@ filegroup( "src/heap/marking-barrier.cc", "src/heap/marking-barrier.h", "src/heap/marking-barrier-inl.h", + "src/heap/marking-state.h", + "src/heap/marking-state-inl.h", 
"src/heap/marking-visitor-inl.h", "src/heap/marking-visitor.h", "src/heap/marking-worklist-inl.h", @@ -1529,6 +1535,9 @@ filegroup( "src/heap/paged-spaces.h", "src/heap/parallel-work-item.h", "src/heap/parked-scope.h", + "src/heap/pretenuring-handler-inl.h", + "src/heap/pretenuring-handler.cc", + "src/heap/pretenuring-handler.h", "src/heap/progress-bar.h", "src/heap/read-only-heap-inl.h", "src/heap/read-only-heap.cc", @@ -1753,6 +1762,9 @@ filegroup( "src/objects/js-promise.h", "src/objects/js-proxy-inl.h", "src/objects/js-proxy.h", + "src/objects/js-raw-json-inl.h", + "src/objects/js-raw-json.h", + "src/objects/js-raw-json.cc", "src/objects/js-regexp-inl.h", "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", @@ -1978,8 +1990,6 @@ filegroup( "src/regexp/experimental/experimental-interpreter.h", "src/regexp/experimental/experimental.cc", "src/regexp/experimental/experimental.h", - "src/regexp/property-sequences.cc", - "src/regexp/property-sequences.h", "src/regexp/regexp-ast.cc", "src/regexp/regexp-ast.h", "src/regexp/regexp-bytecode-generator-inl.h", @@ -2061,6 +2071,8 @@ filegroup( "src/sandbox/sandbox.h", "src/sandbox/sandboxed-pointer-inl.h", "src/sandbox/sandboxed-pointer.h", + "src/sandbox/bounded-size-inl.h", + "src/sandbox/bounded-size.h", "src/base/sanitizer/asan.h", "src/base/sanitizer/lsan-page-allocator.cc", "src/base/sanitizer/lsan-page-allocator.h", @@ -2527,8 +2539,8 @@ filegroup( "src/wasm/names-provider.cc", "src/wasm/names-provider.h", "src/wasm/object-access.h", - "src/wasm/signature-map.cc", - "src/wasm/signature-map.h", + "src/wasm/pgo.cc", + "src/wasm/pgo.h", "src/wasm/simd-shuffle.cc", "src/wasm/simd-shuffle.h", "src/wasm/stacks.cc", @@ -2606,6 +2618,9 @@ filegroup( "src/objects/js-display-names.cc", "src/objects/js-display-names.h", "src/objects/js-display-names-inl.h", + "src/objects/js-duration-format.cc", + "src/objects/js-duration-format.h", + "src/objects/js-duration-format-inl.h", "src/objects/js-list-format.cc", "src/objects/js-list-format.h", "src/objects/js-list-format-inl.h", @@ -2872,13 +2887,20 @@ filegroup( "src/compiler/turboshaft/graph.h", "src/compiler/turboshaft/graph-visualizer.cc", "src/compiler/turboshaft/graph-visualizer.h", + "src/compiler/turboshaft/machine-optimization-assembler.h", "src/compiler/turboshaft/operations.cc", "src/compiler/turboshaft/operations.h", + "src/compiler/turboshaft/operation-matching.h", "src/compiler/turboshaft/optimization-phase.cc", "src/compiler/turboshaft/optimization-phase.h", "src/compiler/turboshaft/recreate-schedule.cc", "src/compiler/turboshaft/recreate-schedule.h", + "src/compiler/turboshaft/representations.cc", + "src/compiler/turboshaft/representations.h", "src/compiler/turboshaft/sidetable.h", + "src/compiler/turboshaft/simplify-tf-loops.cc", + "src/compiler/turboshaft/simplify-tf-loops.h", + "src/compiler/turboshaft/utils.cc", "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-assembler.h", "src/compiler/type-cache.cc", @@ -2891,6 +2913,7 @@ filegroup( "src/compiler/typer.h", "src/compiler/types.cc", "src/compiler/types.h", + "src/compiler/use-info.h", "src/compiler/value-numbering-reducer.cc", "src/compiler/value-numbering-reducer.h", "src/compiler/verifier.cc", @@ -3101,6 +3124,7 @@ filegroup( "src/heap/cppgc/stats-collector.h", "src/heap/cppgc/sweeper.cc", "src/heap/cppgc/sweeper.h", + "src/heap/cppgc/heap-config.h", "src/heap/cppgc/task-handle.h", "src/heap/cppgc/trace-event.h", "src/heap/cppgc/trace-trait.cc", @@ -3119,6 +3143,7 @@ 
filegroup( srcs = [ "src/heap/base/active-system-pages.cc", "src/heap/base/active-system-pages.h", + "src/heap/base/basic-slot-set.h", "src/heap/base/stack.cc", "src/heap/base/stack.h", "src/heap/base/worklist.cc", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 3f78b1773f3907..61187af3053a02 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -285,8 +285,11 @@ declare_args() { target_os == "fuchsia" # Enable control-flow integrity features, such as pointer authentication for - # ARM64. - v8_control_flow_integrity = false + # ARM64. Enable it by default for simulator builds and when native code + # supports it as well. + v8_control_flow_integrity = + v8_current_cpu == "arm64" && + (target_is_simulator || arm_control_flow_integrity != "none") # Enable heap reservation of size 4GB. Only possible for 64bit archs. cppgc_enable_caged_heap = @@ -465,6 +468,8 @@ if (v8_builtins_profiling_log_file == "default") { v8_builtins_profiling_log_file = "tools/builtins-pgo/x64.profile" } else if (v8_current_cpu == "arm64") { v8_builtins_profiling_log_file = "tools/builtins-pgo/arm64.profile" + } else if (v8_current_cpu == "arm") { + v8_builtins_profiling_log_file = "tools/builtins-pgo/arm.profile" } } } @@ -496,14 +501,6 @@ assert(!v8_enable_trace_ignition || v8_enable_trace_unoptimized, assert(!v8_enable_trace_baseline_exec || v8_enable_trace_unoptimized, "Baseline tracing requires unoptimized tracing to be enabled.") -# Check if it is a Chromium build and activate PAC/BTI if needed. -# TODO(cavalcantii): have a single point of integration with PAC/BTI flags. -if (build_with_chromium && v8_current_cpu == "arm64" && - (arm_control_flow_integrity == "standard" || - arm_control_flow_integrity == "pac")) { - v8_control_flow_integrity = true -} - if (v8_enable_short_builtin_calls && (!v8_enable_pointer_compression && v8_current_cpu != "x64")) { # Disable short calls when pointer compression is not enabled, except x64, @@ -521,9 +518,8 @@ if (v8_enable_sandbox == "") { # once that is enabled everywhere by default. # TODO(chromium:1325784) the sandbox is not currently supported in Chromium # on Fuchsia. - v8_enable_sandbox = - build_with_chromium && v8_enable_pointer_compression_shared_cage && - v8_enable_external_code_space && target_os != "fuchsia" + v8_enable_sandbox = v8_enable_pointer_compression_shared_cage && + v8_enable_external_code_space && target_os != "fuchsia" } # Enable all available sandbox features if sandbox future is enabled. @@ -1102,18 +1098,9 @@ config("toolchain") { } if (v8_current_cpu == "arm64") { defines += [ "V8_TARGET_ARCH_ARM64" ] - if (current_cpu == "arm64") { - # This will enable PAC+BTI in code generation and static code. - if (v8_control_flow_integrity && - (!build_with_chromium || arm_control_flow_integrity == "standard")) { - cflags += [ "-mbranch-protection=standard" ] - asmflags = [ "-mmark-bti-property" ] - } else if (build_with_chromium && arm_control_flow_integrity == "pac") { - # This should enable PAC only in C++ code (and no CFI in runtime - # generated code). For details, see crbug.com/919548. - cflags += [ "-mbranch-protection=pac-ret" ] - asmflags = [ "-mbranch-protection=pac-ret" ] - } + if (current_cpu == "arm64" && v8_control_flow_integrity && is_clang) { + # Mark assembly code as BTI-compatible. 
+ asmflags = [ "-mmark-bti-property" ] } } @@ -1619,6 +1606,9 @@ action("postmortem-metadata") { "src/objects/js-objects-inl.h", "src/objects/js-promise.h", "src/objects/js-promise-inl.h", + "src/objects/js-raw-json.cc", + "src/objects/js-raw-json.h", + "src/objects/js-raw-json-inl.h", "src/objects/js-regexp.cc", "src/objects/js-regexp.h", "src/objects/js-regexp-inl.h", @@ -1790,6 +1780,7 @@ torque_files = [ "src/builtins/typed-array-sort.tq", "src/builtins/typed-array-subarray.tq", "src/builtins/typed-array-to-reversed.tq", + "src/builtins/typed-array-to-sorted.tq", "src/builtins/typed-array-values.tq", "src/builtins/typed-array-with.tq", "src/builtins/typed-array.tq", @@ -1824,6 +1815,7 @@ torque_files = [ "src/objects/js-objects.tq", "src/objects/js-promise.tq", "src/objects/js-proxy.tq", + "src/objects/js-raw-json.tq", "src/objects/js-regexp-string-iterator.tq", "src/objects/js-regexp.tq", "src/objects/js-shadow-realm.tq", @@ -1870,6 +1862,7 @@ if (v8_enable_i18n_support) { "src/objects/js-collator.tq", "src/objects/js-date-time-format.tq", "src/objects/js-display-names.tq", + "src/objects/js-duration-format.tq", "src/objects/js-list-format.tq", "src/objects/js-locale.tq", "src/objects/js-number-format.tq", @@ -2930,10 +2923,14 @@ v8_header_set("v8_internal_headers") { "src/compiler/turboshaft/graph-builder.h", "src/compiler/turboshaft/graph-visualizer.h", "src/compiler/turboshaft/graph.h", + "src/compiler/turboshaft/machine-optimization-assembler.h", + "src/compiler/turboshaft/operation-matching.h", "src/compiler/turboshaft/operations.h", "src/compiler/turboshaft/optimization-phase.h", "src/compiler/turboshaft/recreate-schedule.h", + "src/compiler/turboshaft/representations.h", "src/compiler/turboshaft/sidetable.h", + "src/compiler/turboshaft/simplify-tf-loops.h", "src/compiler/turboshaft/utils.h", "src/compiler/turboshaft/value-numbering-assembler.h", "src/compiler/type-cache.h", @@ -2941,6 +2938,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/typed-optimization.h", "src/compiler/typer.h", "src/compiler/types.h", + "src/compiler/use-info.h", "src/compiler/value-numbering-reducer.h", "src/compiler/verifier.h", "src/compiler/write-barrier-kind.h", @@ -2956,7 +2954,6 @@ v8_header_set("v8_internal_headers") { "src/debug/debug-scope-iterator.h", "src/debug/debug-scopes.h", "src/debug/debug-stack-trace-iterator.h", - "src/debug/debug-type-profile.h", "src/debug/debug.h", "src/debug/interface-types.h", "src/debug/liveedit-diff.h", @@ -3053,6 +3050,8 @@ v8_header_set("v8_internal_headers") { "src/heap/embedder-tracing.h", "src/heap/evacuation-allocator-inl.h", "src/heap/evacuation-allocator.h", + "src/heap/evacuation-verifier-inl.h", + "src/heap/evacuation-verifier.h", "src/heap/factory-base-inl.h", "src/heap/factory-base.h", "src/heap/factory-inl.h", @@ -3090,6 +3089,8 @@ v8_header_set("v8_internal_headers") { "src/heap/mark-compact.h", "src/heap/marking-barrier-inl.h", "src/heap/marking-barrier.h", + "src/heap/marking-state-inl.h", + "src/heap/marking-state.h", "src/heap/marking-visitor-inl.h", "src/heap/marking-visitor.h", "src/heap/marking-worklist-inl.h", @@ -3111,6 +3112,8 @@ v8_header_set("v8_internal_headers") { "src/heap/paged-spaces.h", "src/heap/parallel-work-item.h", "src/heap/parked-scope.h", + "src/heap/pretenuring-handler-inl.h", + "src/heap/pretenuring-handler.h", "src/heap/progress-bar.h", "src/heap/read-only-heap-inl.h", "src/heap/read-only-heap.h", @@ -3269,6 +3272,8 @@ v8_header_set("v8_internal_headers") { "src/objects/js-promise.h", 
"src/objects/js-proxy-inl.h", "src/objects/js-proxy.h", + "src/objects/js-raw-json-inl.h", + "src/objects/js-raw-json.h", "src/objects/js-regexp-inl.h", "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", @@ -3431,7 +3436,6 @@ v8_header_set("v8_internal_headers") { "src/regexp/experimental/experimental-compiler.h", "src/regexp/experimental/experimental-interpreter.h", "src/regexp/experimental/experimental.h", - "src/regexp/property-sequences.h", "src/regexp/regexp-ast.h", "src/regexp/regexp-bytecode-generator-inl.h", "src/regexp/regexp-bytecode-generator.h", @@ -3455,6 +3459,8 @@ v8_header_set("v8_internal_headers") { "src/roots/roots.h", "src/runtime/runtime-utils.h", "src/runtime/runtime.h", + "src/sandbox/bounded-size-inl.h", + "src/sandbox/bounded-size.h", "src/sandbox/external-pointer-inl.h", "src/sandbox/external-pointer-table-inl.h", "src/sandbox/external-pointer-table.h", @@ -3625,7 +3631,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/module-instantiate.h", "src/wasm/names-provider.h", "src/wasm/object-access.h", - "src/wasm/signature-map.h", + "src/wasm/pgo.h", "src/wasm/simd-shuffle.h", "src/wasm/stacks.h", "src/wasm/streaming-decoder.h", @@ -3682,6 +3688,8 @@ v8_header_set("v8_internal_headers") { "src/objects/js-date-time-format.h", "src/objects/js-display-names-inl.h", "src/objects/js-display-names.h", + "src/objects/js-duration-format-inl.h", + "src/objects/js-duration-format.h", "src/objects/js-list-format-inl.h", "src/objects/js-list-format.h", "src/objects/js-locale-inl.h", @@ -4229,6 +4237,9 @@ v8_source_set("v8_turboshaft") { "src/compiler/turboshaft/operations.cc", "src/compiler/turboshaft/optimization-phase.cc", "src/compiler/turboshaft/recreate-schedule.cc", + "src/compiler/turboshaft/representations.cc", + "src/compiler/turboshaft/simplify-tf-loops.cc", + "src/compiler/turboshaft/utils.cc", ] public_deps = [ @@ -4369,7 +4380,6 @@ v8_source_set("v8_base_without_compiler") { "src/debug/debug-scope-iterator.cc", "src/debug/debug-scopes.cc", "src/debug/debug-stack-trace-iterator.cc", - "src/debug/debug-type-profile.cc", "src/debug/debug.cc", "src/debug/liveedit-diff.cc", "src/debug/liveedit.cc", @@ -4436,6 +4446,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/cppgc-js/unified-heap-marking-verifier.cc", "src/heap/cppgc-js/unified-heap-marking-visitor.cc", "src/heap/embedder-tracing.cc", + "src/heap/evacuation-verifier.cc", "src/heap/factory-base.cc", "src/heap/factory.cc", "src/heap/finalization-registry-cleanup-task.cc", @@ -4469,6 +4480,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/object-stats.cc", "src/heap/objects-visiting.cc", "src/heap/paged-spaces.cc", + "src/heap/pretenuring-handler.cc", "src/heap/read-only-heap.cc", "src/heap/read-only-spaces.cc", "src/heap/safepoint.cc", @@ -4541,12 +4553,14 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-collator.cc", "src/objects/js-date-time-format.cc", "src/objects/js-display-names.cc", + "src/objects/js-duration-format.cc", "src/objects/js-function.cc", "src/objects/js-list-format.cc", "src/objects/js-locale.cc", "src/objects/js-number-format.cc", "src/objects/js-objects.cc", "src/objects/js-plural-rules.cc", + "src/objects/js-raw-json.cc", "src/objects/js-regexp.cc", "src/objects/js-relative-time-format.cc", "src/objects/js-segment-iterator.cc", @@ -4615,7 +4629,6 @@ v8_source_set("v8_base_without_compiler") { "src/regexp/experimental/experimental-compiler.cc", "src/regexp/experimental/experimental-interpreter.cc", 
"src/regexp/experimental/experimental.cc", - "src/regexp/property-sequences.cc", "src/regexp/regexp-ast.cc", "src/regexp/regexp-bytecode-generator.cc", "src/regexp/regexp-bytecode-peephole.cc", @@ -4767,7 +4780,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/module-decoder.cc", "src/wasm/module-instantiate.cc", "src/wasm/names-provider.cc", - "src/wasm/signature-map.cc", + "src/wasm/pgo.cc", "src/wasm/simd-shuffle.cc", "src/wasm/stacks.cc", "src/wasm/streaming-decoder.cc", @@ -5130,6 +5143,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-collator.cc", "src/objects/js-date-time-format.cc", "src/objects/js-display-names.cc", + "src/objects/js-duration-format.cc", "src/objects/js-list-format.cc", "src/objects/js-locale.cc", "src/objects/js-number-format.cc", @@ -5712,6 +5726,7 @@ v8_source_set("v8_bigint") { v8_header_set("v8_heap_base_headers") { sources = [ "src/heap/base/active-system-pages.h", + "src/heap/base/basic-slot-set.h", "src/heap/base/stack.h", "src/heap/base/worklist.h", ] @@ -5856,6 +5871,7 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/globals.h", "src/heap/cppgc/heap-base.cc", "src/heap/cppgc/heap-base.h", + "src/heap/cppgc/heap-config.h", "src/heap/cppgc/heap-consistency.cc", "src/heap/cppgc/heap-growing.cc", "src/heap/cppgc/heap-growing.h", @@ -6253,7 +6269,7 @@ group("gn_all") { } group("v8_python_base") { - data = [ ".vpython" ] + data = [ ".vpython3" ] } group("v8_clusterfuzz") { diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS index 39f241b3e9a2f1..b7dc8f2147de77 100644 --- a/deps/v8/COMMON_OWNERS +++ b/deps/v8/COMMON_OWNERS @@ -18,6 +18,7 @@ machenbach@chromium.org manoskouk@chromium.org mathias@chromium.org marja@chromium.org +mliedtke@chromium.org mlippautz@chromium.org mslekova@chromium.org nicohartmann@chromium.org diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 457fcc13f7477e..6304c386414450 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -45,15 +45,19 @@ vars = { 'reclient_version': 're_client_version:0.69.0.458df98-gomaip', # GN CIPD package version. - 'gn_version': 'git_revision:b4851eb2062f76a880c07f7fa0d12913beb6d79e', + 'gn_version': 'git_revision:cc28efe62ef0c2fb32455f414a29c4a55bb7fbc4', + + # ninja CIPD package version + # https://chrome-infra-packages.appspot.com/p/infra/3pp/tools/ninja + 'ninja_version': 'version:2@1.8.2.chromium.3', # luci-go CIPD package version. - 'luci_go': 'git_revision:c93fd3c5ebdc3999eea86a7623dbd1ed4b40bc78', + 'luci_go': 'git_revision:20c50aa39686d91330c2daceccaa4ef1a0a72ee4', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling Fuchsia sdk # and whatever else without interference from each other. 
- 'fuchsia_version': 'version:9.20220913.3.1', + 'fuchsia_version': 'version:9.20220919.2.1', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -93,9 +97,9 @@ deps = { 'base/trace_event/common': Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '521ac34ebd795939c7e16b37d9d3ddb40e8ed556', 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '4157fb6cb44135013300168c9f4c5b95d04acf70', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '7e7c21a9ac34c4fc2b255aa44d639efec9c33b90', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'e713c13e2fa3b7aa9131276f27990011e1aa6a73', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '9174abb6ac087b46f22248dc713b6c0328b8f774', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '8b525d2747f2584fc35d8c7e612e66f377858df7', 'buildtools/linux64': { @@ -119,11 +123,11 @@ deps = { 'condition': 'host_os == "mac"', }, 'buildtools/third_party/libc++/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'c1e647c7c30238f7c512457eec55798e3458fd8a', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '2e919977e0030ce61bd19c40cefe31b995f1e2d4', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '5c3e02e92ae8bbc1bf1001bd9ef0d76e044ddb86', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'db2a783a7d1ef0f0ef31da4b6e3de0c31fcfd93f', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '60a480ee1819266cf8054548454f99838583cd76', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '08ebcbe7b672a04e341cb3a88d8bf4276f96ac6e', 'buildtools/win': { 'packages': [ { @@ -149,7 +153,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '746197355c1705b7d4463fc75c29433c0ce2fd0d', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '58b7a2358286b918efd38eac4b2facbc8ada1206', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '8388a2be5421311dc75c5f937aae13d821a27f3d', 'condition': 'checkout_android', @@ -197,7 +201,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '37391a1619e953e23d3441dbc61e658e881fede4', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'ff03621a71c01a6f2b0f3bf2677cf815291a9e85', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -205,7 +209,7 @@ deps = { 'condition': 'checkout_android', }, 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '9ebcfa6be17c2d1e7bd72135ceab5e767ed89b7d', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + 'a724859f7a9b3531c0373d86886a42314e772532', 'third_party/fuchsia-sdk/sdk': { 'packages': [ { @@ -239,6 +243,16 @@ deps = { Var('chromium_url') + '/infra/luci/luci-py/client/libs/logdog' + '@' + '0b2078a90f7a638d576b3a7c407d136f2fb62399', 'third_party/markupsafe': Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' 
+ '@' + '1b882ef6372b58bfd55a3285f37ed801be9137cd', + 'third_party/ninja': { + 'packages': [ + { + 'package': 'infra/3pp/tools/ninja/${{platform}}', + 'version': Var('ninja_version'), + } + ], + 'dep_type': 'cipd', + 'condition': 'host_cpu != "s390" and host_cpu != "ppc"' + }, 'third_party/perfetto': Var('android_url') + '/platform/external/perfetto.git' + '@' + '0eba417b2c72264fa825dc21067b9adc9b8adf70', 'third_party/protobuf': @@ -248,9 +262,9 @@ deps = { 'condition': 'checkout_android', }, 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'f48cb14d487038d20c85680e29351e095a0fea8b', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'd689fca54d7b43154f7cf77f785d19f2628fa133', 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '02a202a7b1fa863352c0c9fb088fd3c0cf48c978', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a5e0d72349d028a4023927d6d166a8478355fac3', 'tools/luci-go': { 'packages': [ { @@ -574,16 +588,6 @@ hooks = [ 'action': ['python3', 'tools/clang/scripts/update.py', '--package=objdump'], }, - # Download and initialize "vpython" VirtualEnv environment packages. - { - 'name': 'vpython_common', - 'pattern': '.', - 'condition': 'checkout_android', - 'action': [ 'vpython', - '-vpython-spec', '.vpython', - '-vpython-tool', 'install', - ], - }, { 'name': 'vpython3_common', 'pattern': '.', diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS index 78d53b821d67e5..4f80f9d15a74c0 100644 --- a/deps/v8/ENG_REVIEW_OWNERS +++ b/deps/v8/ENG_REVIEW_OWNERS @@ -5,5 +5,6 @@ adamk@chromium.org danno@chromium.org hpayer@chromium.org +mlippautz@chromium.org verwaest@chromium.org vahl@chromium.org diff --git a/deps/v8/bazel/defs.bzl b/deps/v8/bazel/defs.bzl index e957c0fad3bc4b..d8db3fe8ba9a62 100644 --- a/deps/v8/bazel/defs.bzl +++ b/deps/v8/bazel/defs.bzl @@ -151,6 +151,14 @@ def _default_args(): "-fno-integrated-as", ], "//conditions:default": [], + }) + select({ + "@v8//bazel/config:is_debug":[ + "-fvisibility=default", + ], + "//conditions:default": [ + "-fvisibility=hidden", + "-fvisibility-inlines-hidden", + ], }), includes = ["include"], linkopts = select({ @@ -407,15 +415,19 @@ v8_target_cpu_transition = transition( ) def _mksnapshot(ctx): + prefix = ctx.attr.prefix + suffix = ctx.attr.suffix outs = [ - ctx.actions.declare_file(ctx.attr.prefix + "/snapshot.cc"), - ctx.actions.declare_file(ctx.attr.prefix + "/embedded.S"), + ctx.actions.declare_file(prefix + "/snapshot" + suffix + ".cc"), + ctx.actions.declare_file(prefix + "/embedded" + suffix + ".S"), ] ctx.actions.run( outputs = outs, inputs = [], arguments = [ "--embedded_variant=Default", + "--target_os", + ctx.attr.target_os, "--startup_src", outs[0].path, "--embedded_src", @@ -436,26 +448,38 @@ _v8_mksnapshot = rule( executable = True, cfg = "exec", ), + "target_os": attr.string(mandatory = True), "_allowlist_function_transition": attr.label( default = "@bazel_tools//tools/allowlists/function_transition_allowlist", ), "prefix": attr.string(mandatory = True), + "suffix": attr.string(mandatory = True), }, cfg = v8_target_cpu_transition, ) -def v8_mksnapshot(name, args): +def v8_mksnapshot(name, args, suffix = ""): _v8_mksnapshot( name = "noicu/" + name, args = args, prefix = "noicu", - tool = ":noicu/mksnapshot", + tool = ":noicu/mksnapshot" + suffix, + suffix = suffix, + target_os = select({ + "@v8//bazel/config:is_macos": "mac", + "//conditions:default": "", + }), ) _v8_mksnapshot( name = "icu/" + name, args = args, prefix 
= "icu", - tool = ":icu/mksnapshot", + tool = ":icu/mksnapshot" + suffix, + suffix = suffix, + target_os = select({ + "@v8//bazel/config:is_macos": "mac", + "//conditions:default": "", + }), ) def _quote(val): diff --git a/deps/v8/include/cppgc/heap-handle.h b/deps/v8/include/cppgc/heap-handle.h index 5a0f9cd2edcb9f..8d825133b065d6 100644 --- a/deps/v8/include/cppgc/heap-handle.h +++ b/deps/v8/include/cppgc/heap-handle.h @@ -12,6 +12,7 @@ namespace cppgc { namespace internal { class HeapBase; class WriteBarrierTypeForCagedHeapPolicy; +class WriteBarrierTypeForNonCagedHeapPolicy; } // namespace internal /** @@ -34,6 +35,7 @@ class HeapHandle { friend class internal::HeapBase; friend class internal::WriteBarrierTypeForCagedHeapPolicy; + friend class internal::WriteBarrierTypeForNonCagedHeapPolicy; }; } // namespace cppgc diff --git a/deps/v8/include/cppgc/internal/member-storage.h b/deps/v8/include/cppgc/internal/member-storage.h index 98389b8cd3d531..0eb6382070c4da 100644 --- a/deps/v8/include/cppgc/internal/member-storage.h +++ b/deps/v8/include/cppgc/internal/member-storage.h @@ -61,7 +61,7 @@ class CageBaseGlobal final { #undef CPPGC_REQUIRE_CONSTANT_INIT #undef CPPGC_CONST -class CompressedPointer final { +class V8_TRIVIAL_ABI CompressedPointer final { public: using IntegralType = uint32_t; @@ -170,7 +170,7 @@ class CompressedPointer final { #endif // defined(CPPGC_POINTER_COMPRESSION) -class RawPointer final { +class V8_TRIVIAL_ABI RawPointer final { public: using IntegralType = uintptr_t; diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h index 2d8e14be086d64..37bc5c973ef995 100644 --- a/deps/v8/include/cppgc/internal/write-barrier.h +++ b/deps/v8/include/cppgc/internal/write-barrier.h @@ -12,6 +12,7 @@ #include "cppgc/heap-state.h" #include "cppgc/internal/api-constants.h" #include "cppgc/internal/atomic-entry-flag.h" +#include "cppgc/internal/base-page-handle.h" #include "cppgc/internal/member-storage.h" #include "cppgc/platform.h" #include "cppgc/sentinel-pointer.h" @@ -283,7 +284,7 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch< return SetAndReturnType(params); } #else // !defined(CPPGC_YOUNG_GENERATION) - if (V8_UNLIKELY(!subtle::HeapState::IsMarking(handle))) { + if (V8_UNLIKELY(!handle.is_incremental_marking_in_progress())) { return SetAndReturnType(params); } #endif // !defined(CPPGC_YOUNG_GENERATION) @@ -326,11 +327,6 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final { template struct ValueModeDispatch; - // TODO(chromium:1056170): Create fast path on API. - static bool IsMarking(const void*, HeapHandle**); - // TODO(chromium:1056170): Create fast path on API. - static bool IsMarking(HeapHandle&); - WriteBarrierTypeForNonCagedHeapPolicy() = delete; }; @@ -348,7 +344,13 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch< if (V8_LIKELY(!WriteBarrier::IsEnabled())) { return SetAndReturnType(params); } - if (IsMarking(object, ¶ms.heap)) { + // We know that |object| is within the normal page or in the beginning of a + // large page, so extract the page header by bitmasking. 
+ BasePageHandle* page = + BasePageHandle::FromPayload(const_cast(object)); + + HeapHandle& heap_handle = page->heap_handle(); + if (V8_LIKELY(heap_handle.is_incremental_marking_in_progress())) { return SetAndReturnType(params); } return SetAndReturnType(params); @@ -364,7 +366,7 @@ struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch< HeapHandleCallback callback) { if (V8_UNLIKELY(WriteBarrier::IsEnabled())) { HeapHandle& handle = callback(); - if (IsMarking(handle)) { + if (V8_LIKELY(handle.is_incremental_marking_in_progress())) { params.heap = &handle; return SetAndReturnType(params); } diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h index 71f9cab65280da..9bc383634f18ea 100644 --- a/deps/v8/include/cppgc/member.h +++ b/deps/v8/include/cppgc/member.h @@ -28,7 +28,7 @@ namespace internal { // MemberBase always refers to the object as const object and defers to // BasicMember on casting to the right type as needed. -class MemberBase { +class V8_TRIVIAL_ABI MemberBase { public: #if defined(CPPGC_POINTER_COMPRESSION) using RawStorage = CompressedPointer; @@ -68,13 +68,16 @@ class MemberBase { V8_INLINE void ClearFromGC() const { raw_.Clear(); } private: + friend class MemberDebugHelper; + mutable RawStorage raw_; }; // The basic class from which all Member classes are 'generated'. template -class BasicMember final : private MemberBase, private CheckingPolicy { +class V8_TRIVIAL_ABI BasicMember final : private MemberBase, + private CheckingPolicy { public: using PointeeType = T; diff --git a/deps/v8/include/js_protocol-1.3.json b/deps/v8/include/js_protocol-1.3.json index ea573d11a61b03..a998d4611d16d3 100644 --- a/deps/v8/include/js_protocol-1.3.json +++ b/deps/v8/include/js_protocol-1.3.json @@ -946,34 +946,6 @@ { "name": "url", "type": "string", "description": "JavaScript script name or url." }, { "name": "functions", "type": "array", "items": { "$ref": "FunctionCoverage" }, "description": "Functions contained in the script that has coverage data." } ] - }, - { "id": "TypeObject", - "type": "object", - "description": "Describes a type collected during runtime.", - "properties": [ - { "name": "name", "type": "string", "description": "Name of a type collected with type profiling." } - ], - "experimental": true - }, - { "id": "TypeProfileEntry", - "type": "object", - "description": "Source offset and types for a parameter or return value.", - "properties": [ - { "name": "offset", "type": "integer", "description": "Source offset of the parameter or end of function for return values." }, - { "name": "types", "type": "array", "items": {"$ref": "TypeObject"}, "description": "The types for this parameter or return value."} - ], - "experimental": true - }, - { - "id": "ScriptTypeProfile", - "type": "object", - "description": "Type profile data collected during runtime for a JavaScript script.", - "properties": [ - { "name": "scriptId", "$ref": "Runtime.ScriptId", "description": "JavaScript script id." }, - { "name": "url", "type": "string", "description": "JavaScript script name or url." }, - { "name": "entries", "type": "array", "items": { "$ref": "TypeProfileEntry" }, "description": "Type profile entries for parameters and return values of the functions in the script." } - ], - "experimental": true } ], "commands": [ @@ -1024,24 +996,6 @@ { "name": "result", "type": "array", "items": { "$ref": "ScriptCoverage" }, "description": "Coverage data for the current isolate." } ], "description": "Collect coverage data for the current isolate. 
The coverage data may be incomplete due to garbage collection." - }, - { - "name": "startTypeProfile", - "description": "Enable type profile.", - "experimental": true - }, - { - "name": "stopTypeProfile", - "description": "Disable type profile. Disabling releases type profile data collected so far.", - "experimental": true - }, - { - "name": "takeTypeProfile", - "returns": [ - { "name": "result", "type": "array", "items": { "$ref": "ScriptTypeProfile" }, "description": "Type profile for all scripts since startTypeProfile() was turned on." } - ], - "description": "Collect type profile.", - "experimental": true } ], "events": [ diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index 2d560435522769..b3b97fa11768e6 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -918,30 +918,6 @@ domain Profiler # Functions contained in the script that has coverage data. array of FunctionCoverage functions - # Describes a type collected during runtime. - experimental type TypeObject extends object - properties - # Name of a type collected with type profiling. - string name - - # Source offset and types for a parameter or return value. - experimental type TypeProfileEntry extends object - properties - # Source offset of the parameter or end of function for return values. - integer offset - # The types for this parameter or return value. - array of TypeObject types - - # Type profile data collected during runtime for a JavaScript script. - experimental type ScriptTypeProfile extends object - properties - # JavaScript script id. - Runtime.ScriptId scriptId - # JavaScript script name or url. - string url - # Type profile entries for parameters and return values of the functions in the script. - array of TypeProfileEntry entries - command disable command enable @@ -976,9 +952,6 @@ domain Profiler # Monotonically increasing time (in seconds) when the coverage update was taken in the backend. number timestamp - # Enable type profile. - experimental command startTypeProfile - command stop returns # Recorded profile. @@ -988,9 +961,6 @@ domain Profiler # executing optimized code. command stopPreciseCoverage - # Disable type profile. Disabling releases type profile data collected so far. - experimental command stopTypeProfile - # Collect coverage data for the current isolate, and resets execution counters. Precise code # coverage needs to have started. command takePreciseCoverage @@ -1000,12 +970,6 @@ domain Profiler # Monotonically increasing time (in seconds) when the coverage update was taken in the backend. number timestamp - # Collect type profile. - experimental command takeTypeProfile - returns - # Type profile for all scripts since startTypeProfile() was turned on. - array of ScriptTypeProfile result - event consoleProfileFinished parameters string id diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h index be52c414b4e028..427f3a738607bb 100644 --- a/deps/v8/include/v8-context.h +++ b/deps/v8/include/v8-context.h @@ -290,6 +290,7 @@ class V8_EXPORT Context : public Data { Local<Function> after_hook, Local<Function> resolve_hook); + bool HasTemplateLiteralObject(Local<Value> object); /** * Stack-allocated class which sets the execution context for all * operations executed within a local scope. 
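
A note on the cppgc/internal/write-barrier.h hunks above: the patch replaces the out-of-line IsMarking() helpers with an inline fast path that recovers the page header, and through it the HeapHandle, by bitmasking the payload pointer (BasePageHandle::FromPayload). Below is a minimal sketch of that idea, not part of the patch; kPageSize, PageHeader, PageFromPayload, and NeedsMarkingBarrier are hypothetical stand-ins for cppgc's real constants and types, and the 128 KiB alignment is assumed for illustration only.

#include <cstdint>

// Hypothetical page alignment. What matters is that pages are allocated at a
// power-of-two alignment with a header at the page base (objects on large
// pages sit at the beginning of the page, so the same mask works there).
constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 17;

struct PageHeader {
  // Stands in for the HeapHandle state the barrier consults.
  bool is_incremental_marking_in_progress;
};

// Same idea as BasePageHandle::FromPayload(): masking off the low bits of any
// payload address yields the page base, where the header is stored.
inline PageHeader* PageFromPayload(const void* payload) {
  return reinterpret_cast<PageHeader*>(
      reinterpret_cast<std::uintptr_t>(payload) & ~(kPageSize - 1));
}

// The barrier check becomes one mask plus one flag load, with no out-of-line
// library call.
inline bool NeedsMarkingBarrier(const void* object) {
  return PageFromPayload(object)->is_incremental_marking_in_progress;
}

This is also why the patch makes HeapHandle befriend WriteBarrierTypeForNonCagedHeapPolicy and reads is_incremental_marking_in_progress() directly from the handle.
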
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index c97942ed1b476a..a52d066c835e2d 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -182,7 +182,7 @@ constexpr size_t kSandboxSizeLog2 = 37; // 128 GB #else // Everywhere else use a 1TB sandbox. constexpr size_t kSandboxSizeLog2 = 40; // 1 TB -#endif // V8_OS_ANDROID +#endif // V8_TARGET_OS_ANDROID constexpr size_t kSandboxSize = 1ULL << kSandboxSizeLog2; // Required alignment of the sandbox. For simplicity, we require the @@ -223,6 +223,21 @@ static_assert(kSandboxMinimumReservationSize > kPtrComprCageReservationSize, "The minimum reservation size for a sandbox must be larger than " "the pointer compression cage contained within it."); +// The maximum buffer size allowed inside the sandbox. This is mostly dependent +// on the size of the guard regions around the sandbox: an attacker must not be +// able to construct a buffer that appears larger than the guard regions and +// thereby "reach out of" the sandbox. +constexpr size_t kMaxSafeBufferSizeForSandbox = 32ULL * GB - 1; +static_assert(kMaxSafeBufferSizeForSandbox <= kSandboxGuardRegionSize, + "The maximum allowed buffer size must not be larger than the " + "sandbox's guard regions"); + +constexpr size_t kBoundedSizeShift = 29; +static_assert(1ULL << (64 - kBoundedSizeShift) == + kMaxSafeBufferSizeForSandbox + 1, + "The maximum size of a BoundedSize must be synchronized with the " + "kMaxSafeBufferSizeForSandbox"); + #endif // V8_ENABLE_SANDBOX #ifdef V8_COMPRESS_POINTERS diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h index 4f31d8c7a80835..2f8acc88682e38 100644 --- a/deps/v8/include/v8-isolate.h +++ b/deps/v8/include/v8-isolate.h @@ -536,6 +536,7 @@ class V8_EXPORT Isolate { kFunctionPrototypeCaller = 114, kTurboFanOsrCompileStarted = 115, kAsyncStackTaggingCreateTaskCall = 116, + kDurationFormat = 117, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 4061987dc1161f..027a13a3b239bc 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 10 -#define V8_MINOR_VERSION 7 -#define V8_BUILD_NUMBER 193 -#define V8_PATCH_LEVEL 22 +#define V8_MINOR_VERSION 8 +#define V8_BUILD_NUMBER 168 +#define V8_PATCH_LEVEL 20 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 207afac8b0adeb..a959be130d5319 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -579,6 +579,37 @@ path. Add it with -I to the command line #define V8_NO_UNIQUE_ADDRESS /* NOT SUPPORTED */ #endif +// Marks a type as being eligible for the "trivial" ABI despite having a +// non-trivial destructor or copy/move constructor. Such types can be relocated +// after construction by simply copying their memory, which makes them eligible +// to be passed in registers. The canonical example is std::unique_ptr. +// +// Use with caution; this has some subtle effects on constructor/destructor +// ordering and will be very incorrect if the type relies on its address +// remaining constant. 
When used as a function argument (by value), the value +// may be constructed in the caller's stack frame, passed in a register, and +// then used and destructed in the callee's stack frame. A similar thing can +// occur when values are returned. +// +// TRIVIAL_ABI is not needed for types which have a trivial destructor and +// copy/move constructors, since those are automatically trivial by the ABI +// spec. +// +// It is also not likely to be effective on types too large to be passed in one +// or two registers on typical target ABIs. +// +// See also: +// https://clang.llvm.org/docs/AttributeReference.html#trivial-abi +// https://libcxx.llvm.org/docs/DesignDocs/UniquePtrTrivialAbi.html +#if defined(__clang__) && defined(__has_attribute) +#if __has_attribute(trivial_abi) +#define V8_TRIVIAL_ABI [[clang::trivial_abi]] +#endif // __has_attribute(trivial_abi) +#endif // defined(__clang__) && defined(__has_attribute) +#if !defined(V8_TRIVIAL_ABI) +#define V8_TRIVIAL_ABI +#endif //!defined(V8_TRIVIAL_ABI) + // Helper macro to define no_sanitize attributes only with clang. #if defined(__clang__) && defined(__has_attribute) #if __has_attribute(no_sanitize) diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index 5488996a641ae4..b5d6231600488a 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -116,6 +116,7 @@ 'V8 Linux64 - disable runtime call stats - builder': 'release_x64_disable_runtime_call_stats', 'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation', 'V8 Linux64 - no pointer compression - builder': 'release_x64_no_pointer_compression', + 'V8 Linux64 css - debug builder': 'debug_x64_conservative_stack_scanning', 'V8 Linux64 gcc - builder': 'release_x64_gcc', 'V8 Linux64 gcc - debug builder': 'debug_x64_gcc', 'V8 Linux64 gcc light - debug builder': 'debug_x64_gcc', @@ -207,105 +208,98 @@ 'tryserver.v8': { 'v8_android_arm_compile_rel': 'release_android_arm', 'v8_android_arm64_compile_dbg': 'debug_android_arm64', - 'v8_android_arm64_n5x_rel_ng': 'release_android_arm64', + 'v8_android_arm64_n5x_compile_rel': 'release_android_arm64', 'v8_fuchsia_compile_rel': 'release_x64_fuchsia_trybot', - 'v8_fuchsia_rel_ng': 'release_x64_fuchsia_trybot', 'v8_ios_simulator': 'release_x64_ios_simulator', - 'v8_linux_rel_ng': 'release_x86_gcmole_trybot', - 'v8_linux_optional_rel_ng': 'release_x86_trybot', - 'v8_linux_verify_csa_rel_ng': 'release_x86_verify_csa', - 'v8_linux_nodcheck_rel_ng': 'release_x86_minimal_symbols', - 'v8_linux_dbg_ng': 'debug_x86_trybot', + 'v8_linux_compile_rel': 'release_x86_gcmole_trybot', + 'v8_linux_optional_compile_rel': 'release_x86_trybot', + 'v8_linux_verify_csa_compile_rel': 'release_x86_verify_csa', + 'v8_linux_nodcheck_compile_rel': 'release_x86_minimal_symbols', + 'v8_linux_compile_dbg': 'debug_x86_trybot', 'v8_linux_noi18n_compile_dbg': 'debug_x86_no_i18n', - 'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot', - 'v8_linux_gc_stress_dbg_ng': 'debug_x86_trybot', + 'v8_linux_noi18n_compile_rel': 'release_x86_no_i18n_trybot', + 'v8_linux_gc_stress_compile_dbg': 'debug_x86_trybot', 'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap', 'v8_linux_vtunejit': 'debug_x86_vtunejit', - 'v8_linux64_arm64_no_pointer_compression_rel_ng': + 'v8_linux64_arm64_no_pointer_compression_compile_rel': 'release_simulate_arm64_no_pointer_compression', - 'v8_linux64_cppgc_non_default_dbg_ng': 'debug_x64_non_default_cppgc', - 'v8_linux64_dbg_ng': 'debug_x64_trybot', - 'v8_linux64_no_sandbox_dbg_ng': 
'debug_x64_no_sandbox', - 'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot', - 'v8_linux64_disable_runtime_call_stats_rel_ng': 'release_x64_disable_runtime_call_stats', - 'v8_linux64_external_code_space_dbg_ng': 'debug_x64_external_code_space', - 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom', - 'v8_linux64_gc_stress_dbg_ng': 'debug_x64_trybot', + 'v8_linux64_cppgc_non_default_compile_dbg': 'debug_x64_non_default_cppgc', + 'v8_linux64_compile_dbg': 'debug_x64_trybot', + 'v8_linux64_no_sandbox_compile_dbg': 'debug_x64_no_sandbox', + 'v8_linux64_dict_tracking_compile_dbg': 'debug_x64_dict_tracking_trybot', + 'v8_linux64_disable_runtime_call_stats_compile_rel': 'release_x64_disable_runtime_call_stats', + 'v8_linux64_external_code_space_compile_dbg': 'debug_x64_external_code_space', + 'v8_linux64_css_compile_dbg': 'debug_x64_conservative_stack_scanning', + 'v8_linux64_gc_stress_custom_snapshot_compile_dbg': 'debug_x64_trybot_custom', + 'v8_linux64_gc_stress_compile_dbg': 'debug_x64_trybot', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', - 'v8_linux64_gcc_compile_rel': 'release_x64_gcc', 'v8_linux64_gcc_light_compile_dbg': 'debug_x64_gcc', - 'v8_linux64_gcc_rel_ng': 'release_x64_gcc', + 'v8_linux64_gcc_compile_rel': 'release_x64_gcc', 'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage', 'v8_linux64_header_includes_dbg': 'debug_x64_header_includes', - 'v8_linux64_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox', - 'v8_linux64_minor_mc_dbg_ng': 'debug_x64_trybot', - 'v8_linux_arm64_sim_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox_arm64_sim', - 'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot', - 'v8_linux64_nodcheck_rel_ng': 'release_x64', - 'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto', - 'v8_linux64_no_pointer_compression_rel_ng': 'release_x64_no_pointer_compression', - 'v8_linux64_rel_ng': 'release_x64_test_features_trybot', - 'v8_linux64_no_sandbox_rel_ng': 'release_x64_no_sandbox', - 'v8_linux64_predictable_rel_ng': 'release_x64_predictable', + 'v8_linux64_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox', + 'v8_linux64_minor_mc_compile_dbg': 'debug_x64_trybot', + 'v8_linux_arm64_sim_heap_sandbox_compile_dbg': 'debug_x64_heap_sandbox_arm64_sim', + 'v8_linux64_fyi_compile_rel': 'release_x64_test_features_trybot', + 'v8_linux64_nodcheck_compile_rel': 'release_x64', + 'v8_linux64_perfetto_compile_dbg': 'debug_x64_perfetto', + 'v8_linux64_no_pointer_compression_compile_rel': 'release_x64_no_pointer_compression', + 'v8_linux64_compile_rel': 'release_x64_test_features_trybot', + 'v8_linux64_no_sandbox_compile_rel': 'release_x64_no_sandbox', + 'v8_linux64_predictable_compile_rel': 'release_x64_predictable', 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap', - 'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation', + 'v8_linux64_single_generation_compile_dbg': 'debug_x64_single_generation', 'v8_linux64_no_wasm_compile_rel': 'release_x64_webassembly_disabled', - 'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa', - 'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols', - 'v8_linux64_cfi_rel_ng': 'release_x64_cfi', - 'v8_linux64_fuzzilli_ng': 'release_x64_fuzzilli', - 'v8_linux64_loong64_rel_ng': 'release_simulate_loong64', - 'v8_linux64_msan_rel_ng': 'release_simulate_arm64_msan_minimal_symbols', - 'v8_linux_riscv32_rel_ng': 'release_simulate_riscv32', - 'v8_linux64_riscv64_rel_ng': 'release_simulate_riscv64', - 'v8_linux64_tsan_rel_ng': 'release_x64_tsan_minimal_symbols', - 
'v8_linux64_tsan_no_cm_rel_ng': 'release_x64_tsan_no_cm', - 'v8_linux64_tsan_isolates_rel_ng': + 'v8_linux64_verify_csa_compile_rel': 'release_x64_verify_csa', + 'v8_linux64_asan_compile_rel': 'release_x64_asan_minimal_symbols', + 'v8_linux64_cfi_compile_rel': 'release_x64_cfi', + 'v8_linux64_fuzzilli_compile_rel': 'release_x64_fuzzilli', + 'v8_linux64_loong64_compile_rel': 'release_simulate_loong64', + 'v8_linux64_msan_compile_rel': 'release_simulate_arm64_msan_minimal_symbols', + 'v8_linux_riscv32_compile_rel': 'release_simulate_riscv32', + 'v8_linux64_riscv64_compile_rel': 'release_simulate_riscv64', + 'v8_linux64_tsan_compile_rel': 'release_x64_tsan_minimal_symbols', + 'v8_linux64_tsan_no_cm_compile_rel': 'release_x64_tsan_no_cm', + 'v8_linux64_tsan_isolates_compile_rel': 'release_x64_tsan_minimal_symbols', - 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_minimal_symbols', - 'v8_odroid_arm_rel_ng': 'release_arm', + 'v8_linux64_ubsan_compile_rel': 'release_x64_ubsan_minimal_symbols', + 'v8_odroid_arm_compile_rel': 'release_arm', 'v8_linux_torque_compare': 'torque_compare', # TODO(machenbach): Remove after switching to x64 on infra side. - 'v8_win_dbg_ng': 'debug_x86_trybot', 'v8_win_compile_dbg': 'debug_x86_trybot', - 'v8_win_rel_ng': 'release_x86_trybot', - 'v8_win64_asan_rel_ng': 'release_x64_asan_no_lsan', + 'v8_win_compile_rel': 'release_x86_trybot', + 'v8_win64_asan_compile_rel': 'release_x64_asan_no_lsan', + 'v8_win64_msvc_light_compile_rel': 'release_x64_msvc', + 'v8_win64_compile_dbg': 'debug_x64_minimal_symbols', 'v8_win64_msvc_compile_rel': 'release_x64_msvc', - 'v8_win64_dbg_ng': 'debug_x64_minimal_symbols', - 'v8_win64_msvc_rel_ng': 'release_x64_msvc', - 'v8_win64_rel_ng': 'release_x64_trybot', - 'v8_mac_arm64_rel_ng': 'release_arm64', - 'v8_mac_arm64_dbg_ng': 'debug_arm64', - 'v8_mac_arm64_full_dbg_ng': 'full_debug_arm64', - 'v8_mac_arm64_no_pointer_compression_dbg_ng': 'debug_arm64_no_pointer_compression', - 'v8_mac_arm64_compile_dbg': 'debug_arm64', + 'v8_win64_compile_rel': 'release_x64_trybot', 'v8_mac_arm64_compile_rel': 'release_arm64', + 'v8_mac_arm64_compile_dbg': 'debug_arm64', + 'v8_mac_arm64_full_compile_dbg': 'full_debug_arm64', + 'v8_mac_arm64_no_pointer_compression_compile_dbg': 'debug_arm64_no_pointer_compression', + 'v8_mac_arm64_sim_compile_rel': 'release_simulate_arm64_trybot', 'v8_mac_arm64_sim_compile_dbg': 'debug_simulate_arm64', - 'v8_mac_arm64_sim_compile_rel': 'release_simulate_arm64', - 'v8_mac_arm64_sim_rel_ng': 'release_simulate_arm64_trybot', - 'v8_mac_arm64_sim_dbg_ng': 'debug_simulate_arm64', - 'v8_mac_arm64_sim_nodcheck_rel_ng': 'release_simulate_arm64', - 'v8_mac64_gc_stress_dbg_ng': 'debug_x64_trybot', - 'v8_mac64_rel_ng': 'release_x64_trybot', + 'v8_mac_arm64_sim_nodcheck_compile_rel': 'release_simulate_arm64', + 'v8_mac64_gc_stress_compile_dbg': 'debug_x64_trybot', + 'v8_mac64_compile_rel': 'release_x64_trybot', 'v8_mac64_dbg': 'debug_x64', - 'v8_mac64_dbg_ng': 'debug_x64', - 'v8_mac64_compile_full_dbg_ng': 'full_debug_x64', + 'v8_mac64_compile_dbg': 'debug_x64', + 'v8_mac64_compile_full_compile_dbg': 'full_debug_x64', 'v8_mac64_asan_compile_rel': 'release_x64_asan_no_lsan', - 'v8_mac64_asan_rel_ng': 'release_x64_asan_no_lsan', - 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', + 'v8_linux_arm_compile_rel': 'release_simulate_arm_trybot', 'v8_linux_arm_lite_compile_dbg': 'debug_simulate_arm_lite', - 'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot', - 'v8_linux_arm_dbg_ng': 'debug_simulate_arm', + 
'v8_linux_arm_lite_compile_rel': 'release_simulate_arm_lite_trybot', + 'v8_linux_arm_compile_dbg': 'debug_simulate_arm', 'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot', 'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm', - 'v8_linux_arm64_rel_ng': 'release_simulate_arm64_trybot', - 'v8_linux_arm64_cfi_rel_ng' : 'release_simulate_arm64_cfi', - 'v8_linux_arm64_dbg_ng': 'debug_simulate_arm64', - 'v8_linux_arm64_gc_stress_dbg_ng': 'debug_simulate_arm64', + 'v8_linux_arm64_compile_rel': 'release_simulate_arm64_trybot', + 'v8_linux_arm64_cfi_compile_rel' : 'release_simulate_arm64_cfi', + 'v8_linux_arm64_compile_dbg': 'debug_simulate_arm64', + 'v8_linux_arm64_gc_stress_compile_dbg': 'debug_simulate_arm64', 'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el', - 'v8_numfuzz_ng': 'release_x64', - 'v8_numfuzz_dbg_ng': 'debug_x64', - 'v8_numfuzz_tsan_ng': 'release_x64_tsan', + 'v8_numfuzz_compile_rel': 'release_x64', + 'v8_numfuzz_compile_dbg': 'debug_x64', + 'v8_numfuzz_tsan_compile_rel': 'release_x64_tsan', }, }, @@ -577,6 +571,8 @@ 'debug_x64_asan_no_lsan_static': [ 'debug', 'static', 'goma', 'v8_enable_slow_dchecks', 'v8_optimized_debug', 'x64', 'asan'], + 'debug_x64_conservative_stack_scanning': [ + 'debug_bot', 'x64', 'conservative_stack_scanning'], 'debug_x64_custom': [ 'debug_bot', 'x64', 'v8_snapshot_custom'], 'debug_x64_external_code_space': [ @@ -703,6 +699,11 @@ 'gn_args': 'is_clang=true', }, + 'conservative_stack_scanning': { + 'gn_args': 'v8_enable_conservative_stack_scanning=true ' + 'v8_enable_inner_pointer_resolution_mb=true', + }, + 'coverage': { 'gn_args': 'v8_code_coverage=true', }, diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index ca2fab5eac8e90..516905539cf72c 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -32,7 +32,7 @@ ### luci.v8.try ############################################################################## # Android - 'v8_android_arm64_n5x_rel_ng_triggered': { + 'v8_android_arm64_n5x_rel': { 'swarming_dimensions' : { 'device_os': 'MMB29Q', 'device_type': 'bullhead', @@ -46,7 +46,7 @@ }, ############################################################################## # Fuchsia - 'v8_fuchsia_rel_ng_triggered': { + 'v8_fuchsia_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -64,7 +64,7 @@ }, ############################################################################## # Linux32 - 'v8_linux_dbg_ng_triggered': { + 'v8_linux_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -109,7 +109,7 @@ }, ], }, - 'v8_linux_gc_stress_dbg_ng_triggered': { + 'v8_linux_gc_stress_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -118,7 +118,7 @@ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 5}, ], }, - 'v8_linux_nodcheck_rel_ng_triggered': { + 'v8_linux_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -143,7 +143,7 @@ }, ], }, - 'v8_linux_noi18n_rel_ng_triggered': { + 'v8_linux_noi18n_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -153,7 +153,7 @@ {'name': 'v8testing', 'variant': 'default', 'shards': 2}, ], }, - 'v8_linux_rel_ng_triggered': { + 'v8_linux_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -182,7 +182,7 @@ }, ], }, - 'v8_linux_optional_rel_ng_triggered': { + 'v8_linux_optional_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -285,7 +285,7 @@ }, ], }, - 'v8_linux_verify_csa_rel_ng_triggered': { 
+ 'v8_linux_verify_csa_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -295,7 +295,7 @@ }, ############################################################################## # Linux32 with arm simulators - 'v8_linux_arm_dbg_ng_triggered': { + 'v8_linux_arm_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -307,7 +307,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 10}, ], }, - 'v8_linux_arm_lite_rel_ng_triggered': { + 'v8_linux_arm_lite_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -315,7 +315,7 @@ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, ], }, - 'v8_linux_arm_rel_ng_triggered': { + 'v8_linux_arm_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -329,7 +329,7 @@ }, ############################################################################## # Linux64 - 'v8_linux64_asan_rel_ng_triggered': { + 'v8_linux64_asan_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -340,7 +340,7 @@ {'name': 'v8testing', 'variant': 'slow_path'}, ], }, - 'v8_linux64_cfi_rel_ng_triggered': { + 'v8_linux64_cfi_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -352,7 +352,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_cppgc_non_default_dbg_ng_triggered': { + 'v8_linux64_cppgc_non_default_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -361,7 +361,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_dbg_ng_triggered': { + 'v8_linux64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -385,7 +385,7 @@ {'name': 'mjsunit', 'variant': 'maglev'}, ], }, - 'v8_linux64_dict_tracking_dbg_ng_triggered': { + 'v8_linux64_dict_tracking_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -394,7 +394,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_disable_runtime_call_stats_rel_ng_triggered': { + 'v8_linux64_disable_runtime_call_stats_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -402,7 +402,7 @@ {'name': 'v8testing'}, ], }, - 'v8_linux64_external_code_space_dbg_ng_triggered': { + 'v8_linux64_external_code_space_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -411,14 +411,14 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_fuzzilli_ng_triggered': { + 'v8_linux64_fuzzilli_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, # TODO(almuthanna): Add a new test config for the fuzzilli suite. 
'tests': [], }, - 'v8_linux64_fyi_rel_ng_triggered': { + 'v8_linux64_fyi_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -440,7 +440,7 @@ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, - 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { + 'v8_linux64_gc_stress_custom_snapshot_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -452,7 +452,7 @@ }, ], }, - 'v8_linux64_gc_stress_dbg_ng_triggered': { + 'v8_linux64_gc_stress_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -473,7 +473,7 @@ }, ], }, - 'v8_linux64_gcc_rel_ng_triggered': { + 'v8_linux64_gcc_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-20.04', }, @@ -489,7 +489,7 @@ {'name': 'v8testing'}, ], }, - 'v8_linux64_heap_sandbox_dbg_ng_triggered': { + 'v8_linux64_heap_sandbox_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -497,7 +497,7 @@ {'name': 'v8testing', 'shards': 4}, ], }, - 'v8_linux64_minor_mc_dbg_ng_triggered': { + 'v8_linux64_minor_mc_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -510,7 +510,7 @@ {'name': 'mjsunit', 'variant': 'minor_mc'}, ], }, - 'v8_linux64_msan_rel_ng_triggered': { + 'v8_linux64_msan_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -519,7 +519,7 @@ {'name': 'v8testing', 'shards': 5}, ], }, - 'v8_linux64_nodcheck_rel_ng_triggered': { + 'v8_linux64_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -545,7 +545,7 @@ {'name': 'mjsunit', 'variant': 'maglev'}, ], }, - 'v8_linux64_perfetto_dbg_ng_triggered': { + 'v8_linux64_perfetto_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -553,7 +553,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_no_pointer_compression_rel_ng_triggered': { + 'v8_linux64_no_pointer_compression_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -561,7 +561,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_no_sandbox_dbg_ng_triggered': { + 'v8_linux64_no_sandbox_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -570,7 +570,7 @@ {'name': 'v8testing', 'shards': 5}, ], }, - 'v8_linux64_single_generation_dbg_ng_triggered': { + 'v8_linux64_single_generation_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -578,7 +578,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_rel_ng_triggered': { + 'v8_linux64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -601,7 +601,7 @@ {'name': 'mjsunit', 'variant': 'maglev'}, ], }, - 'v8_linux64_predictable_rel_ng_triggered': { + 'v8_linux64_predictable_rel': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -612,7 +612,7 @@ {'name': 'mozilla'}, ], }, - 'v8_linux64_no_sandbox_rel_ng_triggered': { + 'v8_linux64_no_sandbox_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', 'os': 'Ubuntu-18.04', @@ -621,7 +621,7 @@ {'name': 'v8testing', 'shards': 2}, ], }, - 'v8_linux64_tsan_rel_ng_triggered': { + 'v8_linux64_tsan_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -635,7 +635,7 @@ {'name': 'v8testing', 'variant': 'stress_concurrent_allocation', 'shards': 2}, ], }, - 'v8_linux64_tsan_no_cm_rel_ng_triggered': { + 'v8_linux64_tsan_no_cm_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -653,7 +653,7 @@ {'name': 'v8testing', 'variant': 'stress_concurrent_inlining', 'shards': 2}, ], }, - 'v8_linux64_tsan_isolates_rel_ng_triggered': { + 'v8_linux64_tsan_isolates_rel': { 'swarming_dimensions' : { 'os': 
'Ubuntu-18.04', }, @@ -661,7 +661,7 @@ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, ], }, - 'v8_linux64_ubsan_rel_ng_triggered': { + 'v8_linux64_ubsan_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -669,7 +669,7 @@ {'name': 'v8testing', 'shards': 2}, ], }, - 'v8_linux64_verify_csa_rel_ng_triggered': { + 'v8_linux64_verify_csa_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -679,7 +679,7 @@ }, ############################################################################## # Linux64 with arm64 simulators - 'v8_linux_arm64_dbg_ng_triggered': { + 'v8_linux_arm64_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -691,7 +691,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 14}, ], }, - 'v8_linux_arm64_gc_stress_dbg_ng_triggered': { + 'v8_linux_arm64_gc_stress_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -699,7 +699,7 @@ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12}, ], }, - 'v8_linux_arm64_sim_heap_sandbox_dbg_ng_triggered': { + 'v8_linux_arm64_sim_heap_sandbox_dbg': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -707,7 +707,7 @@ {'name': 'v8testing', 'shards': 14}, ], }, - 'v8_linux_arm64_rel_ng_triggered': { + 'v8_linux_arm64_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -719,7 +719,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 14}, ], }, - 'v8_linux_arm64_cfi_rel_ng_triggered': { + 'v8_linux_arm64_cfi_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -728,7 +728,7 @@ {'name': 'v8testing', 'shards': 4}, ], }, - 'v8_linux64_arm64_no_pointer_compression_rel_ng_triggered': { + 'v8_linux64_arm64_no_pointer_compression_rel': { 'swarming_dimensions' : { 'os': 'Ubuntu-18.04', }, @@ -738,7 +738,7 @@ }, ############################################################################## # Linux64 with Loongson simulators - 'v8_linux64_loong64_rel_ng_triggered': { + 'v8_linux64_loong64_rel': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, @@ -748,7 +748,7 @@ }, ############################################################################## # Linux with RISC-V simulators - 'v8_linux_riscv32_rel_ng_triggered': { + 'v8_linux_riscv32_rel': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, @@ -756,7 +756,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_linux64_riscv64_rel_ng_triggered': { + 'v8_linux64_riscv64_rel': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, @@ -766,7 +766,7 @@ }, ############################################################################## # Odroids with native arm - 'v8_odroid_arm_rel_ng_triggered': { + 'v8_odroid_arm_rel': { 'swarming_dimensions' : { 'cores': '8', 'cpu': 'armv7l-32-ODROID-XU4', @@ -784,7 +784,7 @@ }, ############################################################################## # Win32 - 'v8_win_dbg_ng_triggered': { + 'v8_win_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-7-SP1', @@ -794,7 +794,7 @@ {'name': 'v8testing', 'shards': 3}, ], }, - 'v8_win_rel_ng_triggered': { + 'v8_win_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-7-SP1', @@ -807,7 +807,7 @@ }, ############################################################################## # Win64 - 'v8_win64_asan_rel_ng_triggered': { + 'v8_win64_asan_rel': { 'swarming_dimensions' : { 'os': 'Windows-10-19042', }, @@ -815,7 +815,7 @@ {'name': 'v8testing', 'shards': 5}, ], }, - 'v8_win64_dbg_ng_triggered': { + 'v8_win64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-10-19042', @@ -827,7 +827,7 @@ {'name': 
'v8testing', 'variant': 'extra', 'shards': 2}, ], }, - 'v8_win64_msvc_rel_ng_triggered': { + 'v8_win64_msvc_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-10-19042', @@ -838,7 +838,7 @@ {'name': 'v8testing', 'shards': 2}, ], }, - 'v8_win64_rel_ng_triggered': { + 'v8_win64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-10-19042', @@ -852,7 +852,7 @@ }, ############################################################################## # Mac64 - 'v8_mac64_asan_rel_ng_triggered': { + 'v8_mac64_asan_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -861,7 +861,7 @@ {'name': 'v8testing', 'shards': 8}, ], }, - 'v8_mac64_dbg_ng_triggered': { + 'v8_mac64_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -873,7 +873,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 6}, ], }, - 'v8_mac64_gc_stress_dbg_ng_triggered': { + 'v8_mac64_gc_stress_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -882,7 +882,7 @@ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 6}, ], }, - 'v8_mac64_rel_ng_triggered': { + 'v8_mac64_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -894,7 +894,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 3}, ], }, - 'v8_mac_arm64_rel_ng_triggered': { + 'v8_mac_arm64_rel': { 'swarming_dimensions' : { 'cpu': 'arm64', 'os': 'Mac-11', @@ -904,7 +904,7 @@ {'name': 'v8testing'}, ], }, - 'v8_mac_arm64_dbg_ng_triggered': { + 'v8_mac_arm64_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', 'os': 'Mac-11', @@ -914,7 +914,7 @@ {'name': 'v8testing'}, ], }, - 'v8_mac_arm64_full_dbg_ng_triggered': { + 'v8_mac_arm64_full_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', 'os': 'Mac-11', @@ -924,7 +924,7 @@ {'name': 'v8testing'}, ], }, - 'v8_mac_arm64_no_pointer_compression_dbg_ng_triggered': { + 'v8_mac_arm64_no_pointer_compression_dbg': { 'swarming_dimensions' : { 'cpu': 'arm64', 'os': 'Mac-11', @@ -934,7 +934,7 @@ {'name': 'v8testing'}, ], }, - 'v8_mac_arm64_sim_rel_ng_triggered': { + 'v8_mac_arm64_sim_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -944,7 +944,7 @@ {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, - 'v8_mac_arm64_sim_dbg_ng_triggered': { + 'v8_mac_arm64_sim_dbg': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -954,7 +954,7 @@ {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, - 'v8_mac_arm64_sim_nodcheck_rel_ng_triggered': { + 'v8_mac_arm64_sim_nodcheck_rel': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.15', @@ -2349,7 +2349,7 @@ }, ], }, - 'v8_numfuzz_ng_triggered': { + 'v8_numfuzz_rel': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, @@ -2366,7 +2366,7 @@ }, ], }, - 'v8_numfuzz_tsan_ng_triggered': { + 'v8_numfuzz_tsan_rel': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, @@ -2419,7 +2419,7 @@ }, ], }, - 'v8_numfuzz_dbg_ng_triggered': { + 'v8_numfuzz_dbg': { 'swarming_dimensions': { 'os': 'Ubuntu-18.04', }, diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index 1edad011be189c..8912d7fb25186c 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -32,6 +32,7 @@ include_rules = [ "+src/heap/local-factory.h", "+src/heap/local-heap.h", "+src/heap/local-heap-inl.h", + "+src/heap/pretenuring-handler-inl.h", # TODO(v8:10496): Don't expose memory chunk outside of heap/. 
"+src/heap/memory-chunk.h", "+src/heap/memory-chunk-inl.h", diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 562b7849b4061d..8624c279d66e4f 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -529,7 +529,7 @@ MaybeHandle InstantiateFunction( if (!data->needs_access_check() && data->GetNamedPropertyHandler().IsUndefined(isolate) && data->GetIndexedPropertyHandler().IsUndefined(isolate)) { - function_type = FLAG_embedder_instance_types && data->HasInstanceType() + function_type = v8_flags.embedder_instance_types && data->HasInstanceType() ? static_cast(data->InstanceType()) : JS_API_OBJECT_TYPE; } diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index b54e554217329b..a4a4381614e1fd 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -284,7 +284,7 @@ void i::V8::FatalProcessOutOfMemory(i::Isolate* i_isolate, const char* location, // BUG(1718): Don't use the take_snapshot since we don't support // HeapObjectIterator here without doing a special GC. i_isolate->heap()->RecordStats(&heap_stats, false); - if (!FLAG_correctness_fuzzer_suppressions) { + if (!v8_flags.correctness_fuzzer_suppressions) { char* first_newline = strchr(last_few_messages, '\n'); if (first_newline == nullptr || first_newline[1] == '\0') first_newline = last_few_messages; @@ -795,7 +795,7 @@ i::Address* GlobalizeTracedReference(i::Isolate* i_isolate, i::Address* obj, i::Handle result = i_isolate->global_handles()->CreateTraced(*obj, slot, store_mode); #ifdef VERIFY_HEAP - if (i::FLAG_verify_heap) { + if (i::v8_flags.verify_heap) { i::Object(*obj).ObjectVerify(i_isolate); } #endif // VERIFY_HEAP @@ -823,7 +823,7 @@ i::Address* GlobalizeReference(i::Isolate* i_isolate, i::Address* obj) { API_RCS_SCOPE(i_isolate, Persistent, New); i::Handle result = i_isolate->global_handles()->Create(*obj); #ifdef VERIFY_HEAP - if (i::FLAG_verify_heap) { + if (i::v8_flags.verify_heap) { i::Object(*obj).ObjectVerify(i_isolate); } #endif // VERIFY_HEAP @@ -1676,7 +1676,7 @@ void ObjectTemplate::SetAccessor(v8::Local name, SideEffectType getter_side_effect_type, SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - i::FLAG_disable_old_api_accessors, false, + i::v8_flags.disable_old_api_accessors, false, getter_side_effect_type, setter_side_effect_type); } @@ -1688,7 +1688,7 @@ void ObjectTemplate::SetAccessor(v8::Local name, SideEffectType getter_side_effect_type, SideEffectType setter_side_effect_type) { TemplateSetAccessor(this, name, getter, setter, data, settings, attribute, - i::FLAG_disable_old_api_accessors, false, + i::v8_flags.disable_old_api_accessors, false, getter_side_effect_type, setter_side_effect_type); } @@ -2100,7 +2100,7 @@ MaybeLocal Script::Run(Local context, // // To avoid this, on running scripts check first if JIT code log is // pending and generate immediately. 
- if (i::FLAG_enable_etw_stack_walking) { + if (i::v8_flags.enable_etw_stack_walking) { i::ETWJITInterface::MaybeSetHandlerNow(i_isolate); } #endif @@ -2109,14 +2109,15 @@ MaybeLocal Script::Run(Local context, // TODO(crbug.com/1193459): remove once ablation study is completed base::ElapsedTimer timer; base::TimeDelta delta; - if (i::FLAG_script_delay > 0) { - delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay); + if (i::v8_flags.script_delay > 0) { + delta = v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay); } - if (i::FLAG_script_delay_once > 0 && !i_isolate->did_run_script_delay()) { - delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay_once); + if (i::v8_flags.script_delay_once > 0 && !i_isolate->did_run_script_delay()) { + delta = + v8::base::TimeDelta::FromMillisecondsD(i::v8_flags.script_delay_once); i_isolate->set_did_run_script_delay(true); } - if (i::FLAG_script_delay_fraction > 0.0) { + if (i::v8_flags.script_delay_fraction > 0.0) { timer.Start(); } else if (delta.InMicroseconds() > 0) { timer.Start(); @@ -2125,7 +2126,7 @@ MaybeLocal Script::Run(Local context, } } - if (V8_UNLIKELY(i::FLAG_experimental_web_snapshots)) { + if (V8_UNLIKELY(i::v8_flags.experimental_web_snapshots)) { i::Handle maybe_script = handle(fun->shared().script(), i_isolate); if (maybe_script->IsScript() && @@ -2149,9 +2150,9 @@ MaybeLocal Script::Run(Local context, has_pending_exception = !ToLocal( i::Execution::CallScript(i_isolate, fun, receiver, options), &result); - if (i::FLAG_script_delay_fraction > 0.0) { + if (i::v8_flags.script_delay_fraction > 0.0) { delta = v8::base::TimeDelta::FromMillisecondsD( - timer.Elapsed().InMillisecondsF() * i::FLAG_script_delay_fraction); + timer.Elapsed().InMillisecondsF() * i::v8_flags.script_delay_fraction); timer.Restart(); while (timer.Elapsed() < delta) { // Busy wait. 
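The recurring change in api.cc above — `i::FLAG_foo` becoming `i::v8_flags.foo` — moves every flag read from a standalone global variable to a member of one process-wide struct. A minimal sketch of the pattern, with hypothetical flag names (V8's real FlagValues members are macro-generated from its flag-definition headers):

#include <cstdint>

// Illustrative only: a single struct holds what used to be per-flag globals.
struct FlagValues {
  bool verify_heap = false;
  bool expose_gc = false;
  double script_delay = 0.0;
};

// One global instance replaces the scattered FLAG_* globals...
FlagValues v8_flags;

// ...so call sites change from `if (FLAG_verify_heap)` to:
bool ShouldVerifyHeap() { return v8_flags.verify_heap; }

int main() {
  v8_flags.verify_heap = true;  // e.g. set while parsing --verify-heap
  return ShouldVerifyHeap() ? 0 : 1;
}

One motivation commonly cited for this kind of layout is that all flags live in one contiguous block, which can then be write-protected after startup — something individual globals spread across translation units cannot offer.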
@@ -2742,7 +2743,7 @@ ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreaming( Utils::ApiCheck(options == kNoCompileOptions || options == kEagerCompile, "v8::ScriptCompiler::StartStreaming", "Invalid CompileOptions"); - if (!i::FLAG_script_streaming) return nullptr; + if (!i::v8_flags.script_streaming) return nullptr; i::Isolate* i_isolate = reinterpret_cast(v8_isolate); i::ScriptStreamingData* data = source->impl(); std::unique_ptr task = @@ -2775,20 +2776,22 @@ void ScriptCompiler::ConsumeCodeCacheTask::SourceTextAvailable( bool ScriptCompiler::ConsumeCodeCacheTask::ShouldMergeWithExistingScript() const { - if (!i::FLAG_merge_background_deserialized_script_with_compilation_cache) { + if (!i::v8_flags + .merge_background_deserialized_script_with_compilation_cache) { return false; } return impl_->ShouldMergeWithExistingScript(); } void ScriptCompiler::ConsumeCodeCacheTask::MergeWithExistingScript() { - DCHECK(i::FLAG_merge_background_deserialized_script_with_compilation_cache); + DCHECK( + i::v8_flags.merge_background_deserialized_script_with_compilation_cache); impl_->MergeWithExistingScript(); } ScriptCompiler::ConsumeCodeCacheTask* ScriptCompiler::StartConsumingCodeCache( Isolate* v8_isolate, std::unique_ptr cached_data) { - if (!i::FLAG_concurrent_cache_deserialization) return nullptr; + if (!i::v8_flags.concurrent_cache_deserialization) return nullptr; i::Isolate* i_isolate = reinterpret_cast(v8_isolate); DCHECK_NO_SCRIPT_NO_EXCEPTION(i_isolate); return new ScriptCompiler::ConsumeCodeCacheTask( @@ -4923,7 +4926,7 @@ Maybe Object::SetAccessor(Local context, Local name, SideEffectType setter_side_effect_type) { return ObjectSetAccessor(context, this, name, getter, setter, data.FromMaybe(Local()), settings, attribute, - i::FLAG_disable_old_api_accessors, false, + i::v8_flags.disable_old_api_accessors, false, getter_side_effect_type, setter_side_effect_type); } @@ -6740,6 +6743,14 @@ void v8::Context::SetPromiseHooks(Local init_hook, #endif // V8_ENABLE_JAVASCRIPT_PROMISE_HOOKS } +bool Context::HasTemplateLiteralObject(Local object) { + i::DisallowGarbageCollection no_gc; + i::Object i_object = *Utils::OpenHandle(*object); + if (!i_object.IsJSArray()) return false; + return Utils::OpenHandle(this)->native_context().HasTemplateLiteralObject( + i::JSArray::cast(i_object)); +} + MaybeLocal metrics::Recorder::GetContext( Isolate* v8_isolate, metrics::Recorder::ContextId id) { i::Isolate* i_isolate = reinterpret_cast(v8_isolate); @@ -8149,12 +8160,12 @@ std::unique_ptr v8::ArrayBuffer::NewBackingStore( void* deleter_data) { CHECK_LE(byte_length, i::JSArrayBuffer::kMaxByteLength); #ifdef V8_ENABLE_SANDBOX - Utils::ApiCheck( - !data || i::GetProcessWideSandbox()->Contains(data), - "v8_ArrayBuffer_NewBackingStore", - "When the V8 Sandbox is enabled, ArrayBuffer backing stores must be " - "allocated inside the sandbox address space. Please use an appropriate " - "ArrayBuffer::Allocator to allocate these buffers."); + Utils::ApiCheck(!data || i::GetProcessWideSandbox()->Contains(data), + "v8_ArrayBuffer_NewBackingStore", + "When the V8 Sandbox is enabled, ArrayBuffer backing stores " + "must be allocated inside the sandbox address space. 
Please " + "use an appropriate ArrayBuffer::Allocator to allocate these " + "buffers, or disable the sandbox."); #endif // V8_ENABLE_SANDBOX std::unique_ptr backing_store = @@ -8246,7 +8257,7 @@ static_assert( Local Type##Array::New( \ Local shared_array_buffer, size_t byte_offset, \ size_t length) { \ - CHECK(i::FLAG_harmony_sharedarraybuffer); \ + CHECK(i::v8_flags.harmony_sharedarraybuffer); \ i::Isolate* i_isolate = \ Utils::OpenHandle(*shared_array_buffer)->GetIsolate(); \ API_RCS_SCOPE(i_isolate, Type##Array, New); \ @@ -8281,7 +8292,7 @@ Local DataView::New(Local array_buffer, Local DataView::New(Local shared_array_buffer, size_t byte_offset, size_t byte_length) { - CHECK(i::FLAG_harmony_sharedarraybuffer); + CHECK(i::v8_flags.harmony_sharedarraybuffer); i::Handle buffer = Utils::OpenHandle(*shared_array_buffer); i::Isolate* i_isolate = buffer->GetIsolate(); API_RCS_SCOPE(i_isolate, DataView, New); @@ -8298,7 +8309,7 @@ size_t v8::SharedArrayBuffer::ByteLength() const { Local v8::SharedArrayBuffer::New(Isolate* v8_isolate, size_t byte_length) { - CHECK(i::FLAG_harmony_sharedarraybuffer); + CHECK(i::v8_flags.harmony_sharedarraybuffer); i::Isolate* i_isolate = reinterpret_cast(v8_isolate); API_RCS_SCOPE(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); @@ -8320,7 +8331,7 @@ Local v8::SharedArrayBuffer::New(Isolate* v8_isolate, Local v8::SharedArrayBuffer::New( Isolate* v8_isolate, std::shared_ptr backing_store) { - CHECK(i::FLAG_harmony_sharedarraybuffer); + CHECK(i::v8_flags.harmony_sharedarraybuffer); CHECK_IMPLIES(backing_store->ByteLength() != 0, backing_store->Data() != nullptr); i::Isolate* i_isolate = reinterpret_cast(v8_isolate); @@ -8711,7 +8722,7 @@ bool Isolate::HasPendingBackgroundTasks() { } void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) { - Utils::ApiCheck(i::FLAG_expose_gc, + Utils::ApiCheck(i::v8_flags.expose_gc, "v8::Isolate::RequestGarbageCollectionForTesting", "Must use --expose-gc"); if (type == kMinorGarbageCollection) { @@ -9227,7 +9238,7 @@ int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( void Isolate::SetEventLogger(LogEventCallback that) { // Do not overwrite the event logger if we want to log explicitly. - if (i::FLAG_log_internal_timer_events) return; + if (i::v8_flags.log_internal_timer_events) return; i::Isolate* i_isolate = reinterpret_cast(this); i_isolate->set_event_logger(that); } @@ -9360,7 +9371,7 @@ bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) { // Returning true tells the caller that it need not // continue to call IdleNotification. 
i::Isolate* i_isolate = reinterpret_cast(this); - if (!i::FLAG_use_idle_notification) return true; + if (!i::v8_flags.use_idle_notification) return true; return i_isolate->heap()->IdleNotification(deadline_in_seconds); } @@ -9563,7 +9574,7 @@ void Isolate::InstallConditionalFeatures(Local context) { if (i_isolate->is_execution_terminating()) return; i_isolate->InstallConditionalFeatures(Utils::OpenHandle(*context)); #if V8_ENABLE_WEBASSEMBLY - if (i::FLAG_expose_wasm && !i_isolate->has_pending_exception()) { + if (i::v8_flags.expose_wasm && !i_isolate->has_pending_exception()) { i::WasmJs::InstallConditionalFeatures(i_isolate, Utils::OpenHandle(*context)); } diff --git a/deps/v8/src/base/bits.cc b/deps/v8/src/base/bits.cc index e604cff6d5c96c..2a3dce97761c59 100644 --- a/deps/v8/src/base/bits.cc +++ b/deps/v8/src/base/bits.cc @@ -52,6 +52,46 @@ int32_t SignedMulHigh32(int32_t lhs, int32_t rhs) { 32u); } +// The algorithm used is described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +// It assumes that a right shift on a signed integer is an arithmetic shift. +int64_t SignedMulHigh64(int64_t u, int64_t v) { + uint64_t u0 = u & 0xFFFFFFFF; + int64_t u1 = u >> 32; + uint64_t v0 = v & 0xFFFFFFFF; + int64_t v1 = v >> 32; + + uint64_t w0 = u0 * v0; + int64_t t = u1 * v0 + (w0 >> 32); + int64_t w1 = t & 0xFFFFFFFF; + int64_t w2 = t >> 32; + w1 = u0 * v1 + w1; + + return u1 * v1 + w2 + (w1 >> 32); +} + +// The algorithm used is described in section 8.2 of +// Hacker's Delight, by Henry S. Warren, Jr. +uint64_t UnsignedMulHigh64(uint64_t u, uint64_t v) { + uint64_t u0 = u & 0xFFFFFFFF; + uint64_t u1 = u >> 32; + uint64_t v0 = v & 0xFFFFFFFF; + uint64_t v1 = v >> 32; + + uint64_t w0 = u0 * v0; + uint64_t t = u1 * v0 + (w0 >> 32); + uint64_t w1 = t & 0xFFFFFFFFLL; + uint64_t w2 = t >> 32; + w1 = u0 * v1 + w1; + + return u1 * v1 + w2 + (w1 >> 32); +} + +uint32_t UnsignedMulHigh32(uint32_t lhs, uint32_t rhs) { + uint64_t const value = + static_cast(lhs) * static_cast(rhs); + return static_cast(value >> 32u); +} int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, int32_t acc) { return base::bit_cast( @@ -66,12 +106,22 @@ int32_t SignedDiv32(int32_t lhs, int32_t rhs) { return lhs / rhs; } +int64_t SignedDiv64(int64_t lhs, int64_t rhs) { + if (rhs == 0) return 0; + if (rhs == -1) return lhs == std::numeric_limits::min() ? lhs : -lhs; + return lhs / rhs; +} int32_t SignedMod32(int32_t lhs, int32_t rhs) { if (rhs == 0 || rhs == -1) return 0; return lhs % rhs; } +int64_t SignedMod64(int64_t lhs, int64_t rhs) { + if (rhs == 0 || rhs == -1) return 0; + return lhs % rhs; +} + int64_t SignedSaturatedAdd64(int64_t lhs, int64_t rhs) { using limits = std::numeric_limits; // Underflow if {lhs + rhs < min}. In that case, return {min}. diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index 3209a4b081df7f..0cb22a9a904aea 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -70,6 +70,30 @@ T ReverseBits(T value) { return result; } +// ReverseBytes(value) returns |value| in reverse byte order. 
+template +T ReverseBytes(T value) { + static_assert((sizeof(value) == 1) || (sizeof(value) == 2) || + (sizeof(value) == 4) || (sizeof(value) == 8)); + T result = 0; + for (unsigned i = 0; i < sizeof(value); i++) { + result = (result << 8) | (value & 0xff); + value >>= 8; + } + return result; +} + +template +inline constexpr std::make_unsigned_t Unsigned(T value) { + static_assert(std::is_signed_v); + return static_cast>(value); +} +template +inline constexpr std::make_signed_t Signed(T value) { + static_assert(std::is_unsigned_v); + return static_cast>(value); +} + // CountLeadingZeros(value) returns the number of zero bits following the most // significant 1 bit in |value| if |value| is non-zero, otherwise it returns // {sizeof(T) * 8}. @@ -104,6 +128,15 @@ inline constexpr unsigned CountLeadingZeros64(uint64_t value) { return CountLeadingZeros(value); } +// The number of leading zeros for a positive number, +// the number of leading ones for a negative number. +template +constexpr unsigned CountLeadingSignBits(T value) { + static_assert(std::is_signed_v); + return value < 0 ? CountLeadingZeros(~Unsigned(value)) + : CountLeadingZeros(Unsigned(value)); +} + // CountTrailingZeros(value) returns the number of zero bits preceding the // least significant 1 bit in |value| if |value| is non-zero, otherwise it // returns {sizeof(T) * 8}. @@ -297,6 +330,21 @@ inline bool SignedSubOverflow64(int64_t lhs, int64_t rhs, int64_t* val) { // those. V8_BASE_EXPORT int32_t SignedMulHigh32(int32_t lhs, int32_t rhs); +// UnsignedMulHigh32(lhs, rhs) multiplies two unsigned 32-bit values |lhs| and +// |rhs|, extracts the most significant 32 bits of the result, and returns +// those. +V8_BASE_EXPORT uint32_t UnsignedMulHigh32(uint32_t lhs, uint32_t rhs); + +// SignedMulHigh64(lhs, rhs) multiplies two signed 64-bit values |lhs| and +// |rhs|, extracts the most significant 64 bits of the result, and returns +// those. +V8_BASE_EXPORT int64_t SignedMulHigh64(int64_t lhs, int64_t rhs); + +// UnsignedMulHigh64(lhs, rhs) multiplies two unsigned 64-bit values |lhs| and +// |rhs|, extracts the most significant 64 bits of the result, and returns +// those. +V8_BASE_EXPORT uint64_t UnsignedMulHigh64(uint64_t lhs, uint64_t rhs); + // SignedMulHighAndAdd32(lhs, rhs, acc) multiplies two signed 32-bit values // |lhs| and |rhs|, extracts the most significant 32 bits of the result, and // adds the accumulate value |acc|. @@ -308,11 +356,21 @@ V8_BASE_EXPORT int32_t SignedMulHighAndAdd32(int32_t lhs, int32_t rhs, // is minint and |rhs| is -1, it returns minint. V8_BASE_EXPORT int32_t SignedDiv32(int32_t lhs, int32_t rhs); +// SignedDiv64(lhs, rhs) divides |lhs| by |rhs| and returns the quotient +// truncated to int64. If |rhs| is zero, then zero is returned. If |lhs| +// is minint and |rhs| is -1, it returns minint. +V8_BASE_EXPORT int64_t SignedDiv64(int64_t lhs, int64_t rhs); + // SignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder // truncated to int32. If either |rhs| is zero or |lhs| is minint and |rhs| // is -1, it returns zero. V8_BASE_EXPORT int32_t SignedMod32(int32_t lhs, int32_t rhs); +// SignedMod64(lhs, rhs) divides |lhs| by |rhs| and returns the remainder +// truncated to int64. If either |rhs| is zero or |lhs| is minint and |rhs| +// is -1, it returns zero. 
+V8_BASE_EXPORT int64_t SignedMod64(int64_t lhs, int64_t rhs); + // UnsignedAddOverflow32(lhs,rhs,val) performs an unsigned summation of |lhs| // and |rhs| and stores the result into the variable pointed to by |val| and // returns true if the unsigned summation resulted in an overflow. @@ -332,6 +390,11 @@ inline uint32_t UnsignedDiv32(uint32_t lhs, uint32_t rhs) { return rhs ? lhs / rhs : 0u; } +// UnsignedDiv64(lhs, rhs) divides |lhs| by |rhs| and returns the quotient +// truncated to uint64. If |rhs| is zero, then zero is returned. +inline uint64_t UnsignedDiv64(uint64_t lhs, uint64_t rhs) { + return rhs ? lhs / rhs : 0u; +} // UnsignedMod32(lhs, rhs) divides |lhs| by |rhs| and returns the remainder // truncated to uint32. If |rhs| is zero, then zero is returned. @@ -339,6 +402,12 @@ inline uint32_t UnsignedMod32(uint32_t lhs, uint32_t rhs) { return rhs ? lhs % rhs : 0u; } +// UnsignedMod64(lhs, rhs) divides |lhs| by |rhs| and returns the remainder +// truncated to uint64. If |rhs| is zero, then zero is returned. +inline uint64_t UnsignedMod64(uint64_t lhs, uint64_t rhs) { + return rhs ? lhs % rhs : 0u; +} + // Wraparound integer arithmetic without undefined behavior. inline int32_t WraparoundAdd32(int32_t lhs, int32_t rhs) { diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h index 3221de08349843..d7ddefd7137811 100644 --- a/deps/v8/src/base/compiler-specific.h +++ b/deps/v8/src/base/compiler-specific.h @@ -135,4 +135,15 @@ #define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment))) #endif +// Forces the linker to not GC the section corresponding to the symbol. +#if defined(__has_attribute) +#if __has_attribute(used) && __has_attribute(retain) +#define V8_DONT_STRIP_SYMBOL __attribute__((used, retain)) +#endif // __has_attribute(used) && __has_attribute(retain) +#endif // defined(__has_attribute) + +#if !defined(V8_DONT_STRIP_SYMBOL) +#define V8_DONT_STRIP_SYMBOL +#endif // !defined(V8_DONT_STRIP_SYMBOL) + #endif // V8_BASE_COMPILER_SPECIFIC_H_ diff --git a/deps/v8/src/base/division-by-constant.cc b/deps/v8/src/base/division-by-constant.cc index 97dfd5680b04df..fbc36463dc51d0 100644 --- a/deps/v8/src/base/division-by-constant.cc +++ b/deps/v8/src/base/division-by-constant.cc @@ -6,15 +6,16 @@ #include +#include + #include "src/base/logging.h" #include "src/base/macros.h" namespace v8 { namespace base { -template +template , bool>> MagicNumbersForDivision SignedDivisionByConstant(T d) { - static_assert(static_cast(0) < static_cast(-1)); DCHECK(d != static_cast(-1) && d != 0 && d != 1); const unsigned bits = static_cast(sizeof(T)) * 8; const T min = (static_cast(1) << (bits - 1)); @@ -48,11 +49,10 @@ MagicNumbersForDivision SignedDivisionByConstant(T d) { return MagicNumbersForDivision(neg ? 
(0 - mul) : mul, p - bits, false); } - template MagicNumbersForDivision UnsignedDivisionByConstant(T d, unsigned leading_zeros) { - static_assert(static_cast(0) < static_cast(-1)); + static_assert(std::is_unsigned_v); DCHECK_NE(d, 0); const unsigned bits = static_cast(sizeof(T)) * 8; const T ones = ~static_cast(0) >> leading_zeros; diff --git a/deps/v8/src/base/division-by-constant.h b/deps/v8/src/base/division-by-constant.h index 744283981bc3de..4b9f4a873c289c 100644 --- a/deps/v8/src/base/division-by-constant.h +++ b/deps/v8/src/base/division-by-constant.h @@ -7,6 +7,9 @@ #include +#include +#include + #include "src/base/base-export.h" #include "src/base/export-template.h" @@ -16,10 +19,10 @@ namespace base { // ---------------------------------------------------------------------------- // The magic numbers for division via multiplication, see Warren's "Hacker's -// Delight", chapter 10. The template parameter must be one of the unsigned -// integral types. +// Delight", chapter 10. template struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision { + static_assert(std::is_integral_v); MagicNumbersForDivision(T m, unsigned s, bool a) : multiplier(m), shift(s), add(a) {} bool operator==(const MagicNumbersForDivision& rhs) const { @@ -31,13 +34,20 @@ struct EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision { bool add; }; - // Calculate the multiplier and shift for signed division via multiplication. // The divisor must not be -1, 0 or 1 when interpreted as a signed value. -template +template , bool> = true> EXPORT_TEMPLATE_DECLARE(V8_BASE_EXPORT) MagicNumbersForDivision SignedDivisionByConstant(T d); +template , bool> = true> +MagicNumbersForDivision SignedDivisionByConstant(T d) { + using Unsigned = std::make_unsigned_t; + MagicNumbersForDivision magic = + SignedDivisionByConstant(static_cast(d)); + return {static_cast(magic.multiplier), magic.shift, magic.add}; +} + // Calculate the multiplier and shift for unsigned division via multiplication, // see Warren's "Hacker's Delight", chapter 10. 
The divisor must not be 0 and // leading_zeros can be used to speed up the calculation if the given number of diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 4db43686acc11a..25123cb7cd80eb 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -967,14 +967,6 @@ void BaselineCompiler::VisitDefineKeyedOwnPropertyInLiteral() { IndexAsTagged(3)); // slot } -void BaselineCompiler::VisitCollectTypeProfile() { - SaveAccumulatorScope accumulator_scope(&basm_); - CallRuntime(Runtime::kCollectTypeProfile, - IntAsSmi(0), // position - kInterpreterAccumulatorRegister, // value - FeedbackVector()); // feedback vector -} - void BaselineCompiler::VisitAdd() { CallBuiltin( RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); @@ -1158,10 +1150,10 @@ void BaselineCompiler::VisitGetSuperConstructor() { StoreRegister(0, prototype); } -void BaselineCompiler::VisitFindNonDefaultConstructor() { +void BaselineCompiler::VisitFindNonDefaultConstructorOrConstruct() { SaveAccumulatorScope accumulator_scope(&basm_); - CallBuiltin(RegisterOperand(0), - RegisterOperand(1)); + CallBuiltin( + RegisterOperand(0), RegisterOperand(1)); StoreRegisterPair(2, kReturnRegister0, kReturnRegister1); } @@ -1421,9 +1413,9 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve( CallBuiltin(args); } -void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield( +void BaselineCompiler::VisitIntrinsicAsyncGeneratorYieldWithAwait( interpreter::RegisterList args) { - CallBuiltin(args); + CallBuiltin(args); } void BaselineCompiler::VisitConstruct() { diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 81f5c961bc442a..d6a6591dfe769c 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -1051,7 +1051,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Drop the frame created by the baseline call. __ ldm(ia_w, sp, {fp, lr}); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1300,7 +1300,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 00368d0da6a974..168270bf6fb66c 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1205,7 +1205,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // Drop the frame created by the baseline call. 
__ Pop(fp, lr); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1474,7 +1474,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { diff --git a/deps/v8/src/builtins/array-to-reversed.tq b/deps/v8/src/builtins/array-to-reversed.tq index 542bc867f04e6e..5d97d6546ea1fe 100644 --- a/deps/v8/src/builtins/array-to-reversed.tq +++ b/deps/v8/src/builtins/array-to-reversed.tq @@ -8,8 +8,8 @@ macro FastPackedArrayToReversed( kind: constexpr ElementsKind, elements: FixedArrayBase, length: Smi): JSArray { // 3. Let A be ? ArrayCreate(𝔽(len)). - const copy: FixedArrayBase = - AllocateFixedArray(kind, SmiUntag(length), AllocationFlag::kNone); + const copy: FixedArrayBase = AllocateFixedArray( + kind, SmiUntag(length), AllocationFlag::kAllowLargeObjectAllocation); // 4. Let k be 0. let k: Smi = 0; @@ -39,6 +39,8 @@ macro TryFastPackedArrayToReversed(implicit context: Context)(receiver: JSAny): JSArray labels Slow { const array: FastJSArray = Cast(receiver) otherwise Slow; + if (array.length < 1) return ArrayCreate(0); + const kind: ElementsKind = array.map.elements_kind; if (kind == ElementsKind::PACKED_SMI_ELEMENTS) { return FastPackedArrayToReversed( diff --git a/deps/v8/src/builtins/array-to-sorted.tq b/deps/v8/src/builtins/array-to-sorted.tq index 0a953ab4e533d0..0a36502825c3cc 100644 --- a/deps/v8/src/builtins/array-to-sorted.tq +++ b/deps/v8/src/builtins/array-to-sorted.tq @@ -15,7 +15,8 @@ CopyWorkArrayToNewFastJSArray(implicit context: Context, sortState: SortState)( dcheck(len <= kMaxFastArrayLength); const copy: FixedArray = UnsafeCast(AllocateFixedArray( - elementsKind, Convert(len), AllocationFlag::kNone)); + elementsKind, Convert(len), + AllocationFlag::kAllowLargeObjectAllocation)); const workArray = sortState.workArray; CopyElements( diff --git a/deps/v8/src/builtins/array-to-spliced.tq b/deps/v8/src/builtins/array-to-spliced.tq index 999c1388624970..505a58b733cf05 100644 --- a/deps/v8/src/builtins/array-to-spliced.tq +++ b/deps/v8/src/builtins/array-to-spliced.tq @@ -14,11 +14,13 @@ macro CopyFastPackedArrayForToSpliced(implicit context: Context)( const insertCount: intptr = Convert(insertCountSmi); const actualDeleteCount: intptr = Convert(actualDeleteCountSmi); - const copy: FixedArrayBase = - AllocateFixedArray(kind, newLen, AllocationFlag::kNone); + const copy: FixedArrayBase = AllocateFixedArray( + kind, newLen, AllocationFlag::kAllowLargeObjectAllocation); - // Copy the part before the inserted items. - CopyElements(kind, copy, 0, array.elements, 0, actualStart); + if (actualStart > 0) { + // Copy the part before the inserted items. + CopyElements(kind, copy, 0, array.elements, 0, actualStart); + } // Initialize elements that will hold the inserted items because the // NewJSArray below may allocate. Leave the actual insertion for later since @@ -36,9 +38,11 @@ macro CopyFastPackedArrayForToSpliced(implicit context: Context)( // Copy the part after the inserted items. 
const secondPartStart: intptr = actualStart + insertCount; const secondPartLen: intptr = newLen - secondPartStart; - const r: intptr = actualStart + actualDeleteCount; - dcheck(Convert(r + secondPartLen) <= array.length); - CopyElements(kind, copy, secondPartStart, array.elements, r, secondPartLen); + if (secondPartLen > 0) { + const r: intptr = actualStart + actualDeleteCount; + dcheck(Convert(r + secondPartLen) <= array.length); + CopyElements(kind, copy, secondPartStart, array.elements, r, secondPartLen); + } const map: Map = LoadJSArrayElementsMap(kind, LoadNativeContext(context)); return NewJSArray(map, copy); diff --git a/deps/v8/src/builtins/array-with.tq b/deps/v8/src/builtins/array-with.tq index 161bce9f0b4b8a..e6a6c7cfbcf580 100644 --- a/deps/v8/src/builtins/array-with.tq +++ b/deps/v8/src/builtins/array-with.tq @@ -55,10 +55,8 @@ transitioning builtin GenericArrayWith( // https://tc39.es/proposal-change-array-by-copy/#sec-array.prototype.with transitioning javascript builtin ArrayPrototypeWith( - js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { - const index = arguments[0]; - const value = arguments[1]; - + js-implicit context: NativeContext, receiver: JSAny)( + index: JSAny, value: JSAny): JSAny { // 1. Let O be ? ToObject(this value). const object: JSReceiver = ToObject_Inline(context, receiver); diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 4d5f054a178f90..40f702549d93a6 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -437,10 +437,11 @@ extern enum MessageTemplate { kWasmTrapArrayOutOfBounds, kWasmTrapArrayTooLarge, kWasmTrapStringOffsetOutOfBounds, + kWasmObjectsAreOpaque, kWeakRefsRegisterTargetAndHoldingsMustNotBeSame, - kWeakRefsRegisterTargetMustBeObject, - kWeakRefsUnregisterTokenMustBeObject, - kWeakRefsWeakRefConstructorTargetMustBeObject, + kInvalidWeakRefsRegisterTarget, + kInvalidWeakRefsUnregisterToken, + kInvalidWeakRefsWeakRefConstructorTarget, ... } @@ -917,10 +918,10 @@ macro Float64IsNaN(n: float64): bool { // The type of all tagged values that can safely be compared with TaggedEqual. @if(V8_ENABLE_WEBASSEMBLY) type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map | - WeakCell | Context | EmptyString | WasmInternalFunction; + WeakCell | Context | EmptyString | Symbol | WasmInternalFunction; @ifnot(V8_ENABLE_WEBASSEMBLY) type TaggedWithIdentity = JSReceiver | FixedArrayBase | Oddball | Map | - WeakCell | Context | EmptyString; + WeakCell | Context | EmptyString | Symbol; extern operator '==' macro TaggedEqual(TaggedWithIdentity, Object): bool; extern operator '==' macro TaggedEqual(Object, TaggedWithIdentity): bool; diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index 79e8396bf49897..49fe48d6987a46 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -503,6 +503,8 @@ namespace { // Returns true, iff we can use ElementsAccessor for shifting. 
V8_WARN_UNUSED_RESULT bool CanUseFastArrayShift(Isolate* isolate, Handle receiver) { + if (V8_COMPRESS_POINTERS_8GB_BOOL) return false; + if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, nullptr, 0, 0) || !IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) { @@ -1599,7 +1601,8 @@ enum class ArrayGroupMode { kToObject, kToMap }; template inline MaybeHandle GenericArrayGroup( Isolate* isolate, Handle O, Handle callbackfn, - Handle groups, double initialK, double len) { + Handle thisArg, Handle groups, double initialK, + double len) { // 6. Repeat, while k < len for (double k = initialK; k < len; ++k) { // 6a. Let Pk be ! ToString(𝔽(k)). @@ -1617,9 +1620,9 @@ inline MaybeHandle GenericArrayGroup( // 6c. Let key be ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »). Handle propertyKey; Handle argv[] = {kValue, isolate->factory()->NewNumber(k), O}; - ASSIGN_RETURN_ON_EXCEPTION(isolate, propertyKey, - Execution::Call(isolate, callbackfn, O, 3, argv), - OrderedHashMap); + ASSIGN_RETURN_ON_EXCEPTION( + isolate, propertyKey, + Execution::Call(isolate, callbackfn, thisArg, 3, argv), OrderedHashMap); if (mode == ArrayGroupMode::kToMap) { // 6d. If key is -0𝔽, set key to +0𝔽. @@ -1649,7 +1652,7 @@ inline MaybeHandle GenericArrayGroup( template inline MaybeHandle FastArrayGroup( Isolate* isolate, Handle array, Handle callbackfn, - Handle groups, double len, + Handle thisArg, Handle groups, double len, ElementsKind* result_elements_kind) { DCHECK_NOT_NULL(result_elements_kind); @@ -1662,8 +1665,8 @@ inline MaybeHandle FastArrayGroup( for (InternalIndex k : InternalIndex::Range(uint_len)) { if (!CheckArrayMapNotModified(array, original_map) || k.as_uint32() >= static_cast(array->length().Number())) { - return GenericArrayGroup(isolate, array, callbackfn, groups, - k.as_uint32(), len); + return GenericArrayGroup(isolate, array, callbackfn, thisArg, + groups, k.as_uint32(), len); } // 6a. Let Pk be ! ToString(𝔽(k)). // 6b. Let kValue be ? Get(O, Pk). @@ -1679,7 +1682,7 @@ inline MaybeHandle FastArrayGroup( kValue, isolate->factory()->NewNumber(k.as_uint32()), array}; ASSIGN_RETURN_ON_EXCEPTION( isolate, propertyKey, - Execution::Call(isolate, callbackfn, array, 3, argv), OrderedHashMap); + Execution::Call(isolate, callbackfn, thisArg, 3, argv), OrderedHashMap); if (mode == ArrayGroupMode::kToMap) { // 6d. If key is -0𝔽, set key to +0𝔽. @@ -1719,7 +1722,7 @@ inline MaybeHandle FastArrayGroup( } // namespace -// https://tc39.es/proposal-array-grouping/#sec-array.prototype.groupby +// https://tc39.es/proposal-array-grouping/#sec-array.prototype.group BUILTIN(ArrayPrototypeGroup) { const char* const kMethodName = "Array.prototype.group"; HandleScope scope(isolate); @@ -1741,6 +1744,8 @@ BUILTIN(ArrayPrototypeGroup) { isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callbackfn)); } + Handle thisArg = args.atOrUndefined(isolate, 2); + // 5. Let groups be a new empty List. Handle groups = isolate->factory()->NewOrderedHashMap(); ElementsKind result_elements_kind = ElementsKind::PACKED_ELEMENTS; @@ -1748,14 +1753,15 @@ BUILTIN(ArrayPrototypeGroup) { Handle array = Handle::cast(O); ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, groups, - FastArrayGroup( - isolate, array, callbackfn, groups, len, &result_elements_kind)); + FastArrayGroup(isolate, array, callbackfn, + thisArg, groups, len, + &result_elements_kind)); } else { // 4. Let k be 0. 
ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, groups, GenericArrayGroup(isolate, O, callbackfn, - groups, 0, len)); + thisArg, groups, 0, len)); } // 7. Let obj be ! OrdinaryObjectCreate(null). @@ -1781,7 +1787,7 @@ BUILTIN(ArrayPrototypeGroup) { return *obj; } -// https://tc39.es/proposal-array-grouping/#sec-array.prototype.groupbymap +// https://tc39.es/proposal-array-grouping/#sec-array.prototype.grouptomap BUILTIN(ArrayPrototypeGroupToMap) { const char* const kMethodName = "Array.prototype.groupToMap"; HandleScope scope(isolate); @@ -1803,21 +1809,23 @@ BUILTIN(ArrayPrototypeGroupToMap) { isolate, NewTypeError(MessageTemplate::kCalledNonCallable, callbackfn)); } + Handle thisArg = args.atOrUndefined(isolate, 2); + // 5. Let groups be a new empty List. Handle groups = isolate->factory()->NewOrderedHashMap(); ElementsKind result_elements_kind = ElementsKind::PACKED_ELEMENTS; if (IsFastArray(O)) { Handle array = Handle::cast(O); - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, groups, - FastArrayGroup( - isolate, array, callbackfn, groups, len, &result_elements_kind)); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, groups, + FastArrayGroup( + isolate, array, callbackfn, thisArg, + groups, len, &result_elements_kind)); } else { // 4. Let k be 0. ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, groups, GenericArrayGroup(isolate, O, callbackfn, - groups, 0, len)); + thisArg, groups, 0, len)); } // 7. Let map be ! Construct(%Map%). diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc index 4d19f6ed42dda8..fbe29b434fae36 100644 --- a/deps/v8/src/builtins/builtins-arraybuffer.cc +++ b/deps/v8/src/builtins/builtins-arraybuffer.cc @@ -24,7 +24,7 @@ namespace internal { } #define CHECK_RESIZABLE(expected, name, method) \ - if (name->is_resizable() != expected) { \ + if (name->is_resizable_by_js() != expected) { \ THROW_NEW_ERROR_RETURN_FAILURE( \ isolate, \ NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \ @@ -316,7 +316,7 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate, if (new_len_size != 0) { size_t from_byte_length = array_buffer->GetByteLength(); - if (V8_UNLIKELY(!is_shared && array_buffer->is_resizable())) { + if (V8_UNLIKELY(!is_shared && array_buffer->is_resizable_by_js())) { // The above steps might have resized the underlying buffer. In that case, // only copy the still-accessible portion of the underlying data. if (first_size > from_byte_length) { @@ -569,7 +569,7 @@ BUILTIN(ArrayBufferPrototypeTransfer) { // Case 2: We can reuse the same BackingStore. auto from_backing_store = array_buffer->GetBackingStore(); - if (!from_backing_store->is_resizable() && + if (from_backing_store && !from_backing_store->is_resizable_by_js() && (new_byte_length == array_buffer->GetByteLength() || from_backing_store->CanReallocate())) { // Reallocate covers steps 6-12. 
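Further up, the new SignedMulHigh64/UnsignedMulHigh64 helpers in base/bits.cc implement the portable high-word multiply from Hacker's Delight, section 8.2: split each operand into 32-bit halves, form the partial products, and carry the high parts upward. A standalone cross-check of the signed variant against the compiler's 128-bit arithmetic (assumes GCC/Clang __int128; not part of the patch):

#include <cassert>
#include <cstdint>
#include <limits>

// Copy of the helper added in deps/v8/src/base/bits.cc (Hacker's Delight,
// section 8.2). Assumes arithmetic right shift on signed integers.
int64_t SignedMulHigh64(int64_t u, int64_t v) {
  uint64_t u0 = u & 0xFFFFFFFF;
  int64_t u1 = u >> 32;
  uint64_t v0 = v & 0xFFFFFFFF;
  int64_t v1 = v >> 32;
  uint64_t w0 = u0 * v0;
  int64_t t = u1 * v0 + (w0 >> 32);
  int64_t w1 = t & 0xFFFFFFFF;
  int64_t w2 = t >> 32;
  w1 = u0 * v1 + w1;
  return u1 * v1 + w2 + (w1 >> 32);
}

int main() {
  // Spot-check edge cases against a native 128-bit multiply: the high
  // 64 bits of the full product must match the helper's result.
  const int64_t samples[] = {0, 1, -1, 0x123456789ABCDEF0,
                             std::numeric_limits<int64_t>::min(),
                             std::numeric_limits<int64_t>::max()};
  for (int64_t a : samples) {
    for (int64_t b : samples) {
      __int128 wide = static_cast<__int128>(a) * static_cast<__int128>(b);
      assert(SignedMulHigh64(a, b) == static_cast<int64_t>(wide >> 64));
    }
  }
  return 0;
}

The unsigned variant is analogous, just with all intermediates kept unsigned; these helpers back the new 64-bit div/mod lowering paths rather than relying on compiler intrinsics.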
diff --git a/deps/v8/src/builtins/builtins-async-generator-gen.cc b/deps/v8/src/builtins/builtins-async-generator-gen.cc index beccd0dfa25d81..26dcabe6c3b320 100644 --- a/deps/v8/src/builtins/builtins-async-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-generator-gen.cc @@ -602,7 +602,7 @@ TF_BUILTIN(AsyncGeneratorReject, AsyncGeneratorBuiltinsAssembler) { TrueConstant())); } -TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { +TF_BUILTIN(AsyncGeneratorYieldWithAwait, AsyncGeneratorBuiltinsAssembler) { const auto generator = Parameter(Descriptor::kGenerator); const auto value = Parameter(Descriptor::kValue); const auto is_caught = Parameter(Descriptor::kIsCaught); @@ -614,13 +614,14 @@ TF_BUILTIN(AsyncGeneratorYield, AsyncGeneratorBuiltinsAssembler) { LoadPromiseFromAsyncGeneratorRequest(request); Await(context, generator, value, outer_promise, - AsyncGeneratorYieldResolveSharedFunConstant(), + AsyncGeneratorYieldWithAwaitResolveSharedFunConstant(), AsyncGeneratorAwaitRejectSharedFunConstant(), is_caught); SetGeneratorAwaiting(generator); Return(UndefinedConstant()); } -TF_BUILTIN(AsyncGeneratorYieldResolveClosure, AsyncGeneratorBuiltinsAssembler) { +TF_BUILTIN(AsyncGeneratorYieldWithAwaitResolveClosure, + AsyncGeneratorBuiltinsAssembler) { const auto context = Parameter(Descriptor::kContext); const auto value = Parameter(Descriptor::kValue); const TNode generator = diff --git a/deps/v8/src/builtins/builtins-bigint-gen.h b/deps/v8/src/builtins/builtins-bigint-gen.h index c1c9265e4c729e..8543f5fe999148 100644 --- a/deps/v8/src/builtins/builtins-bigint-gen.h +++ b/deps/v8/src/builtins/builtins-bigint-gen.h @@ -63,32 +63,34 @@ class BigIntBuiltinsAssembler : public CodeStubAssembler { std::make_pair(MachineType::AnyTagged(), y)); } - TNode CppAbsoluteMulAndCanonicalize(TNode result, - TNode x, TNode y) { + TNode CppAbsoluteMulAndCanonicalize(TNode result, + TNode x, + TNode y) { TNode mutable_big_int_absolute_mul_and_canonicalize = ExternalConstant( ExternalReference:: mutable_big_int_absolute_mul_and_canonicalize_function()); - TNode success = UncheckedCast(CallCFunction( - mutable_big_int_absolute_mul_and_canonicalize, MachineType::Bool(), + TNode return_code = UncheckedCast(CallCFunction( + mutable_big_int_absolute_mul_and_canonicalize, MachineType::Int32(), std::make_pair(MachineType::AnyTagged(), result), std::make_pair(MachineType::AnyTagged(), x), std::make_pair(MachineType::AnyTagged(), y))); - return success; + return return_code; } - TNode CppAbsoluteDivAndCanonicalize(TNode result, - TNode x, TNode y) { + TNode CppAbsoluteDivAndCanonicalize(TNode result, + TNode x, + TNode y) { TNode mutable_big_int_absolute_div_and_canonicalize = ExternalConstant( ExternalReference:: mutable_big_int_absolute_div_and_canonicalize_function()); - TNode success = UncheckedCast(CallCFunction( - mutable_big_int_absolute_div_and_canonicalize, MachineType::Bool(), + TNode return_code = UncheckedCast(CallCFunction( + mutable_big_int_absolute_div_and_canonicalize, MachineType::Int32(), std::make_pair(MachineType::AnyTagged(), result), std::make_pair(MachineType::AnyTagged(), x), std::make_pair(MachineType::AnyTagged(), y))); - return success; + return return_code; } void CppBitwiseAndPosPosAndCanonicalize(TNode result, TNode x, diff --git a/deps/v8/src/builtins/builtins-bigint.tq b/deps/v8/src/builtins/builtins-bigint.tq index be5d42aff0de4a..5ef53a54ce399c 100644 --- a/deps/v8/src/builtins/builtins-bigint.tq +++ b/deps/v8/src/builtins/builtins-bigint.tq @@ -14,9 +14,9 @@ extern 
macro BigIntBuiltinsAssembler::CppAbsoluteAddAndCanonicalize( extern macro BigIntBuiltinsAssembler::CppAbsoluteSubAndCanonicalize( MutableBigInt, BigIntBase, BigIntBase): void; extern macro BigIntBuiltinsAssembler::CppAbsoluteMulAndCanonicalize( - MutableBigInt, BigIntBase, BigIntBase): bool; + MutableBigInt, BigIntBase, BigIntBase): int32; extern macro BigIntBuiltinsAssembler::CppAbsoluteDivAndCanonicalize( - MutableBigInt, BigIntBase, BigIntBase): bool; + MutableBigInt, BigIntBase, BigIntBase): int32; extern macro BigIntBuiltinsAssembler::CppBitwiseAndPosPosAndCanonicalize( MutableBigInt, BigIntBase, BigIntBase): void; extern macro BigIntBuiltinsAssembler::CppBitwiseAndNegNegAndCanonicalize( @@ -236,7 +236,7 @@ macro BigIntMultiplyImpl(implicit context: Context)(x: BigInt, y: BigInt): const result = AllocateEmptyBigIntNoThrow(resultSign, xlength + ylength) otherwise BigIntTooBig; - if (!CppAbsoluteMulAndCanonicalize(result, x, y)) { + if (CppAbsoluteMulAndCanonicalize(result, x, y) == 1) { goto TerminationRequested; } @@ -305,7 +305,7 @@ macro BigIntDivideImpl(implicit context: Context)(x: BigInt, y: BigInt): const result = AllocateEmptyBigIntNoThrow(resultSign, resultLength) otherwise unreachable; - if (!CppAbsoluteDivAndCanonicalize(result, x, y)) { + if (CppAbsoluteDivAndCanonicalize(result, x, y) == 1) { goto TerminationRequested; } diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index f6238e30728766..f6edbb6bc028f4 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -22,130 +22,6 @@ namespace internal { template using TVariable = compiler::TypedCodeAssemblerVariable; -class BaseCollectionsAssembler : public CodeStubAssembler { - public: - explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state) - : CodeStubAssembler(state) {} - - virtual ~BaseCollectionsAssembler() = default; - - protected: - enum Variant { kMap, kSet, kWeakMap, kWeakSet }; - - // Adds an entry to a collection. For Maps, properly handles extracting the - // key and value from the entry (see LoadKeyValue()). - void AddConstructorEntry(Variant variant, TNode context, - TNode collection, TNode add_function, - TNode key_value, - Label* if_may_have_side_effects = nullptr, - Label* if_exception = nullptr, - TVariable* var_exception = nullptr); - - // Adds constructor entries to a collection. Choosing a fast path when - // possible. - void AddConstructorEntries(Variant variant, TNode context, - TNode native_context, - TNode collection, - TNode initial_entries); - - // Fast path for adding constructor entries. Assumes the entries are a fast - // JS array (see CodeStubAssembler::BranchIfFastJSArray()). - void AddConstructorEntriesFromFastJSArray(Variant variant, - TNode context, - TNode native_context, - TNode collection, - TNode fast_jsarray, - Label* if_may_have_side_effects); - - // Adds constructor entries to a collection using the iterator protocol. - void AddConstructorEntriesFromIterable(Variant variant, - TNode context, - TNode native_context, - TNode collection, - TNode iterable); - - // Constructs a collection instance. Choosing a fast path when possible. - TNode AllocateJSCollection(TNode context, - TNode constructor, - TNode new_target); - - // Fast path for constructing a collection instance if the constructor - // function has not been modified. 
- TNode AllocateJSCollectionFast(TNode constructor); - - // Fallback for constructing a collection instance if the constructor function - // has been modified. - TNode AllocateJSCollectionSlow(TNode context, - TNode constructor, - TNode new_target); - - // Allocates the backing store for a collection. - virtual TNode AllocateTable( - Variant variant, TNode at_least_space_for) = 0; - - // Main entry point for a collection constructor builtin. - void GenerateConstructor(Variant variant, - Handle constructor_function_name, - TNode new_target, TNode argc, - TNode context); - - // Retrieves the collection function that adds an entry. `set` for Maps and - // `add` for Sets. - TNode GetAddFunction(Variant variant, TNode context, - TNode collection); - - // Retrieves the collection constructor function. - TNode GetConstructor(Variant variant, - TNode native_context); - - // Retrieves the initial collection function that adds an entry. Should only - // be called when it is certain that a collection prototype's map hasn't been - // changed. - TNode GetInitialAddFunction(Variant variant, - TNode native_context); - - // Checks whether {collection}'s initial add/set function has been modified - // (depending on {variant}, loaded from {native_context}). - void GotoIfInitialAddFunctionModified(Variant variant, - TNode native_context, - TNode collection, - Label* if_modified); - - // Gets root index for the name of the add/set function. - RootIndex GetAddFunctionNameIndex(Variant variant); - - // Retrieves the offset to access the backing table from the collection. - int GetTableOffset(Variant variant); - - // Estimates the number of entries the collection will have after adding the - // entries passed in the constructor. AllocateTable() can use this to avoid - // the time of growing/rehashing when adding the constructor entries. - TNode EstimatedInitialSize(TNode initial_entries, - TNode is_fast_jsarray); - - void GotoIfCannotBeWeakKey(const TNode obj, - Label* if_cannot_be_weak_key); - - // Determines whether the collection's prototype has been modified. - TNode HasInitialCollectionPrototype(Variant variant, - TNode native_context, - TNode collection); - - // Gets the initial prototype map for given collection {variant}. - TNode GetInitialCollectionPrototype(Variant variant, - TNode native_context); - - // Loads an element from a fixed array. If the element is the hole, returns - // `undefined`. - TNode LoadAndNormalizeFixedArrayElement(TNode elements, - TNode index); - - // Loads an element from a fixed double array. If the element is the hole, - // returns `undefined`. 
- TNode LoadAndNormalizeFixedDoubleArrayElement( - TNode elements, TNode index); -}; - void BaseCollectionsAssembler::AddConstructorEntry( Variant variant, TNode context, TNode collection, TNode add_function, TNode key_value, @@ -177,6 +53,9 @@ void BaseCollectionsAssembler::AddConstructorEntries( EstimatedInitialSize(initial_entries, use_fast_loop.value()); Label allocate_table(this, &use_fast_loop), exit(this), fast_loop(this), slow_loop(this, Label::kDeferred); + TVARIABLE(JSReceiver, var_iterator_object); + TVARIABLE(Object, var_exception); + Label if_exception(this, Label::kDeferred); Goto(&allocate_table); BIND(&allocate_table); { @@ -189,6 +68,7 @@ void BaseCollectionsAssembler::AddConstructorEntries( } BIND(&fast_loop); { + Label if_exception_during_fast_iteration(this); TNode initial_entries_jsarray = UncheckedCast(initial_entries); #if DEBUG @@ -198,9 +78,13 @@ void BaseCollectionsAssembler::AddConstructorEntries( #endif Label if_may_have_side_effects(this, Label::kDeferred); - AddConstructorEntriesFromFastJSArray(variant, context, native_context, - collection, initial_entries_jsarray, - &if_may_have_side_effects); + { + compiler::ScopedExceptionHandler handler( + this, &if_exception_during_fast_iteration, &var_exception); + AddConstructorEntriesFromFastJSArray(variant, context, native_context, + collection, initial_entries_jsarray, + &if_may_have_side_effects); + } Goto(&exit); if (variant == kMap || variant == kWeakMap) { @@ -222,13 +106,37 @@ use_fast_loop = Int32FalseConstant(); Goto(&allocate_table); } + BIND(&if_exception_during_fast_iteration); + { + // In case an exception is thrown during collection population, materialize + // the iterator and execute the iterator closing protocol. It might be + // non-trivial in case a "return" callback is added somewhere in the + // iterator's prototype chain. + TNode native_context = LoadNativeContext(context); + var_iterator_object = CreateArrayIterator( + native_context, UncheckedCast(initial_entries), + IterationKind::kEntries); + Goto(&if_exception); + } } BIND(&slow_loop); { - AddConstructorEntriesFromIterable(variant, context, native_context, - collection, initial_entries); + AddConstructorEntriesFromIterable( + variant, context, native_context, collection, initial_entries, + &if_exception, &var_iterator_object, &var_exception); Goto(&exit); } + BIND(&if_exception); + { + TNode message = GetPendingMessage(); + SetPendingMessage(TheHoleConstant()); + // iterator.next field is not used by IteratorCloseOnException.
+ TorqueStructIteratorRecord iterator = {var_iterator_object.value(), {}}; + IteratorCloseOnException(context, iterator); + CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(), + message); + Unreachable(); + } BIND(&exit); } @@ -306,20 +214,22 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray( void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( Variant variant, TNode context, TNode native_context, - TNode collection, TNode iterable) { - Label exit(this), loop(this), if_exception(this, Label::kDeferred); + TNode collection, TNode iterable, Label* if_exception, + TVariable* var_iterator_object, + TVariable* var_exception) { + Label exit(this), loop(this); CSA_DCHECK(this, Word32BinaryNot(IsNullOrUndefined(iterable))); TNode add_func = GetAddFunction(variant, context, collection); IteratorBuiltinsAssembler iterator_assembler(this->state()); TorqueStructIteratorRecord iterator = iterator_assembler.GetIterator(context, iterable); + *var_iterator_object = iterator.object; CSA_DCHECK(this, Word32BinaryNot(IsUndefined(iterator.object))); TNode fast_iterator_result_map = CAST( LoadContextElement(native_context, Context::ITERATOR_RESULT_MAP_INDEX)); - TVARIABLE(Object, var_exception); Goto(&loop); BIND(&loop); @@ -329,18 +239,9 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromIterable( TNode next_value = iterator_assembler.IteratorValue( context, next, fast_iterator_result_map); AddConstructorEntry(variant, context, collection, add_func, next_value, - nullptr, &if_exception, &var_exception); + nullptr, if_exception, var_exception); Goto(&loop); } - BIND(&if_exception); - { - TNode message = GetPendingMessage(); - SetPendingMessage(TheHoleConstant()); - IteratorCloseOnException(context, iterator); - CallRuntime(Runtime::kReThrowWithMessage, context, var_exception.value(), - message); - Unreachable(); - } BIND(&exit); } @@ -523,16 +424,28 @@ TNode BaseCollectionsAssembler::EstimatedInitialSize( [=] { return IntPtrConstant(0); }); } -void BaseCollectionsAssembler::GotoIfCannotBeWeakKey( - const TNode obj, Label* if_cannot_be_weak_key) { - GotoIf(TaggedIsSmi(obj), if_cannot_be_weak_key); +// https://tc39.es/proposal-symbols-as-weakmap-keys/#sec-canbeheldweakly-abstract-operation +void BaseCollectionsAssembler::GotoIfCannotBeHeldWeakly( + const TNode obj, Label* if_cannot_be_held_weakly) { + Label check_symbol_key(this); + Label end(this); + GotoIf(TaggedIsSmi(obj), if_cannot_be_held_weakly); TNode instance_type = LoadMapInstanceType(LoadMap(CAST(obj))); - GotoIfNot(IsJSReceiverInstanceType(instance_type), if_cannot_be_weak_key); + GotoIfNot(IsJSReceiverInstanceType(instance_type), &check_symbol_key); // TODO(v8:12547) Shared structs and arrays should only be able to point // to shared values in weak collections. For now, disallow them as weak // collection keys. 
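The JS-observable effect of the rethrow-with-close path above: when adding an entry throws while `new Map(...)` or `new Set(...)` iterates a fast JS array, the lazily materialized array iterator is now closed before the exception is rethrown. A minimal sketch, exercising a patched `%ArrayIteratorPrototype%.return` as the comment in the hunk describes:

```js
// Observe iterator closing when Map construction fails over a fast array.
const arrayIteratorProto = Object.getPrototypeOf([][Symbol.iterator]());
arrayIteratorProto.return = function () {
  console.log('iterator closed'); // runs before the TypeError propagates
  return { done: true };
};
try {
  new Map([1]); // 1 is not a valid [key, value] entry object
} catch (e) {
  console.log(e instanceof TypeError); // true
}
delete arrayIteratorProto.return; // undo the patch
```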
- GotoIf(IsJSSharedStructInstanceType(instance_type), if_cannot_be_weak_key); - GotoIf(IsJSSharedArrayInstanceType(instance_type), if_cannot_be_weak_key); + GotoIf(IsJSSharedStructInstanceType(instance_type), if_cannot_be_held_weakly); + GotoIf(IsJSSharedArrayInstanceType(instance_type), if_cannot_be_held_weakly); + Goto(&end); + Bind(&check_symbol_key); + GotoIfNot(HasHarmonySymbolAsWeakmapKeyFlag(), if_cannot_be_held_weakly); + GotoIfNot(IsSymbolInstanceType(instance_type), if_cannot_be_held_weakly); + TNode flags = LoadSymbolFlags(CAST(obj)); + GotoIf(Word32And(flags, Symbol::IsInPublicSymbolTableBit::kMask), + if_cannot_be_held_weakly); + Goto(&end); + Bind(&end); } TNode BaseCollectionsAssembler::GetInitialCollectionPrototype( @@ -2414,67 +2327,6 @@ TF_BUILTIN(FindOrderedHashSetEntry, CollectionsBuiltinsAssembler) { Return(SmiConstant(-1)); } -class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { - public: - explicit WeakCollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state) - : BaseCollectionsAssembler(state) {} - - protected: - void AddEntry(TNode table, TNode key_index, - TNode key, TNode value, - TNode number_of_elements); - - TNode AllocateTable(Variant variant, - TNode at_least_space_for) override; - - // Generates and sets the identity for a JSRececiver. - TNode CreateIdentityHash(TNode receiver); - TNode EntryMask(TNode capacity); - - // Builds code that finds the EphemeronHashTable entry for a {key} using the - // comparison code generated by {key_compare}. The key index is returned if - // the {key} is found. - using KeyComparator = - std::function entry_key, Label* if_same)>; - TNode FindKeyIndex(TNode table, TNode key_hash, - TNode entry_mask, - const KeyComparator& key_compare); - - // Builds code that finds an EphemeronHashTable entry available for a new - // entry. - TNode FindKeyIndexForInsertion(TNode table, - TNode key_hash, - TNode entry_mask); - - // Builds code that finds the EphemeronHashTable entry with key that matches - // {key} and returns the entry's key index. If {key} cannot be found, jumps to - // {if_not_found}. 
- TNode FindKeyIndexForKey(TNode table, TNode key, - TNode hash, - TNode entry_mask, - Label* if_not_found); - - TNode InsufficientCapacityToAdd(TNode capacity, - TNode number_of_elements, - TNode number_of_deleted); - TNode KeyIndexFromEntry(TNode entry); - - TNode LoadNumberOfElements(TNode table, - int offset); - TNode LoadNumberOfDeleted(TNode table, - int offset = 0); - TNode LoadTable(TNode collection); - TNode LoadTableCapacity(TNode table); - - void RemoveEntry(TNode table, TNode key_index, - TNode number_of_elements); - TNode ShouldRehash(TNode number_of_elements, - TNode number_of_deleted); - TNode ShouldShrink(TNode capacity, - TNode number_of_elements); - TNode ValueIndexFromKeyIndex(TNode key_index); -}; - void WeakCollectionsBuiltinsAssembler::AddEntry( TNode table, TNode key_index, TNode key, TNode value, TNode number_of_elements) { @@ -2490,6 +2342,25 @@ void WeakCollectionsBuiltinsAssembler::AddEntry( SmiFromIntPtr(number_of_elements)); } +TNode WeakCollectionsBuiltinsAssembler::GetHash( + const TNode key, Label* if_no_hash) { + TVARIABLE(IntPtrT, var_hash); + Label if_symbol(this); + Label return_result(this); + GotoIfNot(IsJSReceiver(key), &if_symbol); + var_hash = LoadJSReceiverIdentityHash(CAST(key), if_no_hash); + Goto(&return_result); + Bind(&if_symbol); + CSA_DCHECK(this, IsSymbol(key)); + CSA_DCHECK(this, Word32BinaryNot( + Word32And(LoadSymbolFlags(CAST(key)), + Symbol::IsInPublicSymbolTableBit::kMask))); + var_hash = ChangeInt32ToIntPtr(LoadNameHash(CAST(key), nullptr)); + Goto(&return_result); + Bind(&return_result); + return var_hash.value(); +} + TNode WeakCollectionsBuiltinsAssembler::AllocateTable( Variant variant, TNode at_least_space_for) { // See HashTable::New(). @@ -2715,18 +2586,17 @@ TF_BUILTIN(WeakMapLookupHashIndex, WeakCollectionsBuiltinsAssembler) { auto table = Parameter(Descriptor::kTable); auto key = Parameter(Descriptor::kKey); - Label if_cannot_be_weak_key(this); + Label if_cannot_be_held_weakly(this); - GotoIfCannotBeWeakKey(key, &if_cannot_be_weak_key); + GotoIfCannotBeHeldWeakly(key, &if_cannot_be_held_weakly); - TNode hash = - LoadJSReceiverIdentityHash(CAST(key), &if_cannot_be_weak_key); + TNode hash = GetHash(CAST(key), &if_cannot_be_held_weakly); TNode capacity = LoadTableCapacity(table); TNode key_index = FindKeyIndexForKey( - table, key, hash, EntryMask(capacity), &if_cannot_be_weak_key); + table, key, hash, EntryMask(capacity), &if_cannot_be_held_weakly); Return(SmiTag(ValueIndexFromKeyIndex(key_index))); - BIND(&if_cannot_be_weak_key); + BIND(&if_cannot_be_held_weakly); Return(SmiConstant(-1)); } @@ -2781,23 +2651,22 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) { auto collection = Parameter(Descriptor::kCollection); auto key = Parameter(Descriptor::kKey); - Label call_runtime(this), if_cannot_be_weak_key(this); + Label call_runtime(this), if_cannot_be_held_weakly(this); - GotoIfCannotBeWeakKey(key, &if_cannot_be_weak_key); + GotoIfCannotBeHeldWeakly(key, &if_cannot_be_held_weakly); - TNode hash = - LoadJSReceiverIdentityHash(CAST(key), &if_cannot_be_weak_key); + TNode hash = GetHash(CAST(key), &if_cannot_be_held_weakly); TNode table = LoadTable(collection); TNode capacity = LoadTableCapacity(table); TNode key_index = FindKeyIndexForKey( - table, key, hash, EntryMask(capacity), &if_cannot_be_weak_key); + table, key, hash, EntryMask(capacity), &if_cannot_be_held_weakly); TNode number_of_elements = LoadNumberOfElements(table, -1); GotoIf(ShouldShrink(capacity, number_of_elements), &call_runtime); 
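The new GetHash helper above accepts Symbols in addition to JSReceivers, which is what lets non-registered symbols act as weak collection keys (the symbols-as-weakmap-keys proposal; gated here by the harmony flag referenced via HasHarmonySymbolAsWeakmapKeyFlag, presumably --harmony-symbol-as-weakmap-key). A sketch of the JS-visible rules, assuming that flag is enabled:

```js
// CanBeHeldWeakly: objects and non-registered symbols qualify;
// symbols from the global registry (Symbol.for) and other primitives do not.
const wm = new WeakMap();
wm.set({}, 'object key');           // allowed, as before
wm.set(Symbol('unique'), 'symbol'); // newly allowed: unique symbol
try {
  wm.set(Symbol.for('shared'), 1);  // in the public symbol table
} catch (e) {
  console.log(e instanceof TypeError); // true
}
```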
RemoveEntry(table, key_index, number_of_elements); Return(TrueConstant()); - BIND(&if_cannot_be_weak_key); + BIND(&if_cannot_be_held_weakly); Return(FalseConstant()); BIND(&call_runtime); @@ -2810,10 +2679,10 @@ TF_BUILTIN(WeakCollectionDelete, WeakCollectionsBuiltinsAssembler) { TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); auto collection = Parameter(Descriptor::kCollection); - auto key = Parameter(Descriptor::kKey); + auto key = Parameter(Descriptor::kKey); auto value = Parameter(Descriptor::kValue); - CSA_DCHECK(this, IsJSReceiver(key)); + CSA_DCHECK(this, Word32Or(IsJSReceiver(key), IsSymbol(key))); Label call_runtime(this), if_no_hash(this), if_not_found(this); @@ -2821,7 +2690,7 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { TNode capacity = LoadTableCapacity(table); TNode entry_mask = EntryMask(capacity); - TVARIABLE(IntPtrT, var_hash, LoadJSReceiverIdentityHash(key, &if_no_hash)); + TVARIABLE(IntPtrT, var_hash, GetHash(key, &if_no_hash)); TNode key_index = FindKeyIndexForKey(table, key, var_hash.value(), entry_mask, &if_not_found); @@ -2830,6 +2699,7 @@ TF_BUILTIN(WeakCollectionSet, WeakCollectionsBuiltinsAssembler) { BIND(&if_no_hash); { + CSA_DCHECK(this, IsJSReceiver(key)); var_hash = SmiUntag(CreateIdentityHash(key)); Goto(&if_not_found); } @@ -2881,7 +2751,7 @@ TF_BUILTIN(WeakMapPrototypeSet, WeakCollectionsBuiltinsAssembler) { "WeakMap.prototype.set"); Label throw_invalid_key(this); - GotoIfCannotBeWeakKey(key, &throw_invalid_key); + GotoIfCannotBeHeldWeakly(key, &throw_invalid_key); Return( CallBuiltin(Builtin::kWeakCollectionSet, context, receiver, key, value)); @@ -2899,7 +2769,7 @@ TF_BUILTIN(WeakSetPrototypeAdd, WeakCollectionsBuiltinsAssembler) { "WeakSet.prototype.add"); Label throw_invalid_value(this); - GotoIfCannotBeWeakKey(value, &throw_invalid_value); + GotoIfCannotBeHeldWeakly(value, &throw_invalid_value); Return(CallBuiltin(Builtin::kWeakCollectionSet, context, receiver, value, TrueConstant())); diff --git a/deps/v8/src/builtins/builtins-collections-gen.h b/deps/v8/src/builtins/builtins-collections-gen.h index a132557e3cd0a4..6dd2381ddd3743 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.h +++ b/deps/v8/src/builtins/builtins-collections-gen.h @@ -20,6 +20,191 @@ void BranchIfIterableWithOriginalValueSetIterator( TNode context, compiler::CodeAssemblerLabel* if_true, compiler::CodeAssemblerLabel* if_false); +class BaseCollectionsAssembler : public CodeStubAssembler { + public: + explicit BaseCollectionsAssembler(compiler::CodeAssemblerState* state) + : CodeStubAssembler(state) {} + + virtual ~BaseCollectionsAssembler() = default; + + void GotoIfCannotBeHeldWeakly(const TNode obj, + Label* if_cannot_be_held_weakly); + + protected: + enum Variant { kMap, kSet, kWeakMap, kWeakSet }; + + // Adds an entry to a collection. For Maps, properly handles extracting the + // key and value from the entry (see LoadKeyValue()). + void AddConstructorEntry(Variant variant, TNode context, + TNode collection, TNode add_function, + TNode key_value, + Label* if_may_have_side_effects = nullptr, + Label* if_exception = nullptr, + TVariable* var_exception = nullptr); + + // Adds constructor entries to a collection. Choosing a fast path when + // possible. + void AddConstructorEntries(Variant variant, TNode context, + TNode native_context, + TNode collection, + TNode initial_entries); + + // Fast path for adding constructor entries. 
Assumes the entries are a fast + // JS array (see CodeStubAssembler::BranchIfFastJSArray()). + void AddConstructorEntriesFromFastJSArray(Variant variant, + TNode context, + TNode native_context, + TNode collection, + TNode fast_jsarray, + Label* if_may_have_side_effects); + + // Adds constructor entries to a collection using the iterator protocol. + void AddConstructorEntriesFromIterable( + Variant variant, TNode context, TNode native_context, + TNode collection, TNode iterable, Label* if_exception, + TVariable* var_iterator, TVariable* var_exception); + + // Constructs a collection instance. Choosing a fast path when possible. + TNode AllocateJSCollection(TNode context, + TNode constructor, + TNode new_target); + + // Fast path for constructing a collection instance if the constructor + // function has not been modified. + TNode AllocateJSCollectionFast(TNode constructor); + + // Fallback for constructing a collection instance if the constructor function + // has been modified. + TNode AllocateJSCollectionSlow(TNode context, + TNode constructor, + TNode new_target); + + // Allocates the backing store for a collection. + virtual TNode AllocateTable( + Variant variant, TNode at_least_space_for) = 0; + + // Main entry point for a collection constructor builtin. + void GenerateConstructor(Variant variant, + Handle constructor_function_name, + TNode new_target, TNode argc, + TNode context); + + // Retrieves the collection function that adds an entry. `set` for Maps and + // `add` for Sets. + TNode GetAddFunction(Variant variant, TNode context, + TNode collection); + + // Retrieves the collection constructor function. + TNode GetConstructor(Variant variant, + TNode native_context); + + // Retrieves the initial collection function that adds an entry. Should only + // be called when it is certain that a collection prototype's map hasn't been + // changed. + TNode GetInitialAddFunction(Variant variant, + TNode native_context); + + // Checks whether {collection}'s initial add/set function has been modified + // (depending on {variant}, loaded from {native_context}). + void GotoIfInitialAddFunctionModified(Variant variant, + TNode native_context, + TNode collection, + Label* if_modified); + + // Gets root index for the name of the add/set function. + RootIndex GetAddFunctionNameIndex(Variant variant); + + // Retrieves the offset to access the backing table from the collection. + int GetTableOffset(Variant variant); + + // Estimates the number of entries the collection will have after adding the + // entries passed in the constructor. AllocateTable() can use this to avoid + // the time of growing/rehashing when adding the constructor entries. + TNode EstimatedInitialSize(TNode initial_entries, + TNode is_fast_jsarray); + + // Determines whether the collection's prototype has been modified. + TNode HasInitialCollectionPrototype(Variant variant, + TNode native_context, + TNode collection); + + // Gets the initial prototype map for given collection {variant}. + TNode GetInitialCollectionPrototype(Variant variant, + TNode native_context); + + // Loads an element from a fixed array. If the element is the hole, returns + // `undefined`. + TNode LoadAndNormalizeFixedArrayElement(TNode elements, + TNode index); + + // Loads an element from a fixed double array. If the element is the hole, + // returns `undefined`. 
+ TNode LoadAndNormalizeFixedDoubleArrayElement( + TNode elements, TNode index); +}; + +class WeakCollectionsBuiltinsAssembler : public BaseCollectionsAssembler { + public: + explicit WeakCollectionsBuiltinsAssembler(compiler::CodeAssemblerState* state) + : BaseCollectionsAssembler(state) {} + + protected: + void AddEntry(TNode table, TNode key_index, + TNode key, TNode value, + TNode number_of_elements); + + TNode AllocateTable(Variant variant, + TNode at_least_space_for) override; + + TNode GetHash(const TNode key, Label* if_no_hash); + // Generates and sets the identity hash for a JSReceiver. + TNode CreateIdentityHash(TNode receiver); + TNode EntryMask(TNode capacity); + + // Builds code that finds the EphemeronHashTable entry for a {key} using the + // comparison code generated by {key_compare}. The key index is returned if + // the {key} is found. + using KeyComparator = + std::function entry_key, Label* if_same)>; + TNode FindKeyIndex(TNode table, TNode key_hash, + TNode entry_mask, + const KeyComparator& key_compare); + + // Builds code that finds an EphemeronHashTable entry available for a new + // entry. + TNode FindKeyIndexForInsertion(TNode table, + TNode key_hash, + TNode entry_mask); + + // Builds code that finds the EphemeronHashTable entry with key that matches + // {key} and returns the entry's key index. If {key} cannot be found, jumps to + // {if_not_found}. + TNode FindKeyIndexForKey(TNode table, TNode key, + TNode hash, + TNode entry_mask, + Label* if_not_found); + + TNode InsufficientCapacityToAdd(TNode capacity, + TNode number_of_elements, + TNode number_of_deleted); + TNode KeyIndexFromEntry(TNode entry); + + TNode LoadNumberOfElements(TNode table, + int offset); + TNode LoadNumberOfDeleted(TNode table, + int offset = 0); + TNode LoadTable(TNode collection); + TNode LoadTableCapacity(TNode table); + + void RemoveEntry(TNode table, TNode key_index, + TNode number_of_elements); + TNode ShouldRehash(TNode number_of_elements, + TNode number_of_deleted); + TNode ShouldShrink(TNode capacity, + TNode number_of_elements); + TNode ValueIndexFromKeyIndex(TNode key_index); +}; + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index bbc12a5b650a79..cb2b79bef7ce72 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -596,13 +596,16 @@ TNode ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( static_assert(JSObject::kMaxInstanceSize < kMaxRegularHeapObjectSize); TNode instance_size = TimesTaggedSize(LoadMapInstanceSizeInWords(boilerplate_map)); + TNode aligned_instance_size = + AlignToAllocationAlignment(instance_size); TNode allocation_size = instance_size; bool needs_allocation_memento = v8_flags.allocation_site_pretenuring; if (needs_allocation_memento) { DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL); // Prepare for inner-allocating the AllocationMemento. - allocation_size = - IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize)); + allocation_size = IntPtrAdd(aligned_instance_size, + IntPtrConstant(ALIGN_TO_ALLOCATION_ALIGNMENT( + AllocationMemento::kSize))); } TNode copy = @@ -620,7 +623,7 @@ TNode ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( // Initialize the AllocationMemento before potential GCs due to heap number // allocation when copying the in-object properties.
if (needs_allocation_memento) { - InitializeAllocationMemento(copy, instance_size, allocation_site); + InitializeAllocationMemento(copy, aligned_instance_size, allocation_site); } { diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc index 6bdc561361a06a..8f3f789e0c32d4 100644 --- a/deps/v8/src/builtins/builtins-dataview.cc +++ b/deps/v8/src/builtins/builtins-dataview.cc @@ -75,7 +75,7 @@ BUILTIN(DataViewConstructor) { bool length_tracking = false; if (byte_length->IsUndefined(isolate)) { view_byte_length = buffer_byte_length - view_byte_offset; - length_tracking = array_buffer->is_resizable(); + length_tracking = array_buffer->is_resizable_by_js(); } else { // 11. Else, // a. Set byteLengthChecked be ? ToIndex(byteLength). @@ -113,7 +113,7 @@ BUILTIN(DataViewConstructor) { raw.SetEmbedderField(i, Smi::zero()); } raw.set_bit_field(0); - raw.set_is_backed_by_rab(array_buffer->is_resizable() && + raw.set_is_backed_by_rab(array_buffer->is_resizable_by_js() && !array_buffer->is_shared()); raw.set_is_length_tracking(length_tracking); raw.set_byte_length(0); diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 175acbd49588ed..c656b02e75566c 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -198,6 +198,7 @@ namespace internal { \ /* Maglev Compiler */ \ ASM(MaglevOnStackReplacement, OnStackReplacement) \ + ASM(MaglevOutOfLinePrologue, NoContext) \ \ /* Code life-cycle */ \ TFC(CompileLazy, JSTrampoline) \ @@ -619,6 +620,8 @@ namespace internal { /* JSON */ \ CPP(JsonParse) \ CPP(JsonStringify) \ + CPP(JsonRawJson) \ + CPP(JsonIsRawJson) \ \ /* Web snapshots */ \ CPP(WebSnapshotSerialize) \ @@ -1021,7 +1024,7 @@ namespace internal { \ TFS(AsyncGeneratorResolve, kGenerator, kValue, kDone) \ TFS(AsyncGeneratorReject, kGenerator, kValue) \ - TFS(AsyncGeneratorYield, kGenerator, kValue, kIsCaught) \ + TFS(AsyncGeneratorYieldWithAwait, kGenerator, kValue, kIsCaught) \ TFS(AsyncGeneratorReturn, kGenerator, kValue, kIsCaught) \ TFS(AsyncGeneratorResumeNext, kGenerator) \ \ @@ -1046,8 +1049,8 @@ namespace internal { kValue) \ TFJ(AsyncGeneratorAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \ kValue) \ - TFJ(AsyncGeneratorYieldResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \ - kValue) \ + TFJ(AsyncGeneratorYieldWithAwaitResolveClosure, kJSArgcReceiverSlots + 1, \ + kReceiver, kValue) \ TFJ(AsyncGeneratorReturnClosedResolveClosure, kJSArgcReceiverSlots + 1, \ kReceiver, kValue) \ TFJ(AsyncGeneratorReturnClosedRejectClosure, kJSArgcReceiverSlots + 1, \ @@ -1094,7 +1097,8 @@ namespace internal { TFS(CreateDataProperty, kReceiver, kKey, kValue) \ ASM(MemCopyUint8Uint8, CCall) \ ASM(MemMove, CCall) \ - TFC(FindNonDefaultConstructor, FindNonDefaultConstructor) \ + TFC(FindNonDefaultConstructorOrConstruct, \ + FindNonDefaultConstructorOrConstruct) \ \ /* Trace */ \ CPP(IsTraceCategoryEnabled) \ @@ -1750,6 +1754,16 @@ namespace internal { CPP(DisplayNamesPrototypeResolvedOptions) \ /* ecma402 #sec-Intl.DisplayNames.supportedLocalesOf */ \ CPP(DisplayNamesSupportedLocalesOf) \ + /* ecma402 #sec-intl-durationformat-constructor */ \ + CPP(DurationFormatConstructor) \ + /* ecma402 #sec-Intl.DurationFormat.prototype.format */ \ + CPP(DurationFormatPrototypeFormat) \ + /* ecma402 #sec-Intl.DurationFormat.prototype.formatToParts */ \ + CPP(DurationFormatPrototypeFormatToParts) \ + /* ecma402 #sec-Intl.DurationFormat.prototype.resolvedOptions */ 
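The builtins-dataview.cc hunk above switches the checks to is_resizable_by_js(), the predicate behind length-tracking DataViews over resizable ArrayBuffers. A sketch of the behavior those bits (is_backed_by_rab, is_length_tracking) control:

```js
// Length-tracking DataView over a resizable ArrayBuffer: omitting the
// byteLength argument makes the view track the buffer's current size.
const rab = new ArrayBuffer(8, { maxByteLength: 16 });
const dv = new DataView(rab); // no byteLength, so length_tracking = true
console.log(dv.byteLength);   // 8
rab.resize(16);
console.log(dv.byteLength);   // 16
const fixed = new DataView(rab, 0, 4);
console.log(fixed.byteLength); // 4, regardless of later resizes
```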
\ + CPP(DurationFormatPrototypeResolvedOptions) \ + /* ecma402 #sec-Intl.DurationFormat.supportedLocalesOf */ \ + CPP(DurationFormatSupportedLocalesOf) \ /* ecma402 #sec-intl.getcanonicallocales */ \ CPP(IntlGetCanonicalLocales) \ /* ecma402 #sec-intl.supportedvaluesof */ \ diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index eb7790d3fc4d1e..907b41e8da641d 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -1305,10 +1305,17 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { // architectures. #ifndef V8_TARGET_ARCH_X64 void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) { - using D = OnStackReplacementDescriptor; + using D = + i::CallInterfaceDescriptorFor::type; static_assert(D::kParameterCount == 1); masm->Trap(); } +void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) { + using D = + i::CallInterfaceDescriptorFor::type; + static_assert(D::kParameterCount == 0); + masm->Trap(); +} #endif // V8_TARGET_ARCH_X64 // ES6 [[Get]] operation. @@ -1502,7 +1509,7 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) { TailCallJSCode(code, context, function, new_target, arg_count); } -TF_BUILTIN(FindNonDefaultConstructor, CodeStubAssembler) { +TF_BUILTIN(FindNonDefaultConstructorOrConstruct, CodeStubAssembler) { auto this_function = Parameter(Descriptor::kThisFunction); auto new_target = Parameter(Descriptor::kNewTarget); auto context = Parameter(Descriptor::kContext); @@ -1511,8 +1518,9 @@ TF_BUILTIN(FindNonDefaultConstructor, CodeStubAssembler) { Label found_default_base_ctor(this, &constructor), found_something_else(this, &constructor); - FindNonDefaultConstructor(context, this_function, constructor, - &found_default_base_ctor, &found_something_else); + FindNonDefaultConstructorOrConstruct(context, this_function, constructor, + &found_default_base_ctor, + &found_something_else); BIND(&found_default_base_ctor); { diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 452e55120742c9..0410c3ef91fe38 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -21,6 +21,7 @@ #include "src/objects/js-collator-inl.h" #include "src/objects/js-date-time-format-inl.h" #include "src/objects/js-display-names-inl.h" +#include "src/objects/js-duration-format-inl.h" #include "src/objects/js-list-format-inl.h" #include "src/objects/js-locale-inl.h" #include "src/objects/js-number-format-inl.h" @@ -383,6 +384,51 @@ BUILTIN(DisplayNamesPrototypeOf) { JSDisplayNames::Of(isolate, holder, code_obj)); } +// Intl.DurationFormat +BUILTIN(DurationFormatConstructor) { + HandleScope scope(isolate); + + return DisallowCallConstructor( + args, isolate, v8::Isolate::UseCounterFeature::kDurationFormat, + "Intl.DurationFormat"); +} + +BUILTIN(DurationFormatPrototypeResolvedOptions) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSDurationFormat, holder, + "Intl.DurationFormat.prototype.resolvedOptions"); + return *JSDurationFormat::ResolvedOptions(isolate, holder); +} + +BUILTIN(DurationFormatSupportedLocalesOf) { + HandleScope scope(isolate); + Handle locales = args.atOrUndefined(isolate, 1); + Handle options = args.atOrUndefined(isolate, 2); + + RETURN_RESULT_OR_FAILURE( + isolate, Intl::SupportedLocalesOf( + isolate, "Intl.DurationFormat.supportedLocalesOf", + JSDurationFormat::GetAvailableLocales(), locales, options)); +} + +BUILTIN(DurationFormatPrototypeFormat) 
{ + HandleScope scope(isolate); + CHECK_RECEIVER(JSDurationFormat, holder, + "Intl.DurationFormat.prototype.format"); + Handle value = args.atOrUndefined(isolate, 1); + RETURN_RESULT_OR_FAILURE(isolate, + JSDurationFormat::Format(isolate, holder, value)); +} + +BUILTIN(DurationFormatPrototypeFormatToParts) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSDurationFormat, holder, + "Intl.DurationFormat.prototype.formatToParts"); + Handle value = args.atOrUndefined(isolate, 1); + RETURN_RESULT_OR_FAILURE( + isolate, JSDurationFormat::FormatToParts(isolate, holder, value)); +} + // Intl.NumberFormat BUILTIN(NumberFormatConstructor) { diff --git a/deps/v8/src/builtins/builtins-json.cc b/deps/v8/src/builtins/builtins-json.cc index 896a45389c4abd..5ac1cd2bfc57e8 100644 --- a/deps/v8/src/builtins/builtins-json.cc +++ b/deps/v8/src/builtins/builtins-json.cc @@ -7,6 +7,7 @@ #include "src/json/json-parser.h" #include "src/json/json-stringifier.h" #include "src/logging/counters.h" +#include "src/objects/js-raw-json.h" #include "src/objects/objects-inl.h" namespace v8 { @@ -37,5 +38,19 @@ BUILTIN(JsonStringify) { JsonStringify(isolate, object, replacer, indent)); } +// https://tc39.es/proposal-json-parse-with-source/#sec-json.rawjson +BUILTIN(JsonRawJson) { + HandleScope scope(isolate); + Handle text = args.atOrUndefined(isolate, 1); + RETURN_RESULT_OR_FAILURE(isolate, JSRawJson::Create(isolate, text)); +} + +// https://tc39.es/proposal-json-parse-with-source/#sec-json.israwjson +BUILTIN(JsonIsRawJson) { + HandleScope scope(isolate); + Handle text = args.atOrUndefined(isolate, 1); + return isolate->heap()->ToBoolean(text->IsJSRawJson()); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-object.cc b/deps/v8/src/builtins/builtins-object.cc index d636801ad6b576..e6d26ef7c75f33 100644 --- a/deps/v8/src/builtins/builtins-object.cc +++ b/deps/v8/src/builtins/builtins-object.cc @@ -4,10 +4,9 @@ #include "src/builtins/builtins-utils-inl.h" #include "src/builtins/builtins.h" -#include "src/codegen/code-factory.h" #include "src/common/message-template.h" +#include "src/execution/isolate.h" #include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop. 
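Two new JS-facing APIs are wired up above: Intl.DurationFormat (ecma402 proposal) and JSON.rawJSON / JSON.isRawJSON (the json-parse-with-source proposal linked in the hunk). A sketch of their use, assuming the corresponding harmony flags are enabled; exact formatted output is locale/ICU dependent:

```js
// Intl.DurationFormat: format a duration record.
const df = new Intl.DurationFormat('en', { style: 'long' });
console.log(df.format({ hours: 1, minutes: 30 })); // e.g. "1 hour, 30 minutes"
console.log(df.resolvedOptions().locale);          // "en"

// JSON.rawJSON: embed pre-serialized JSON verbatim when stringifying,
// e.g. to keep a 64-bit id from losing precision as a Number.
const raw = JSON.rawJSON('12345678901234567890');
console.log(JSON.isRawJSON(raw));         // true
console.log(JSON.stringify({ id: raw })); // {"id":12345678901234567890}
```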
-#include "src/logging/counters.h" #include "src/objects/keys.h" #include "src/objects/lookup.h" #include "src/objects/objects-inl.h" @@ -150,6 +149,10 @@ Object ObjectLookupAccessor(Isolate* isolate, Handle object, return ObjectLookupAccessor(isolate, prototype, key, component); } + case LookupIterator::WASM_OBJECT: + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kWasmObjectsAreOpaque)); + case LookupIterator::INTEGER_INDEXED_EXOTIC: case LookupIterator::DATA: return ReadOnlyRoots(isolate).undefined_value(); diff --git a/deps/v8/src/builtins/builtins-regexp-gen.h b/deps/v8/src/builtins/builtins-regexp-gen.h index ef606463143a6d..f89449356116f4 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.h +++ b/deps/v8/src/builtins/builtins-regexp-gen.h @@ -138,6 +138,9 @@ class RegExpBuiltinsAssembler : public CodeStubAssembler { TNode FastFlagGetterUnicode(TNode regexp) { return FastFlagGetter(regexp, JSRegExp::kUnicode); } + TNode FastFlagGetterUnicodeSets(TNode regexp) { + return FastFlagGetter(regexp, JSRegExp::kUnicodeSets); + } TNode SlowFlagGetter(TNode context, TNode regexp, JSRegExp::Flag flag); TNode FlagGetter(TNode context, TNode regexp, diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 805837f722b46d..4d2f0b541b7b50 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -64,8 +64,8 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBitFieldOffset, Int32Constant(bitfield_value)); - StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset, - UintPtrConstant(0)); + StoreBoundedSizeToObject(buffer, JSArrayBuffer::kRawByteLengthOffset, + UintPtrConstant(0)); StoreSandboxedPointerToObject(buffer, JSArrayBuffer::kBackingStoreOffset, EmptyBackingStoreBufferConstant()); StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset, @@ -141,7 +141,7 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { // Default to zero if the {receiver}s buffer was detached. TNode byte_length = Select( IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, - [=] { return LoadJSArrayBufferViewRawByteLength(receiver_array); }); + [=] { return LoadJSArrayBufferViewByteLength(receiver_array); }); Return(ChangeUintPtrToTagged(byte_length)); } } diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc index aee330b4bd4f3c..a944159f247bec 100644 --- a/deps/v8/src/builtins/builtins-weak-refs.cc +++ b/deps/v8/src/builtins/builtins-weak-refs.cc @@ -9,6 +9,7 @@ namespace v8 { namespace internal { +// https://tc39.es/proposal-symbols-as-weakmap-keys/#sec-finalization-registry.prototype.unregister BUILTIN(FinalizationRegistryUnregister) { HandleScope scope(isolate); const char* method_name = "FinalizationRegistry.prototype.unregister"; @@ -24,16 +25,16 @@ BUILTIN(FinalizationRegistryUnregister) { Handle unregister_token = args.atOrUndefined(isolate, 1); - // 4. If Type(unregisterToken) is not Object, throw a TypeError exception. - if (!unregister_token->IsJSReceiver()) { + // 4. If CanBeHeldWeakly(unregisterToken) is false, throw a TypeError + // exception. 
+ if (!unregister_token->CanBeHeldWeakly()) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, - unregister_token)); + isolate, NewTypeError(MessageTemplate::kInvalidWeakRefsUnregisterToken, + unregister_token)); } bool success = JSFinalizationRegistry::Unregister( - finalization_registry, Handle::cast(unregister_token), + finalization_registry, Handle::cast(unregister_token), isolate); return *isolate->factory()->ToBoolean(success); diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq index 5cb6e0bc92e0ed..0d347e3dd35cdc 100644 --- a/deps/v8/src/builtins/cast.tq +++ b/deps/v8/src/builtins/cast.tq @@ -697,6 +697,21 @@ Cast(o: HeapObject): JSReceiver|Null } } +Cast(implicit context: Context)(o: Object): JSReceiver|Symbol + labels CastError { + typeswitch (o) { + case (o: JSReceiver): { + return o; + } + case (o: Symbol): { + return o; + } + case (Object): { + goto CastError; + } + } +} + Cast(o: Object): Smi|PromiseReaction labels CastError { typeswitch (o) { case (o: Smi): { diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq index 38cae7ed20b9ff..4e4b4be068669f 100644 --- a/deps/v8/src/builtins/finalization-registry.tq +++ b/deps/v8/src/builtins/finalization-registry.tq @@ -1,6 +1,7 @@ // Copyright 2020 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/builtins/builtins-collections-gen.h" namespace runtime { extern runtime @@ -15,6 +16,9 @@ extern transitioning macro RemoveFinalizationRegistryCellFromUnregisterTokenMap( JSFinalizationRegistry, WeakCell): void; +extern macro WeakCollectionsBuiltinsAssembler::GotoIfCannotBeHeldWeakly(JSAny): + void labels NotWeakKey; + macro SplitOffTail(weakCell: WeakCell): WeakCell|Undefined { const weakCellTail = weakCell.next; weakCell.next = Undefined; @@ -125,6 +129,7 @@ FinalizationRegistryConstructor( return finalizationRegistry; } +// https://tc39.es/proposal-symbols-as-weakmap-keys/#sec-finalization-registry.prototype.register transitioning javascript builtin FinalizationRegistryRegister( js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { @@ -134,33 +139,32 @@ FinalizationRegistryRegister( ThrowTypeError( MessageTemplate::kIncompatibleMethodReceiver, 'FinalizationRegistry.prototype.register', receiver); - // 3. If Type(target) is not Object, throw a TypeError exception. - const target = Cast(arguments[0]) otherwise ThrowTypeError( - MessageTemplate::kWeakRefsRegisterTargetMustBeObject); + // 3. If CanBeHeldWeakly(target) is false, throw a TypeError exception. + GotoIfCannotBeHeldWeakly(arguments[0]) + otherwise ThrowTypeError(MessageTemplate::kInvalidWeakRefsRegisterTarget); + + const target = UnsafeCast<(JSReceiver | Symbol)>(arguments[0]); const heldValue = arguments[1]; // 4. If SameValue(target, heldValue), throw a TypeError exception. if (target == heldValue) { ThrowTypeError( MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame); } - // 5. If Type(unregisterToken) is not Object, + // 5. If CanBeHeldWeakly(unregisterToken) is false, // a. If unregisterToken is not undefined, throw a TypeError exception. // b. Set unregisterToken to empty. 
const unregisterTokenRaw = arguments[2]; - let unregisterToken: JSReceiver|Undefined; - typeswitch (unregisterTokenRaw) { - case (Undefined): { - unregisterToken = Undefined; - } - case (unregisterTokenObj: JSReceiver): { - unregisterToken = unregisterTokenObj; - } - case (JSAny): deferred { - ThrowTypeError( - MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, - unregisterTokenRaw); - } + let unregisterToken: JSReceiver|Undefined|Symbol; + + if (IsUndefined(unregisterTokenRaw)) { + unregisterToken = Undefined; + } else { + GotoIfCannotBeHeldWeakly(unregisterTokenRaw) + otherwise ThrowTypeError( + MessageTemplate::kInvalidWeakRefsUnregisterToken, unregisterTokenRaw); + unregisterToken = UnsafeCast<(JSReceiver | Symbol)>(unregisterTokenRaw); } + // 6. Let cell be the Record { [[WeakRefTarget]] : target, [[HeldValue]]: // heldValue, [[UnregisterToken]]: unregisterToken }. // Allocate the WeakCell object in the old space, because 1) WeakCell weakness diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index ecb1bd9136279e..5535c68c7edb4e 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -1111,7 +1111,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( { // Restore actual argument count. __ movd(eax, xmm0); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, xmm1); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, xmm1); } __ bind(&compile_lazy); @@ -1640,8 +1640,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // requires the stack to only contain valid frames. __ Drop(2); __ movd(arg_count, saved_arg_count); // Restore actual argument count. - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, - saved_feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, saved_feedback_vector); __ Trap(); } diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc index c147a80d1a3b06..157c45c4325269 100644 --- a/deps/v8/src/builtins/loong64/builtins-loong64.cc +++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc @@ -1022,7 +1022,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Ensure the flags is not allocated again. // Drop the frame created by the baseline call. 
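FinalizationRegistry now mirrors the weak-collection rule above: register targets and unregister tokens may be non-registered symbols, and rejections use the new kInvalidWeakRefs* message templates. A sketch, again assuming the symbols-as-weakmap-keys flag is enabled:

```js
const registry = new FinalizationRegistry((held) => console.log('freed', held));
const token = Symbol('token'); // non-registered symbol: valid unregister token
registry.register(Symbol('target'), 'held-value', token);
console.log(registry.unregister(token)); // true, the cell is removed
try {
  registry.register(Symbol.for('shared'), 'x'); // registry symbol target
} catch (e) {
  console.log(e instanceof TypeError); // true
}
```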
__ Pop(ra, fp); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1275,7 +1275,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { @@ -2997,8 +2997,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ St_w(s2, MemOperand(s5, kLevelOffset)); Label profiler_enabled, done_api_call; - __ li(t7, ExternalReference::is_profiling_address(isolate)); - __ Ld_b(t7, MemOperand(t7, 0)); + __ Ld_b(t7, __ ExternalReferenceAsOperand( + ExternalReference::is_profiling_address(isolate), t7)); __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg)); #ifdef V8_RUNTIME_CALL_STATS __ li(t7, ExternalReference::address_of_runtime_stats_flag()); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index f164f1d9b05ee1..598ef531a9e607 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -1021,7 +1021,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Ensure the flags is not allocated again. // Drop the frame created by the baseline call. __ Pop(ra, fp); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1270,7 +1270,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { // Load the feedback vector from the closure. @@ -3014,8 +3014,8 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, __ Sw(s2, MemOperand(s5, kLevelOffset)); Label profiler_enabled, done_api_call; - __ li(t9, ExternalReference::is_profiling_address(isolate)); - __ Lb(t9, MemOperand(t9, 0)); + __ Lb(t9, __ ExternalReferenceAsOperand( + ExternalReference::is_profiling_address(isolate), t9)); __ Branch(&profiler_enabled, ne, t9, Operand(zero_reg)); #ifdef V8_RUNTIME_CALL_STATS __ li(t9, ExternalReference::address_of_runtime_stats_flag()); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 4993dfccde45b6..8decc418fce299 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -433,7 +433,7 @@ void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source, ConstantPoolUnavailableScope constant_pool_unavailable(masm); __ addi(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r3); } @@ -845,7 +845,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // r8: argv __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used. 
__ push(r0); - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { __ li(kConstantPoolRegister, Operand::Zero()); __ push(kConstantPoolRegister); } @@ -1292,13 +1292,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // Drop the frame created by the baseline call. - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { __ Pop(r0, fp, kConstantPoolRegister); } else { __ Pop(r0, fp); } __ mtlr(r0); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1568,7 +1568,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { @@ -3055,7 +3055,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, ConstantPoolUnavailableScope constant_pool_unavailable(masm); __ Move(ip, pending_handler_entrypoint_address); __ LoadU64(ip, MemOperand(ip)); - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { __ Move(kConstantPoolRegister, pending_handler_constant_pool_address); __ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister)); } diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq index ffb285a06a8355..7e707e649f11bc 100644 --- a/deps/v8/src/builtins/promise-any.tq +++ b/deps/v8/src/builtins/promise-any.tq @@ -119,7 +119,19 @@ PromiseAnyRejectElementClosure( kPromiseAnyRejectElementRemainingSlot); // 9. Set errors[index] to x. - const newCapacity = IntPtrMax(SmiUntag(remainingElementsCount), index + 1); + + // The max computation below is an optimization to avoid excessive allocations + // in the case of input promises being asynchronously rejected in ascending + // index order. + // + // Note that subtracting 1 from remainingElementsCount is intentional. The + // value of remainingElementsCount is 1 larger than the actual value during + // iteration. So in the case of synchronous rejection, newCapacity is the + // correct size by subtracting 1. In the case of asynchronous rejection this + // is 1 smaller than the correct size, but is not incorrect as it is maxed + // with index + 1. + const newCapacity = + IntPtrMax(SmiUntag(remainingElementsCount) - 1, index + 1); if (newCapacity > errors.length_intptr) deferred { errors = ExtractFixedArray(errors, 0, errors.length_intptr, newCapacity); *ContextSlot( @@ -306,6 +318,7 @@ Reject(JSAny) { PromiseAnyRejectElementContextSlots:: kPromiseAnyRejectElementErrorsSlot); + check(errors.length == index - 1); const error = ConstructAggregateError(errors); // 3. Return ThrowCompletion(error). goto Reject(error); diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq index 114b1e922b1f48..c5ad5eefd5c42b 100644 --- a/deps/v8/src/builtins/promise-resolve.tq +++ b/deps/v8/src/builtins/promise-resolve.tq @@ -161,6 +161,12 @@ ResolvePromise(implicit context: Context)( } goto Slow; } label Slow deferred { + // Skip "then" lookup for Wasm objects as they are opaque. + @if(V8_ENABLE_WEBASSEMBLY) + if (Is(resolution)) { + return FulfillPromise(promise, resolution); + } + // 9. Let then be Get(resolution, "then"). // 10. 
If then is an abrupt completion, then try { diff --git a/deps/v8/src/builtins/reflect.tq b/deps/v8/src/builtins/reflect.tq index 477c586403c022..c0591e7f6c92c2 100644 --- a/deps/v8/src/builtins/reflect.tq +++ b/deps/v8/src/builtins/reflect.tq @@ -38,6 +38,13 @@ transitioning javascript builtin ReflectSetPrototypeOf( const objectJSReceiver = Cast(object) otherwise ThrowTypeError( MessageTemplate::kCalledOnNonObject, 'Reflect.setPrototypeOf'); + + // Wasm objects do not support having prototypes. + @if(V8_ENABLE_WEBASSEMBLY) + if (Is(objectJSReceiver)) { + ThrowTypeError(MessageTemplate::kWasmObjectsAreOpaque); + } + typeswitch (proto) { case (proto: JSReceiver|Null): { return object::ObjectSetPrototypeOfDontThrow(objectJSReceiver, proto); diff --git a/deps/v8/src/builtins/regexp-match-all.tq b/deps/v8/src/builtins/regexp-match-all.tq index 1f9aa1819f497a..48f12218b94889 100644 --- a/deps/v8/src/builtins/regexp-match-all.tq +++ b/deps/v8/src/builtins/regexp-match-all.tq @@ -52,9 +52,10 @@ transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)( // 10. Else, let global be false. global = FastFlagGetter(matcherRegExp, Flag::kGlobal); - // 11. If flags contains "u", let fullUnicode be true. + // 11. If flags contains "u" or "v", let fullUnicode be true. // 12. Else, let fullUnicode be false. - unicode = FastFlagGetter(matcherRegExp, Flag::kUnicode); + unicode = FastFlagGetter(matcherRegExp, Flag::kUnicode) || + FastFlagGetter(matcherRegExp, Flag::kUnicodeSets); } case (Object): { // 4. Let C be ? SpeciesConstructor(R, %RegExp%). @@ -81,12 +82,15 @@ transitioning macro RegExpPrototypeMatchAllImpl(implicit context: Context)( const globalIndex: Smi = StringIndexOf(flagsString, globalCharString, 0); global = globalIndex != -1; - // 11. If flags contains "u", let fullUnicode be true. + // 11. If flags contains "u" or "v", let fullUnicode be true. // 12. Else, let fullUnicode be false. 
const unicodeCharString = StringConstant('u'); + const unicodeSetsCharString = StringConstant('v'); const unicodeIndex: Smi = StringIndexOf(flagsString, unicodeCharString, 0); - unicode = unicodeIndex != -1; + const unicodeSetsIndex: Smi = + StringIndexOf(flagsString, unicodeSetsCharString, 0); + unicode = unicodeIndex != -1 || unicodeSetsIndex != -1; } } diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq index 3da132636a9e86..ff2dcf2c33c0fc 100644 --- a/deps/v8/src/builtins/regexp-match.tq +++ b/deps/v8/src/builtins/regexp-match.tq @@ -33,7 +33,8 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)( } dcheck(isGlobal); - const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath); + const isUnicode: bool = FlagGetter(regexp, Flag::kUnicode, isFastPath) || + FlagGetter(regexp, Flag::kUnicodeSets, isFastPath); StoreLastIndex(regexp, 0, isFastPath); diff --git a/deps/v8/src/builtins/regexp-replace.tq b/deps/v8/src/builtins/regexp-replace.tq index d26f8d6949cd16..ecd99af0320598 100644 --- a/deps/v8/src/builtins/regexp-replace.tq +++ b/deps/v8/src/builtins/regexp-replace.tq @@ -132,7 +132,7 @@ transitioning macro RegExpReplaceFastString(implicit context: Context)( const global: bool = fastRegexp.global; if (global) { - unicode = fastRegexp.unicode; + unicode = fastRegexp.unicode || fastRegexp.unicodeSets; fastRegexp.lastIndex = 0; } diff --git a/deps/v8/src/builtins/riscv/builtins-riscv.cc b/deps/v8/src/builtins/riscv/builtins-riscv.cc index 0571568ebb3b6c..7cba83d4f0c704 100644 --- a/deps/v8/src/builtins/riscv/builtins-riscv.cc +++ b/deps/v8/src/builtins/riscv/builtins-riscv.cc @@ -1078,7 +1078,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { ASM_CODE_COMMENT_STRING(masm, "Optimized marker check"); // Drop the frame created by the baseline call. __ Pop(ra, fp); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1319,7 +1319,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ Branch(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { // Load the feedback vector from the closure. diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index e5c5e9d0437ed1..b9bc2bab3384b1 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -1335,7 +1335,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Drop the frame created by the baseline call. 
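The regexp hunks above make the new v (unicodeSets) flag imply full-Unicode semantics wherever the u flag already did: match, matchAll, and replace advance lastIndex by whole code points. A sketch, assuming unicodeSets support is enabled (the gating flag name is not shown in this diff):

```js
// fullUnicode: with u or v, an empty match advances past a full code point
// instead of splitting a surrogate pair.
console.log('\u{1F49A}'.replace(/(?:)/g, '-'));  // '-\ud83d-\udc9a-' (pair split)
console.log('\u{1F49A}'.replace(/(?:)/gv, '-')); // '-\u{1F49A}-' (same as /gu)
console.log([...'\u{1F49A}a'.matchAll(/./gv)].map((m) => m[0])); // ['\u{1F49A}', 'a']
```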
__ Pop(r14, fp); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ Trap(); } @@ -1599,7 +1599,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ jmp(&after_stack_check_interrupt); __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector); __ bind(&is_baseline); { diff --git a/deps/v8/src/builtins/typed-array-slice.tq b/deps/v8/src/builtins/typed-array-slice.tq index a1dba47bacf36f..0ac50a3ec7563d 100644 --- a/deps/v8/src/builtins/typed-array-slice.tq +++ b/deps/v8/src/builtins/typed-array-slice.tq @@ -36,8 +36,20 @@ macro FastCopy( otherwise unreachable; const srcPtr: RawPtr = src.data_ptr + Convert(startOffset); - dcheck(countBytes <= dest.byte_length); - dcheck(countBytes <= src.byte_length - startOffset); + @if(DEBUG) { + const srcLength = + LoadJSTypedArrayLengthAndCheckDetached(src) otherwise unreachable; + const srcByteLength = GetTypedArrayElementsInfo(src).CalculateByteLength( + srcLength) otherwise unreachable; + + const destLength = + LoadJSTypedArrayLengthAndCheckDetached(dest) otherwise unreachable; + const destByteLength = GetTypedArrayElementsInfo(dest).CalculateByteLength( + destLength) otherwise unreachable; + + dcheck(countBytes <= destByteLength); + dcheck(countBytes <= srcByteLength - startOffset); + } if (IsSharedArrayBuffer(src.buffer)) { // SABs need a relaxed memmove to preserve atomicity. diff --git a/deps/v8/src/builtins/typed-array-sort.tq b/deps/v8/src/builtins/typed-array-sort.tq index 37760ccb5c15f8..5793bf92ba8768 100644 --- a/deps/v8/src/builtins/typed-array-sort.tq +++ b/deps/v8/src/builtins/typed-array-sort.tq @@ -79,32 +79,16 @@ TypedArrayMergeSort(implicit context: Context)( return Undefined; } -// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort -transitioning javascript builtin TypedArrayPrototypeSort( - js-implicit context: NativeContext, - receiver: JSAny)(...arguments): JSTypedArray { - // 1. If comparefn is not undefined and IsCallable(comparefn) is false, - // throw a TypeError exception. - const comparefnObj: JSAny = arguments[0]; - if (comparefnObj != Undefined && !Is(comparefnObj)) { - ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj); - } - - // 2. Let obj be the this value. - const obj: JSAny = receiver; - - // 3. Let buffer be ? ValidateTypedArray(obj). - // 4. Let len be IntegerIndexedObjectLength(obj). - let len: uintptr = - ValidateTypedArrayAndGetLength(context, obj, kBuiltinNameSort); - const array: JSTypedArray = UnsafeCast(obj); - +// Shared between TypedArray.prototype.sort and TypedArray.prototype.toSorted. +transitioning macro TypedArraySortCommon(implicit context: Context)( + array: JSTypedArray, len: uintptr, comparefnArg: Undefined|Callable, + isSort: constexpr bool): JSTypedArray { // Arrays of length 1 or less are considered sorted. 
if (len < 2) return array; // Default sorting is done in C++ using std::sort - if (comparefnObj == Undefined) { - return TypedArraySortFast(context, obj); + if (comparefnArg == Undefined) { + return TypedArraySortFast(context, array); } // Throw rather than crash if the TypedArray's size exceeds max FixedArray @@ -116,7 +100,7 @@ transitioning javascript builtin TypedArrayPrototypeSort( } const comparefn: Callable = - Cast<Callable>(comparefnObj) otherwise unreachable; + Cast<Callable>(comparefnArg) otherwise unreachable; const accessor: TypedArrayAccessor = GetTypedArrayAccessor(array.elements_kind); @@ -134,25 +118,58 @@ transitioning javascript builtin TypedArrayPrototypeSort( TypedArrayMergeSort(work2, 0, len, work1, array, comparefn); - // Reload the length; it's possible the backing ArrayBuffer has been resized - // to be OOB or detached, in which case treat it as length 0. - - try { - const newLen = LoadJSTypedArrayLengthAndCheckDetached(array) - otherwise DetachedOrOutOfBounds; - if (newLen < len) { - len = newLen; + // If this is TypedArray.prototype.sort, reload the length; it's possible the + // backing ArrayBuffer has been resized to be OOB or detached, in which case + // treat it as length 0. + // + // This is not possible in TypedArray.prototype.toSorted as the array being + // sorted is a copy that has not yet escaped to user script. + + let writebackLen = len; + if constexpr (isSort) { + try { + const newLen = LoadJSTypedArrayLengthAndCheckDetached(array) + otherwise DetachedOrOutOfBounds; + if (newLen < writebackLen) { + writebackLen = newLen; + } + } label DetachedOrOutOfBounds { + writebackLen = 0; } - } label DetachedOrOutOfBounds { - len = 0; + } else { + dcheck( + writebackLen == + LoadJSTypedArrayLengthAndCheckDetached(array) otherwise unreachable); } // work1 contains the sorted numbers. Write them back. - for (let i: uintptr = 0; i < len; ++i) { + for (let i: uintptr = 0; i < writebackLen; ++i) { accessor.StoreNumeric( context, array, i, UnsafeCast<Number>(work1.objects[i])); } return array; } + +// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.sort +transitioning javascript builtin TypedArrayPrototypeSort( + js-implicit context: NativeContext, + receiver: JSAny)(...arguments): JSTypedArray { + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, + // throw a TypeError exception. + const comparefnObj: JSAny = arguments[0]; + const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise + ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj); + + // 2. Let obj be the this value. + const obj: JSAny = receiver; + + // 3. Let buffer be ? ValidateTypedArray(obj). + // 4. Let len be IntegerIndexedObjectLength(obj). + const len: uintptr = + ValidateTypedArrayAndGetLength(context, obj, kBuiltinNameSort); + const array: JSTypedArray = UnsafeCast<JSTypedArray>(obj); + const kIsSort: constexpr bool = true; + return TypedArraySortCommon(array, len, comparefn, kIsSort); +} } diff --git a/deps/v8/src/builtins/typed-array-to-sorted.tq b/deps/v8/src/builtins/typed-array-to-sorted.tq new file mode 100644 index 00000000000000..c73821a9fc5b04 --- /dev/null +++ b/deps/v8/src/builtins/typed-array-to-sorted.tq @@ -0,0 +1,59 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +namespace typed_array { +// https://tc39.es/proposal-change-array-by-copy/#sec-%typedarray%.prototype.toSorted + +const kBuiltinNameToSorted: constexpr string = + '%TypedArray%.prototype.toSorted'; + +transitioning javascript builtin TypedArrayPrototypeToSorted( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // 1. If comparefn is not undefined and IsCallable(comparefn) is false, + // throw a TypeError exception. + const comparefnObj: JSAny = arguments[0]; + const comparefn = Cast<(Undefined | Callable)>(comparefnObj) otherwise + ThrowTypeError(MessageTemplate::kBadSortComparisonFunction, comparefnObj); + + // 2. Let O be the this value. + const obj: JSAny = receiver; + + // 3. Perform ? ValidateTypedArray(O). + // 4. Let buffer be obj.[[ViewedArrayBuffer]]. + // 5. Let len be O.[[ArrayLength]]. + const len: uintptr = + ValidateTypedArrayAndGetLength(context, obj, kBuiltinNameToSorted); + const array: JSTypedArray = UnsafeCast<JSTypedArray>(obj); + + // 6. Let A be ? TypedArrayCreateSameType(O, « 𝔽(len) »). + const copy = TypedArrayCreateSameType(array, len); + + // 7. NOTE: The following closure performs a numeric comparison rather than + // the string comparison used in 1.1.1.5. + // 8. Let SortCompare be a new Abstract Closure with parameters (x, y) that + // captures comparefn and buffer and performs the following steps when + // called: + // a. Return ? CompareTypedArrayElements(x, y, comparefn, buffer). + // 9. Let sortedList be ? SortIndexedProperties(obj, len, SortCompare, false). + // 10. Let j be 0. + // 11. Repeat, while j < len, + // a. Perform ! Set(A, ! ToString(𝔽(j)), sortedList[j], true). + // b. Set j to j + 1. + // 12. Return A. + + // Perform the sorting by copying the source TypedArray and sorting the copy + // in-place using the same code as TypedArray.prototype.sort + const info = GetTypedArrayElementsInfo(copy); + const countBytes: uintptr = + info.CalculateByteLength(len) otherwise unreachable; + if (IsSharedArrayBuffer(array.buffer)) { + CallCRelaxedMemmove(copy.data_ptr, array.data_ptr, countBytes); + } else { + CallCMemmove(copy.data_ptr, array.data_ptr, countBytes); + } + + const kIsSort: constexpr bool = false; + return TypedArraySortCommon(copy, len, comparefn, kIsSort); +} +} diff --git a/deps/v8/src/builtins/typed-array-with.tq b/deps/v8/src/builtins/typed-array-with.tq index 25b58a37188c7c..c14de29364defc 100644 --- a/deps/v8/src/builtins/typed-array-with.tq +++ b/deps/v8/src/builtins/typed-array-with.tq @@ -7,10 +7,8 @@ const kBuiltinNameWith: constexpr string = '%TypedArray%.prototype.with'; // https://tc39.es/proposal-change-array-by-copy/#sec-%typedarray%.prototype.with transitioning javascript builtin TypedArrayPrototypeWith( - js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { - const index = arguments[0]; - let value: JSAny = arguments[1]; - + js-implicit context: NativeContext, receiver: JSAny)( + index: JSAny, valueArg: JSAny): JSAny { try { // 1. Let O be the this value. // 2. Perform ? ValidateTypedArray(O). @@ -21,12 +19,13 @@ transitioning javascript builtin TypedArrayPrototypeWith( otherwise IsDetachedOrOutOfBounds; const originalLength = attachedArrayAndLength.length; + let value: JSAny; if (IsBigInt64ElementsKind(array.elements_kind)) { // 4. If O.[[ContentType]] is BigInt, set value to ? ToBigInt(value). - value = ToBigInt(context, value); + value = ToBigInt(context, valueArg); } else { // 5. Else, set value to ? ToNumber(value).
- value = ToNumber_Inline(value); + value = ToNumber_Inline(valueArg); } // 6. Let relativeIndex be ? ToIntegerOrInfinity(index). diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index dbf80befe2d7dd..03e117025ce3a5 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -746,14 +746,17 @@ macro IsWord16WasmArrayMap(map: Map): bool { } // Non-standard experimental feature. +// Arguments: array, start, count. transitioning javascript builtin ExperimentalWasmConvertArrayToString( - js-implicit context: NativeContext)( - array: JSAny, start: JSAny, count: JSAny): String { + js-implicit context: NativeContext)(...arguments): String { try { - const start = TryNumberToIntptr(start) otherwise goto InvalidArgument; - const count = TryNumberToIntptr(count) otherwise goto InvalidArgument; + if (arguments.length != 3) goto InvalidArgument; + const array = Cast<WasmArray>(arguments[0]) otherwise goto InvalidArgument; + const start = TryNumberToIntptr(arguments[1]) + otherwise goto InvalidArgument; + const count = TryNumberToIntptr(arguments[2]) + otherwise goto InvalidArgument; - const array = Cast<WasmArray>(array) otherwise goto InvalidArgument; if (!IsWord16WasmArrayMap(array.map)) goto InvalidArgument; const arrayContent = torque_internal::unsafe::NewConstSlice<char16>( array, kWasmArrayHeaderSize, Convert<intptr>(array.length)); @@ -768,16 +771,17 @@ transitioning javascript builtin ExperimentalWasmConvertArrayToString( } // Non-standard experimental feature. +// Arguments: string, sampleArray. transitioning javascript builtin ExperimentalWasmConvertStringToArray( - js-implicit context: NativeContext)( - string: JSAny, sampleArray: JSAny): WasmArray { + js-implicit context: NativeContext)(...arguments): WasmArray { try { + if (arguments.length != 2) goto InvalidArgument; + const string = Cast<String>(arguments[0]) otherwise goto InvalidArgument; const sampleArray = - Cast<WasmArray>(sampleArray) otherwise goto InvalidArgument; + Cast<WasmArray>(arguments[1]) otherwise goto InvalidArgument; const arrayMap = sampleArray.map; if (!IsWord16WasmArrayMap(arrayMap)) goto InvalidArgument; - const string = Cast<String>(string) otherwise goto InvalidArgument; const length = string.length; const result = diff --git a/deps/v8/src/builtins/weak-ref.tq b/deps/v8/src/builtins/weak-ref.tq index 56d3fc1c4314bf..051831698534ce 100644 --- a/deps/v8/src/builtins/weak-ref.tq +++ b/deps/v8/src/builtins/weak-ref.tq @@ -2,15 +2,18 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include 'src/builtins/builtins-collections-gen.h' + namespace runtime { -extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver): - void; +extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)( + JSReceiver | Symbol): void; } // namespace runtime namespace weakref { +// https://tc39.es/proposal-symbols-as-weakmap-keys/#sec-weak-ref-target transitioning javascript builtin WeakRefConstructor( js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, @@ -19,15 +22,17 @@ WeakRefConstructor( if (newTarget == Undefined) { ThrowTypeError(MessageTemplate::kConstructorNotFunction, 'WeakRef'); } - // 2. If Type(target) is not Object, throw a TypeError exception. - const weakTarget = Cast<JSReceiver>(weakTarget) otherwise - ThrowTypeError( - MessageTemplate::kWeakRefsWeakRefConstructorTargetMustBeObject); + + // 2. If CanBeHeldWeakly(weakTarget) is false, throw a TypeError exception.
+ GotoIfCannotBeHeldWeakly(weakTarget) otherwise ThrowTypeError( + MessageTemplate::kInvalidWeakRefsWeakRefConstructorTarget); + // 3. Let weakRef be ? OrdinaryCreateFromConstructor(NewTarget, // "%WeakRefPrototype%", « [[WeakRefTarget]] »). const map = GetDerivedMap(target, UnsafeCast<JSFunction>(newTarget)); const weakRef = UnsafeCast<JSWeakRef>(AllocateFastOrSlowJSObjectFromMap(map)); // 4. Perform ! AddToKeptObjects(target). + const weakTarget = UnsafeCast<(JSReceiver | Symbol)>(weakTarget); runtime::JSWeakRefAddToKeptObjects(weakTarget); // 5. Set weakRef.[[WeakRefTarget]] to target. weakRef.target = weakTarget; @@ -52,7 +57,8 @@ WeakRefDeref(js-implicit context: NativeContext, receiver: JSAny)(): JSAny { if (target != Undefined) { // JSWeakRefAddToKeptObjects might allocate and cause a GC, but it // won't clear `target` since we hold it here on the stack. - runtime::JSWeakRefAddToKeptObjects(UnsafeCast<JSReceiver>(target)); + runtime::JSWeakRefAddToKeptObjects( + UnsafeCast<(JSReceiver | Symbol)>(target)); } return target; } diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 03539e1caa29c8..6dfdffcdb84472 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -1197,8 +1197,7 @@ void Builtins::Generate_InterpreterEntryTrampoline( __ int3(); // Should not return. __ bind(&flags_need_processing); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, - closure); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, closure); __ bind(&is_baseline); { @@ -1627,8 +1626,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // return since we may do a runtime call along the way that requires the // stack to only contain valid frames. __ Drop(1); - __ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( - flags, feedback_vector, closure, JumpMode::kPushAndReturn); + __ OptimizeCodeOrTailCallOptimizedCodeSlot(flags, feedback_vector, closure, + JumpMode::kPushAndReturn); __ Trap(); } @@ -2697,12 +2696,218 @@ void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_MaglevOnStackReplacement(MacroAssembler* masm) { - using D = OnStackReplacementDescriptor; + using D = + i::CallInterfaceDescriptorFor<Builtin::kMaglevOnStackReplacement>::type; static_assert(D::kParameterCount == 1); OnStackReplacement(masm, OsrSourceTier::kMaglev, D::MaybeTargetCodeRegister()); } +// Called immediately at the start of Maglev-generated functions, with all +// state (register and stack) unchanged, except: +// +// - the stack slot byte size and +// - the tagged stack slot byte size +// +// are pushed as untagged arguments to the stack. This prologue builtin takes +// care of a few things that each Maglev function needs on entry: +// +// - the deoptimization check +// - tiering support (checking FeedbackVector flags) +// - the stack overflow / interrupt check +// - and finally, setting up the Maglev frame. +// +// If this builtin returns, the Maglev frame is fully set up and we are +// prepared for continued execution. Otherwise, we take one of multiple +// possible non-standard exit paths (deoptimization, tailcalling other code, or +// throwing a stack overflow exception). +void Builtins::Generate_MaglevOutOfLinePrologue(MacroAssembler* masm) { + using D = + i::CallInterfaceDescriptorFor<Builtin::kMaglevOutOfLinePrologue>::type; + static_assert(D::kParameterCount == 0); + + // This builtin is called by Maglev code prior to any register mutations, and + // the only stack mutation is pushing the arguments for this builtin.
In + // other words: + // + // - The register state is the same as when we entered the Maglev code object, + // i.e. set up for a standard JS call. + // - The caller has not yet set up a stack frame. + // - The caller has pushed the (untagged) stack parameters for this builtin. + + static constexpr int kStackParameterCount = 2; + static constexpr int kReturnAddressCount = 1; + static constexpr int kReturnAddressOffset = 0 * kSystemPointerSize; + static constexpr int kTaggedStackSlotBytesOffset = 1 * kSystemPointerSize; + static constexpr int kTotalStackSlotBytesOffset = 2 * kSystemPointerSize; + USE(kReturnAddressOffset); + USE(kTaggedStackSlotBytesOffset); + USE(kTotalStackSlotBytesOffset); + + // Scratch registers. Don't clobber regs related to the calling + // convention (e.g. kJavaScriptCallArgCountRegister). + const Register scratch0 = rcx; + const Register scratch1 = r9; + const Register scratch2 = rbx; + + Label deoptimize, optimize, call_stack_guard, call_stack_guard_return; + + // A modified version of BailoutIfDeoptimized that drops the builtin frame + // before deoptimizing. + { + static constexpr int kCodeStartToCodeDataContainerOffset = + Code::kCodeDataContainerOffset - Code::kHeaderSize; + __ LoadTaggedPointerField(scratch0, + Operand(kJavaScriptCallCodeStartRegister, + kCodeStartToCodeDataContainerOffset)); + __ testl( + FieldOperand(scratch0, CodeDataContainer::kKindSpecificFlagsOffset), + Immediate(1 << Code::kMarkedForDeoptimizationBit)); + __ j(not_zero, &deoptimize); + } + + // Tiering support. + const Register flags = scratch0; + const Register feedback_vector = scratch1; + { + __ LoadTaggedPointerField( + feedback_vector, + FieldOperand(kJSFunctionRegister, JSFunction::kFeedbackCellOffset)); + __ LoadTaggedPointerField( + feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset)); + __ AssertFeedbackVector(feedback_vector); + + __ LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( + flags, feedback_vector, CodeKind::MAGLEV, &optimize); + } + + // Good to go - set up the MAGLEV stack frame and return. + + // First, tear down to the caller frame. + const Register tagged_stack_slot_bytes = scratch1; + const Register total_stack_slot_bytes = scratch0; + const Register return_address = scratch2; + __ PopReturnAddressTo(return_address); + __ Pop(tagged_stack_slot_bytes); + __ Pop(total_stack_slot_bytes); + + __ EnterFrame(StackFrame::MAGLEV); + + // Save arguments in frame. + // TODO(leszeks): Consider eliding this frame if we don't make any calls + // that could clobber these registers. + __ Push(kContextRegister); + __ Push(kJSFunctionRegister); // Callee's JS function. + __ Push(kJavaScriptCallArgCountRegister); // Actual argument count. + + { + ASM_CODE_COMMENT_STRING(masm, " Stack/interrupt check"); + // Stack check. This folds the checks for both the interrupt stack limit + // check and the real stack limit into one by just checking for the + // interrupt limit. The interrupt limit is either equal to the real stack + // limit or tighter. By ensuring we have space until that limit after + // building the frame we can quickly precheck both at once. + // TODO(leszeks): Include a max call argument size here. + __ Move(kScratchRegister, rsp); + __ subq(kScratchRegister, total_stack_slot_bytes); + __ cmpq(kScratchRegister, + __ StackLimitAsOperand(StackLimitKind::kInterruptStackLimit)); + __ j(below, &call_stack_guard); + __ bind(&call_stack_guard_return); + } + + // Initialize stack slots: + // + // - tagged slots are initialized with smi zero. 
+ // - untagged slots are simply reserved without initialization. + // + // Tagged slots first. + const Register untagged_stack_slot_bytes = total_stack_slot_bytes; + { + Label next, loop_condition, loop_header; + + DCHECK_EQ(total_stack_slot_bytes, untagged_stack_slot_bytes); + __ subq(total_stack_slot_bytes, tagged_stack_slot_bytes); + + const Register smi_zero = rax; + DCHECK(!AreAliased(smi_zero, scratch0, scratch1, scratch2)); + __ Move(smi_zero, Smi::zero()); + + __ jmp(&loop_condition, Label::kNear); + + // TODO(leszeks): Consider filling with xmm + movdqa instead. + // TODO(v8:7700): Consider doing more than one push per loop iteration. + __ bind(&loop_header); + __ pushq(rax); + __ bind(&loop_condition); + __ subq(tagged_stack_slot_bytes, Immediate(kSystemPointerSize)); + __ j(greater_equal, &loop_header, Label::kNear); + + __ bind(&next); + } + + // Untagged slots second. + __ subq(rsp, untagged_stack_slot_bytes); + + // The "all-good" return location. This is the only spot where we actually + // return to the caller. + __ PushReturnAddressFrom(return_address); + __ ret(0); + + __ bind(&deoptimize); + { + // Drop the frame and jump to CompileLazyDeoptimizedCode. This is slightly + // fiddly due to the CET shadow stack (otherwise we could do a conditional + // Jump to the builtin). + __ Drop(kStackParameterCount + kReturnAddressCount); + __ Move(scratch0, + BUILTIN_CODE(masm->isolate(), CompileLazyDeoptimizedCode)); + __ LoadCodeObjectEntry(scratch0, scratch0); + __ PushReturnAddressFrom(scratch0); + __ ret(0); + } + + __ bind(&optimize); + { + __ Drop(kStackParameterCount + kReturnAddressCount); + __ AssertFunction(kJSFunctionRegister); + __ OptimizeCodeOrTailCallOptimizedCodeSlot( + flags, feedback_vector, kJSFunctionRegister, JumpMode::kPushAndReturn); + __ Trap(); + } + + __ bind(&call_stack_guard); + { + ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call"); + + // Push the MAGLEV code return address now, as if it had been pushed by the + // call to this builtin. + __ PushReturnAddressFrom(return_address); + + { + FrameScope inner_frame_scope(masm, StackFrame::INTERNAL); + __ SmiTag(total_stack_slot_bytes); + __ Push(total_stack_slot_bytes); + __ SmiTag(tagged_stack_slot_bytes); + __ Push(tagged_stack_slot_bytes); + // Save any registers that can be referenced by maglev::RegisterInput. + // TODO(leszeks): Only push those that are used by the graph. + __ Push(kJavaScriptCallNewTargetRegister); + // Push the frame size. + __ Push(total_stack_slot_bytes); + __ CallRuntime(Runtime::kStackGuardWithGap, 1); + __ Pop(kJavaScriptCallNewTargetRegister); + __ Pop(tagged_stack_slot_bytes); + __ SmiUntag(tagged_stack_slot_bytes); + __ Pop(total_stack_slot_bytes); + __ SmiUntag(total_stack_slot_bytes); + } + + __ PopReturnAddressTo(return_address); + __ jmp(&call_stack_guard_return); + } +} + #if V8_ENABLE_WEBASSEMBLY void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was pushed to the stack by the caller as int32. 
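For reference, the user-visible behavior implemented by the Torque changes above — `%TypedArray%.prototype.toSorted`, the RegExp `v` (unicodeSets) flag being treated like `u` for lastIndex advancement in `match`/`replace`, and `WeakRef` accepting symbol targets — can be exercised from script. The following is an illustrative TypeScript sketch, not part of the patch; it assumes a V8 build with the corresponding `--harmony` staging flags enabled (flag names are assumptions), and the `as any` casts only paper over type-library definitions that predate these APIs:

```typescript
// Illustrative sketch of the behavior the builtins above implement.
// Assumed flags: --harmony-change-array-by-copy,
// --harmony-regexp-unicode-sets, --harmony-symbol-as-weakmap-key.

// %TypedArray%.prototype.toSorted sorts a copy; the receiver is untouched.
const src = new Int32Array([3, 1, 2]);
const sorted: Int32Array = (src as any).toSorted();
console.log(sorted); // Int32Array [1, 2, 3]
console.log(src);    // Int32Array [3, 1, 2] -- unchanged

// A non-callable comparator is rejected up front (step 1 of the spec text).
try {
  (src as any).toSorted(42);
} catch (e) {
  console.log((e as Error).constructor.name); // "TypeError"
}

// RegExp 'v' flag: zero-length global matches must advance lastIndex by
// code points, exactly as with 'u' -- hence the kUnicode || kUnicodeSets
// checks patched into regexp-match.tq and regexp-replace.tq.
const emoji = "\u{1F600}"; // one code point, two UTF-16 code units
console.log(emoji.replace(new RegExp("", "gv"), "-")); // "-\u{1F600}-": the surrogate pair is not split

// WeakRef now accepts unregistered symbols as targets...
const ref = new WeakRef(Symbol("s") as any);
console.log(typeof ref.deref()); // "symbol"
// ...while registered symbols still fail CanBeHeldWeakly.
try {
  new WeakRef(Symbol.for("s") as any);
} catch (e) {
  console.log((e as Error).constructor.name); // "TypeError"
}
```

The toSorted half also motivates the `isSort` split in `TypedArraySortCommon`: a user comparator can resize or detach the buffer mid-sort in `sort`, whereas the copy used by `toSorted` never escapes to user script before it is returned, so its length reload can be skipped.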
diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h index cb0e5f6b7537b5..b1cd1d5205a1cd 100644 --- a/deps/v8/src/codegen/arm/assembler-arm-inl.h +++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h @@ -66,8 +66,7 @@ void RelocInfo::apply(intptr_t delta) { } Address RelocInfo::target_address() { - DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) || - IsWasmCall(rmode_)); + DCHECK(IsCodeTargetMode(rmode_) || IsWasmCall(rmode_)); return Assembler::target_address_at(pc_, constant_pool_); } @@ -92,11 +91,7 @@ Address RelocInfo::constant_pool_entry_address() { int RelocInfo::target_address_size() { return kPointerSize; } HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) || - IsDataEmbeddedObject(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(ReadUnalignedValue<Address>
(pc_))); - } + DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); return HeapObject::cast( Object(Assembler::target_address_at(pc_, constant_pool_))); } @@ -105,8 +100,6 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) { if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) { return Handle<HeapObject>(reinterpret_cast<Address*>( Assembler::target_address_at(pc_, constant_pool_))); - } else if (IsDataEmbeddedObject(rmode_)) { - return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_)); } DCHECK(IsRelativeCodeTarget(rmode_)); return origin->relative_code_target_object_handle_at(pc_); @@ -115,15 +108,9 @@ Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) { void RelocInfo::set_target_object(Heap* heap, HeapObject target, WriteBarrierMode write_barrier_mode, ICacheFlushMode icache_flush_mode) { - DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) || - IsDataEmbeddedObject(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - WriteUnalignedValue(pc_, target.ptr()); - // No need to flush icache since no instructions were changed. - } else { - Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), - icache_flush_mode); - } + DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)); + Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), + icache_flush_mode); if (!host().is_null() && !v8_flags.disable_write_barriers) { WriteBarrierForCode(host(), this, target, write_barrier_mode); } @@ -153,19 +140,6 @@ Address RelocInfo::target_internal_reference_address() { Builtin RelocInfo::target_builtin_at(Assembler* origin) { UNREACHABLE(); } -Address RelocInfo::target_runtime_entry(Assembler* origin) { - DCHECK(IsRuntimeEntry(rmode_)); - return target_address(); -} - -void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode write_barrier_mode, - ICacheFlushMode icache_flush_mode) { - DCHECK(IsRuntimeEntry(rmode_)); - if (target_address() != target) - set_target_address(target, write_barrier_mode, icache_flush_mode); -} - Address RelocInfo::target_off_heap_target() { DCHECK(IsOffHeapTarget(rmode_)); return Assembler::target_address_at(pc_, constant_pool_); @@ -173,8 +147,8 @@ Address RelocInfo::target_off_heap_target() { void RelocInfo::WipeOut() { DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) || - IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || - IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)); + IsExternalReference(rmode_) || IsInternalReference(rmode_) || + IsOffHeapTarget(rmode_)); if (IsInternalReference(rmode_)) { Memory<Address>
(pc_) = kNullAddress; } else { diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 31d76c61984f60..b2d7cad0963cd7 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -5218,8 +5218,7 @@ void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) { DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); if (!RelocInfo::IsNoInfo(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || - RelocInfo::IsLiteralConstant(rmode)); + DCHECK(RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } base::WriteUnalignedValue(reinterpret_cast<Address>
(pc_), data); @@ -5232,8 +5231,7 @@ void Assembler::dq(uint64_t value, RelocInfo::Mode rmode) { DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty()); CheckBuffer(); if (!RelocInfo::IsNoInfo(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || - RelocInfo::IsLiteralConstant(rmode)); + DCHECK(RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } base::WriteUnalignedValue(reinterpret_cast<Address>
(pc_), value); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index f88c04333f479f..565d0820bdd0b5 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -2078,7 +2078,7 @@ void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( b(ne, flags_need_processing); } -void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( +void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( Register flags, Register feedback_vector) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(flags, feedback_vector)); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 099b1551bf8b1d..51f79075812a84 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -779,8 +779,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( Register flags, Register feedback_vector, CodeKind current_code_kind, Label* flags_need_processing); - void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, - Register feedback_vector); + void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, + Register feedback_vector); // --------------------------------------------------------------------------- // Runtime calls diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index 5df2d876457ddc..37158040ead8c0 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -537,16 +537,6 @@ Builtin Assembler::target_builtin_at(Address pc) { return static_cast<Builtin>(builtin_id); } -Address Assembler::runtime_entry_at(Address pc) { - Instruction* instr = reinterpret_cast<Instruction*>(pc); - if (instr->IsLdrLiteralX()) { - return Assembler::target_address_at(pc, 0 /* unused */); - } else { - DCHECK(instr->IsBranchAndLink() || instr->IsUnconditionalBranch()); - return instr->ImmPCOffset() + options().code_range_base; - } -} - int Assembler::deserialization_special_target_size(Address location) { Instruction* instr = reinterpret_cast<Instruction*>(location); if (instr->IsBranchAndLink() || instr->IsUnconditionalBranch()) { @@ -630,7 +620,7 @@ int RelocInfo::target_address_size() { Address RelocInfo::target_address() { DCHECK(IsCodeTarget(rmode_) || IsNearBuiltinEntry(rmode_) || - IsRuntimeEntry(rmode_) || IsWasmCall(rmode_)); + IsWasmCall(rmode_)); return Assembler::target_address_at(pc_, constant_pool_); } @@ -665,13 +655,12 @@ Address RelocInfo::constant_pool_entry_address() { HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - return HeapObject::cast(Object(ReadUnalignedValue<Address>
(pc_))); - } else if (IsCompressedEmbeddedObject(rmode_)) { + if (IsCompressedEmbeddedObject(rmode_)) { Tagged_t compressed = Assembler::target_compressed_address_at(pc_, constant_pool_); DCHECK(!HAS_SMI_TAG(compressed)); - Object obj(DecompressTaggedPointer(cage_base, compressed)); + Object obj(V8HeapCompressionScheme::DecompressTaggedPointer(cage_base, + compressed)); // Embedding of compressed Code objects must not happen when external code // space is enabled, because CodeDataContainers must be used instead. DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, @@ -684,9 +673,7 @@ HeapObject RelocInfo::target_object(PtrComprCageBase cage_base) { } Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) { - if (IsDataEmbeddedObject(rmode_)) { - return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_)); - } else if (IsEmbeddedObjectMode(rmode_)) { + if (IsEmbeddedObjectMode(rmode_)) { return origin->target_object_handle_at(pc_); } else { DCHECK(IsCodeTarget(rmode_)); @@ -698,12 +685,11 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject target, WriteBarrierMode write_barrier_mode, ICacheFlushMode icache_flush_mode) { DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObjectMode(rmode_)); - if (IsDataEmbeddedObject(rmode_)) { - WriteUnalignedValue(pc_, target.ptr()); - // No need to flush icache since no instructions were changed. - } else if (IsCompressedEmbeddedObject(rmode_)) { + if (IsCompressedEmbeddedObject(rmode_)) { Assembler::set_target_compressed_address_at( - pc_, constant_pool_, CompressTagged(target.ptr()), icache_flush_mode); + pc_, constant_pool_, + V8HeapCompressionScheme::CompressTagged(target.ptr()), + icache_flush_mode); } else { DCHECK(IsFullEmbeddedObject(rmode_)); Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(), @@ -741,20 +727,6 @@ Builtin RelocInfo::target_builtin_at(Assembler* origin) { return Assembler::target_builtin_at(pc_); } -Address RelocInfo::target_runtime_entry(Assembler* origin) { - DCHECK(IsRuntimeEntry(rmode_)); - return target_address(); -} - -void RelocInfo::set_target_runtime_entry(Address target, - WriteBarrierMode write_barrier_mode, - ICacheFlushMode icache_flush_mode) { - DCHECK(IsRuntimeEntry(rmode_)); - if (target_address() != target) { - set_target_address(target, write_barrier_mode, icache_flush_mode); - } -} - Address RelocInfo::target_off_heap_target() { DCHECK(IsOffHeapTarget(rmode_)); return Assembler::target_address_at(pc_, constant_pool_); @@ -762,8 +734,8 @@ Address RelocInfo::target_off_heap_target() { void RelocInfo::WipeOut() { DCHECK(IsEmbeddedObjectMode(rmode_) || IsCodeTarget(rmode_) || - IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) || - IsInternalReference(rmode_) || IsOffHeapTarget(rmode_)); + IsExternalReference(rmode_) || IsInternalReference(rmode_) || + IsOffHeapTarget(rmode_)); if (IsInternalReference(rmode_)) { WriteUnalignedValue<Address>
(pc_, kNullAddress); } else if (IsCompressedEmbeddedObject(rmode_)) { diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 754c79815ab06f..dc06c743a02d30 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -188,7 +188,6 @@ CPURegList CPURegList::GetCallerSavedV(int size) { const int RelocInfo::kApplyMask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | RelocInfo::ModeMask(RelocInfo::NEAR_BUILTIN_ENTRY) | - RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) | RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE); bool RelocInfo::IsCodedSpecially() { @@ -1134,10 +1133,20 @@ void Assembler::smull(const Register& rd, const Register& rn, void Assembler::smulh(const Register& rd, const Register& rn, const Register& rm) { - DCHECK(AreSameSizeAndType(rd, rn, rm)); + DCHECK(rd.Is64Bits()); + DCHECK(rn.Is64Bits()); + DCHECK(rm.Is64Bits()); DataProcessing3Source(rd, rn, rm, xzr, SMULH_x); } +void Assembler::umulh(const Register& rd, const Register& rn, + const Register& rm) { + DCHECK(rd.Is64Bits()); + DCHECK(rn.Is64Bits()); + DCHECK(rm.Is64Bits()); + DataProcessing3Source(rd, rn, rm, xzr, UMULH_x); +} + void Assembler::sdiv(const Register& rd, const Register& rn, const Register& rm) { DCHECK(rd.SizeInBits() == rn.SizeInBits()); @@ -4306,7 +4315,6 @@ void Assembler::GrowBuffer() { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, ConstantPoolMode constant_pool_mode) { if ((rmode == RelocInfo::INTERNAL_REFERENCE) || - (rmode == RelocInfo::DATA_EMBEDDED_OBJECT) || (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) || (rmode == RelocInfo::DEOPT_SCRIPT_OFFSET) || (rmode == RelocInfo::DEOPT_INLINING_ID) || @@ -4318,7 +4326,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, RelocInfo::IsDeoptNodeId(rmode) || RelocInfo::IsDeoptPosition(rmode) || RelocInfo::IsInternalReference(rmode) || - RelocInfo::IsDataEmbeddedObject(rmode) || RelocInfo::IsLiteralConstant(rmode) || RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)); // These modes do not need an entry in the constant pool. diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index 368a2e5f42bf78..68f773a92436c3 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -273,14 +273,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // code is moved into the code space. static inline Builtin target_builtin_at(Address pc); - // Returns the target address for a runtime function for the call encoded - // at 'pc'. - // Runtime entries can be temporarily encoded as the offset between the - // runtime function entrypoint and the code range base (stored in the - // code_range_base field), in order to be encodable as we generate the code, - // before it is moved into the code space. - inline Address runtime_entry_at(Address pc); - // This sets the branch destination. 'location' here can be either the pc of // an immediate branch or the address of an entry in the constant pool. // This is for calls and branches within generated code. @@ -751,9 +743,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // 32 x 32 -> 64-bit multiply. void smull(const Register& rd, const Register& rn, const Register& rm); - // Xd = bits<127:64> of Xn * Xm. + // Xd = bits<127:64> of Xn * Xm, signed. 
void smulh(const Register& rd, const Register& rn, const Register& rm); + // Xd = bits<127:64> of Xn * Xm, unsigned. + void umulh(const Register& rd, const Register& rn, const Register& rm); + // Signed 32 x 32 -> 64-bit multiply and accumulate. void smaddl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); @@ -2070,8 +2065,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNoInfo(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || - RelocInfo::IsLiteralConstant(rmode)); + DCHECK(RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc32(data); @@ -2079,8 +2073,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNoInfo(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || - RelocInfo::IsLiteralConstant(rmode)); + DCHECK(RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc64(data); @@ -2088,8 +2081,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NO_INFO) { BlockPoolsScope no_pool_scope(this); if (!RelocInfo::IsNoInfo(rmode)) { - DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) || - RelocInfo::IsLiteralConstant(rmode)); + DCHECK(RelocInfo::IsLiteralConstant(rmode)); RecordRelocInfo(rmode); } dc64(data); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index bf34708491058b..0c7e7357534fc9 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -944,7 +944,7 @@ void TurboAssembler::Smull(const Register& rd, const Register& rn, smull(rd, rn, rm); } -void MacroAssembler::Smulh(const Register& rd, const Register& rn, +void TurboAssembler::Smulh(const Register& rd, const Register& rn, const Register& rm) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); @@ -958,6 +958,13 @@ void TurboAssembler::Umull(const Register& rd, const Register& rn, umaddl(rd, rn, rm, xzr); } +void TurboAssembler::Umulh(const Register& rd, const Register& rn, + const Register& rm) { + DCHECK(allow_macro_instructions()); + DCHECK(!rd.IsZero()); + umulh(rd, rn, rm); +} + void TurboAssembler::Sxtb(const Register& rd, const Register& rn) { DCHECK(allow_macro_instructions()); DCHECK(!rd.IsZero()); diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index 0112f35129c89d..25834f25823cd1 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1440,7 +1440,7 @@ void MacroAssembler::LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( TestAndBranchIfAnySet(flags, kFlagsMask, flags_need_processing); } -void MacroAssembler::MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( +void MacroAssembler::OptimizeCodeOrTailCallOptimizedCodeSlot( Register flags, Register feedback_vector) { ASM_CODE_COMMENT(this); DCHECK(!AreAliased(flags, feedback_vector)); @@ -3291,16 +3291,6 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Bind(&done); } -void TurboAssembler::EncodeSandboxedPointer(const Register& value) { - ASM_CODE_COMMENT(this); -#ifdef V8_ENABLE_SANDBOX - Sub(value, value, kPtrComprCageBaseRegister); - Mov(value, Operand(value, LSL, 
kSandboxedPointerShift)); -#else - UNREACHABLE(); -#endif -} - void TurboAssembler::DecodeSandboxedPointer(const Register& value) { ASM_CODE_COMMENT(this); #ifdef V8_ENABLE_SANDBOX @@ -3313,19 +3303,27 @@ void TurboAssembler::DecodeSandboxedPointer(const Register& value) { void TurboAssembler::LoadSandboxedPointerField( const Register& destination, const MemOperand& field_operand) { +#ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); Ldr(destination, field_operand); DecodeSandboxedPointer(destination); +#else + UNREACHABLE(); +#endif } void TurboAssembler::StoreSandboxedPointerField( const Register& value, const MemOperand& dst_field_operand) { +#ifdef V8_ENABLE_SANDBOX ASM_CODE_COMMENT(this); UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); - Mov(scratch, value); - EncodeSandboxedPointer(scratch); + Sub(scratch, value, kPtrComprCageBaseRegister); + Mov(scratch, Operand(scratch, LSL, kSandboxedPointerShift)); Str(scratch, dst_field_operand); +#else + UNREACHABLE(); +#endif } void TurboAssembler::LoadExternalPointerField(Register destination, diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 4c3715b69cf060..99121e3f4b9de3 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1056,7 +1056,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { inline void Lsl(const Register& rd, const Register& rn, unsigned shift); inline void Lsl(const Register& rd, const Register& rn, const Register& rm); inline void Umull(const Register& rd, const Register& rn, const Register& rm); + inline void Umulh(const Register& rd, const Register& rn, const Register& rm); inline void Smull(const Register& rd, const Register& rn, const Register& rm); + inline void Smulh(const Register& rd, const Register& rn, const Register& rm); inline void Sxtb(const Register& rd, const Register& rn); inline void Sxth(const Register& rd, const Register& rn); @@ -1452,13 +1454,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Transform a SandboxedPointer from/to its encoded form, which is used when // the pointer is stored on the heap and ensures that the pointer will always // point into the sandbox. - void EncodeSandboxedPointer(const Register& value); void DecodeSandboxedPointer(const Register& value); - - // Load and decode a SandboxedPointer from the heap. void LoadSandboxedPointerField(const Register& destination, const MemOperand& field_operand); - // Encode and store a SandboxedPointer to the heap. 
void StoreSandboxedPointerField(const Register& value, const MemOperand& dst_field_operand); @@ -1630,7 +1628,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { const Register& ra); inline void Smsubl(const Register& rd, const Register& rn, const Register& rm, const Register& ra); - inline void Smulh(const Register& rd, const Register& rn, const Register& rm); inline void Stnp(const CPURegister& rt, const CPURegister& rt2, const MemOperand& dst); inline void Umaddl(const Register& rd, const Register& rn, const Register& rm, @@ -1842,8 +1839,8 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void LoadFeedbackVectorFlagsAndJumpIfNeedsProcessing( Register flags, Register feedback_vector, CodeKind current_code_kind, Label* flags_need_processing); - void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, - Register feedback_vector); + void OptimizeCodeOrTailCallOptimizedCodeSlot(Register flags, + Register feedback_vector); // Helpers ------------------------------------------------------------------ diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 8a8164d0739ace..1073a9d4f63432 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -245,7 +245,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { } bool is_constant_pool_available() const { - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { // We need to disable constant pool here for embedded builtins // because the metadata section is not adjacent to instructions return constant_pool_available_ && !options().isolate_independent_code; @@ -374,7 +374,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { byte* pc_; void set_constant_pool_available(bool available) { - if (v8_flags.enable_embedded_constant_pool) { + if (V8_EMBEDDED_CONSTANT_POOL_BOOL) { constant_pool_available_ = available; } else { // Embedded constant pool not supported on this architecture. diff --git a/deps/v8/src/codegen/background-merge-task.h b/deps/v8/src/codegen/background-merge-task.h index 0e7cffbaf6e90e..643a6a84efadbb 100644 --- a/deps/v8/src/codegen/background-merge-task.h +++ b/deps/v8/src/codegen/background-merge-task.h @@ -35,7 +35,7 @@ class V8_EXPORT_PRIVATE BackgroundMergeTask { // Step 2: on the background thread, update pointers in the new Script's // object graph to point to corresponding objects from the cached Script where - // appropriate. May only be called if HasCachedScript returned true. + // appropriate. May only be called if HasPendingBackgroundWork returned true. void BeginMergeInBackground(LocalIsolate* isolate, Handle