diff --git a/common.gypi b/common.gypi index 71862791dae3be..88764c8f6b75a8 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.20', + 'v8_embedder_string': '-node.18', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/.gn b/deps/v8/.gn index d4ad959954845f..a691fa339b0430 100644 --- a/deps/v8/.gn +++ b/deps/v8/.gn @@ -7,11 +7,21 @@ import("//build/dotfile_settings.gni") # The location of the build configuration file. buildconfig = "//build/config/BUILDCONFIG.gn" +# The python interpreter to use by default. On Windows, this will look +# for python3.exe and python3.bat. +script_executable = "python3" + # These are the targets to check headers for by default. The files in targets # matching these patterns (see "gn help label_pattern" for format) will have # their includes checked for proper dependencies when you run either # "gn check" or "gn gen --check". -check_targets = [] +no_check_targets = [ + "//:cppgc_base", + "//:v8_internal_headers", + "//src/inspector:inspector", + "//test/cctest:cctest_sources", + "//third_party/icu:*", +] # These are the list of GN files that run exec_script. This whitelist exists # to force additional review for new uses of exec_script, which is strongly diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index abfdc7887ba512..15909406cc6ebe 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -84,6 +84,7 @@ Colin Ihrig Cong Zuo Daniel Andersson Daniel Bevenius +Daniel Dromboski Daniel James David Carlier David Manouchehri @@ -229,6 +230,7 @@ Vladimir Krivosheev Vladimir Shutoff Wei Wu Wenlu Wang +Wenyu Zhao Wiktor Garbacz Wouter Vermeiren Xiaofang Zou @@ -240,6 +242,7 @@ Yong Wang Youfeng Hao Yu Yin Yusif Khudhur +Yuri Iozzelli Zac Hansen Zeynep Cankara Zhao Jiazhong diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index d2bfb6129dcf2b..5e3045bfdc1d80 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -41,7 +41,7 @@ declare_args() { v8_enable_future = false # Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing - v8_enable_system_instrumentation = true + v8_enable_system_instrumentation = is_win || is_mac # Sets the GUID for the ETW provider v8_etw_guid = "" @@ -95,8 +95,19 @@ declare_args() { v8_win64_unwinding_info = true # Enable code comments for builtins in the snapshot (impacts performance). + # This also enables v8_code_comments. v8_enable_snapshot_code_comments = false + # Allow runtime-enabled code comments (with --code-comments). Enabled by + # default in debug builds. + # Sets -dV8_CODE_COMMENTS + v8_code_comments = "" + + # Allow runtime-enabled debug code (with --debug-code). Enabled by default in + # debug builds. + # Sets -dV8_ENABLE_DEBUG_CODE + v8_enable_debug_code = "" + # Enable native counters from the snapshot (impacts performance, sets # -dV8_SNAPSHOT_NATIVE_CODE_COUNTERS). # This option will generate extra code in the snapshot to increment counters, @@ -200,10 +211,6 @@ declare_args() { (is_linux || is_chromeos || is_mac)) || (v8_current_cpu == "ppc64" && (is_linux || is_chromeos)) - # Temporary flag to allow embedders to update their microtasks scopes - # while rolling in a new version of V8. - v8_check_microtasks_scopes_consistency = "" - # Enable mitigations for executing untrusted code. # Disabled by default on ia32 due to conflicting requirements with embedded # builtins. 
Enabled by default on Android since it doesn't support @@ -272,6 +279,10 @@ declare_args() { # Enable heap reservation of size 4GB. Only possible for 64bit archs. cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64" + # Enable verification of live bytes in the marking verifier. + # TODO(v8:11785): Enable by default when running with the verifier. + cppgc_enable_verify_live_bytes = false + # Enable young generation in cppgc. cppgc_enable_young_generation = false @@ -300,6 +311,12 @@ declare_args() { # meaning that they are not switched to fast mode. # Sets -DV8_DICT_PROPERTY_CONST_TRACKING v8_dict_property_const_tracking = false + + # Enable map packing & unpacking (sets -dV8_MAP_PACKING). + v8_enable_map_packing = false + + # Allow for JS promise hooks (instead of just C++). + v8_allow_javascript_in_promise_hooks = false } # Derived defaults. @@ -321,9 +338,15 @@ if (v8_enable_test_features == "") { if (v8_enable_v8_checks == "") { v8_enable_v8_checks = v8_enable_debugging_features } -if (v8_check_microtasks_scopes_consistency == "") { - v8_check_microtasks_scopes_consistency = - v8_enable_debugging_features || dcheck_always_on +if (v8_enable_snapshot_code_comments) { + assert(v8_code_comments == true || v8_code_comments == "", + "v8_enable_snapshot_code_comments conflicts with v8_code_comments.") + v8_code_comments = true +} else if (v8_code_comments == "") { + v8_code_comments = v8_enable_debugging_features +} +if (v8_enable_debug_code == "") { + v8_enable_debug_code = v8_enable_debugging_features } if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features @@ -333,7 +356,7 @@ if (v8_enable_pointer_compression == "") { v8_current_cpu == "arm64" || v8_current_cpu == "x64" } if (v8_enable_pointer_compression_shared_cage == "") { - v8_enable_pointer_compression_shared_cage = false + v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression } if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot @@ -357,6 +380,13 @@ if (v8_enable_atomic_object_field_writes == "") { if (v8_enable_atomic_marking_state == "") { v8_enable_atomic_marking_state = v8_enable_concurrent_marking } +if (v8_enable_third_party_heap) { + v8_disable_write_barriers = true + v8_enable_single_generation = true + v8_enable_shared_ro_heap = false + v8_enable_pointer_compression = false + v8_enable_pointer_compression_shared_cage = false +} assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes, "Concurrent marking requires atomic object field writes.") assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state, @@ -386,7 +416,8 @@ if (v8_enable_short_builtin_calls && v8_enable_short_builtin_calls = false } if (v8_enable_shared_ro_heap == "") { - v8_enable_shared_ro_heap = !v8_enable_pointer_compression + v8_enable_shared_ro_heap = !v8_enable_pointer_compression || + v8_enable_pointer_compression_shared_cage } assert(!v8_disable_write_barriers || v8_enable_single_generation, @@ -398,15 +429,18 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, "Control-flow integrity is only supported on arm64") -if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) { +if (v8_enable_shared_ro_heap && v8_enable_pointer_compression && + !v8_enable_pointer_compression_shared_cage) { assert( is_linux || is_chromeos || is_android, "Sharing read-only heap with pointer compression is only 
supported on Linux or Android") } -assert( - !v8_enable_pointer_compression_shared_cage || !v8_enable_shared_ro_heap, - "Sharing read-only heap is not yet supported when sharing a pointer compression cage") +assert(!v8_enable_map_packing || !v8_enable_pointer_compression, + "Map packing does not support pointer compression") + +assert(!v8_enable_map_packing || v8_current_cpu == "x64", + "Map packing is only supported on x64") assert(!v8_use_multi_snapshots || !v8_control_flow_integrity, "Control-flow integrity does not support multisnapshots") @@ -418,6 +452,10 @@ assert( !v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression, "Can't share a pointer compression cage if pointers aren't compressed") +assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" || + v8_current_cpu == "arm64", + "Sharing a pointer compression cage is only supported on x64 and arm64") + assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers, "Write barriers can't be both enabled and disabled") @@ -566,6 +604,7 @@ external_v8_defines = [ "V8_IMMINENT_DEPRECATION_WARNINGS", "V8_NO_ARGUMENTS_ADAPTOR", "V8_USE_PERFETTO", + "V8_MAP_PACKING", ] enabled_external_v8_defines = [] @@ -575,11 +614,11 @@ if (v8_enable_v8_checks) { } if (v8_enable_pointer_compression) { enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS" ] -} -if (v8_enable_pointer_compression_shared_cage) { - enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ] -} else if (v8_enable_pointer_compression) { - enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ] + if (v8_enable_pointer_compression_shared_cage) { + enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_SHARED_CAGE" ] + } else { + enabled_external_v8_defines += [ "V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE" ] + } } if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) { enabled_external_v8_defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ] @@ -599,6 +638,9 @@ if (v8_imminent_deprecation_warnings) { if (v8_use_perfetto) { enabled_external_v8_defines += [ "V8_USE_PERFETTO" ] } +if (v8_enable_map_packing) { + enabled_external_v8_defines += [ "V8_MAP_PACKING" ] +} disabled_external_v8_defines = external_v8_defines - enabled_external_v8_defines @@ -665,6 +707,10 @@ config("features") { ":cppgc_header_features", ] + if (cppgc_enable_verify_live_bytes) { + defines += [ "CPPGC_VERIFY_LIVE_BYTES" ] + } + if (v8_embedder_string != "") { defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ] } @@ -729,6 +775,12 @@ config("features") { if (v8_enable_handle_zapping) { defines += [ "ENABLE_HANDLE_ZAPPING" ] } + if (v8_code_comments == true) { + defines += [ "V8_CODE_COMMENTS" ] + } + if (v8_enable_debug_code) { + defines += [ "V8_ENABLE_DEBUG_CODE" ] + } if (v8_enable_snapshot_native_code_counters) { defines += [ "V8_SNAPSHOT_NATIVE_CODE_COUNTERS" ] } @@ -756,9 +808,6 @@ config("features") { if (v8_enable_lazy_source_positions) { defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] } - if (v8_check_microtasks_scopes_consistency) { - defines += [ "V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY" ] - } if (v8_use_multi_snapshots) { defines += [ "V8_MULTI_SNAPSHOTS" ] } @@ -807,6 +856,9 @@ config("features") { if (v8_dict_property_const_tracking) { defines += [ "V8_DICT_PROPERTY_CONST_TRACKING" ] } + if (v8_allow_javascript_in_promise_hooks) { + defines += [ "V8_ALLOW_JAVASCRIPT_IN_PROMISE_HOOKS" ] + } } config("toolchain") { @@ -1038,6 +1090,10 @@ config("toolchain") { defines += [ "ENABLE_VERIFY_CSA" ] } 
+ if (v8_enable_runtime_call_stats) { + defines += [ "V8_RUNTIME_CALL_STATS" ] + } + if (!v8_untrusted_code_mitigations) { defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ] } @@ -1244,6 +1300,8 @@ action("postmortem-metadata") { "src/objects/map.cc", "src/objects/map.h", "src/objects/map-inl.h", + "src/objects/megadom-handler.h", + "src/objects/megadom-handler-inl.h", "src/objects/name.h", "src/objects/name-inl.h", "src/objects/objects.h", @@ -1432,6 +1490,7 @@ torque_files = [ "src/objects/js-weak-refs.tq", "src/objects/literal-objects.tq", "src/objects/map.tq", + "src/objects/megadom-handler.tq", "src/objects/microtask.tq", "src/objects/module.tq", "src/objects/name.tq", @@ -1515,44 +1574,37 @@ template("run_torque") { destination_folder = "$target_gen_dir/torque-generated$suffix" - files = [ - "$target_gen_dir/torque-generated/bit-fields.h", - "$target_gen_dir/torque-generated/builtin-definitions.h", - "$target_gen_dir/torque-generated/class-debug-readers.cc", - "$target_gen_dir/torque-generated/class-debug-readers.h", - "$target_gen_dir/torque-generated/class-forward-declarations.h", - "$target_gen_dir/torque-generated/class-verifiers.cc", - "$target_gen_dir/torque-generated/class-verifiers.h", - "$target_gen_dir/torque-generated/csa-types.h", - "$target_gen_dir/torque-generated/debug-macros.cc", - "$target_gen_dir/torque-generated/debug-macros.h", - "$target_gen_dir/torque-generated/enum-verifiers.cc", - "$target_gen_dir/torque-generated/exported-macros-assembler.cc", - "$target_gen_dir/torque-generated/exported-macros-assembler.h", - "$target_gen_dir/torque-generated/factory.cc", - "$target_gen_dir/torque-generated/factory.inc", - "$target_gen_dir/torque-generated/field-offsets.h", - "$target_gen_dir/torque-generated/instance-types.h", - "$target_gen_dir/torque-generated/interface-descriptors.inc", - "$target_gen_dir/torque-generated/objects-body-descriptors-inl.inc", - "$target_gen_dir/torque-generated/objects-printer.cc", + outputs = [ + "$destination_folder/bit-fields.h", + "$destination_folder/builtin-definitions.h", + "$destination_folder/class-debug-readers.cc", + "$destination_folder/class-debug-readers.h", + "$destination_folder/class-forward-declarations.h", + "$destination_folder/class-verifiers.cc", + "$destination_folder/class-verifiers.h", + "$destination_folder/csa-types.h", + "$destination_folder/debug-macros.cc", + "$destination_folder/debug-macros.h", + "$destination_folder/enum-verifiers.cc", + "$destination_folder/exported-macros-assembler.cc", + "$destination_folder/exported-macros-assembler.h", + "$destination_folder/factory.cc", + "$destination_folder/factory.inc", + "$destination_folder/field-offsets.h", + "$destination_folder/instance-types.h", + "$destination_folder/interface-descriptors.inc", + "$destination_folder/objects-body-descriptors-inl.inc", + "$destination_folder/objects-printer.cc", ] - outputs = [] - foreach(file, files) { - outputs += [ string_replace(file, - "$target_gen_dir/torque-generated", - destination_folder) ] - } - foreach(file, torque_files) { filetq = string_replace(file, ".tq", "-tq") outputs += [ - "$target_gen_dir/torque-generated/$filetq-csa.cc", - "$target_gen_dir/torque-generated/$filetq-csa.h", - "$target_gen_dir/torque-generated/$filetq-inl.inc", - "$target_gen_dir/torque-generated/$filetq.cc", - "$target_gen_dir/torque-generated/$filetq.inc", + "$destination_folder/$filetq-csa.cc", + "$destination_folder/$filetq-csa.h", + "$destination_folder/$filetq-inl.inc", + "$destination_folder/$filetq.cc", + 
"$destination_folder/$filetq.inc", ] } @@ -1892,11 +1944,16 @@ action("v8_dump_build_config") { "v8_enable_atomic_object_field_writes=" + "$v8_enable_atomic_object_field_writes", "v8_enable_concurrent_marking=$v8_enable_concurrent_marking", + "v8_enable_single_generation=$v8_enable_single_generation", "v8_enable_i18n_support=$v8_enable_i18n_support", "v8_enable_verify_predictable=$v8_enable_verify_predictable", "v8_enable_verify_csa=$v8_enable_verify_csa", "v8_enable_lite_mode=$v8_enable_lite_mode", + "v8_enable_runtime_call_stats=$v8_enable_runtime_call_stats", "v8_enable_pointer_compression=$v8_enable_pointer_compression", + "v8_enable_pointer_compression_shared_cage=" + + "$v8_enable_pointer_compression_shared_cage", + "v8_enable_third_party_heap=$v8_enable_third_party_heap", "v8_enable_webassembly=$v8_enable_webassembly", "v8_control_flow_integrity=$v8_control_flow_integrity", "v8_target_cpu=\"$v8_target_cpu\"", @@ -1918,7 +1975,10 @@ action("v8_dump_build_config") { v8_source_set("v8_snapshot") { visibility = [ ":*" ] # Targets in this file can depend on this. - deps = [] + deps = [ + ":v8_internal_headers", + ":v8_libbase", + ] public_deps = [ # This should be public so downstream targets can declare the snapshot # output file as their inputs. @@ -2000,7 +2060,6 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-conversion-gen.cc", "src/builtins/builtins-data-view-gen.h", "src/builtins/builtins-date-gen.cc", - "src/builtins/builtins-debug-gen.cc", "src/builtins/builtins-generator-gen.cc", "src/builtins/builtins-global-gen.cc", "src/builtins/builtins-handler-gen.cc", @@ -2226,9 +2285,8 @@ if (v8_generate_external_defines_header) { v8_header_set("v8_shared_internal_headers") { visibility = [ ":*", - "test/cctest:*", - "test/unittests:*", - "tools/debug_helper/:*", + "test/*", + "tools/*", ] configs = [ ":internal_config" ] @@ -2245,7 +2303,10 @@ v8_header_set("v8_shared_internal_headers") { } v8_header_set("v8_flags") { - visibility = [ ":*" ] + visibility = [ + ":*", + "tools/*", + ] configs = [ ":internal_config" ] @@ -2273,6 +2334,7 @@ v8_header_set("v8_internal_headers") { "src/api/api-arguments-inl.h", "src/api/api-arguments.h", "src/api/api-inl.h", + "src/api/api-macros-undef.h", "src/api/api-macros.h", "src/api/api-natives.h", "src/api/api.h", @@ -2289,6 +2351,7 @@ v8_header_set("v8_internal_headers") { "src/baseline/baseline-assembler-inl.h", "src/baseline/baseline-assembler.h", "src/baseline/baseline-compiler.h", + "src/baseline/baseline-osr-inl.h", "src/baseline/baseline.h", "src/baseline/bytecode-offset-iterator.h", "src/builtins/accessors.h", @@ -2321,6 +2384,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/external-reference.h", "src/codegen/flush-instruction-cache.h", "src/codegen/handler-table.h", + "src/codegen/interface-descriptors-inl.h", "src/codegen/interface-descriptors.h", "src/codegen/label.h", "src/codegen/machine-type.h", @@ -2382,6 +2446,7 @@ v8_header_set("v8_internal_headers") { "src/compiler/common-operator-reducer.h", "src/compiler/common-operator.h", "src/compiler/compilation-dependencies.h", + "src/compiler/compilation-dependency.h", "src/compiler/compiler-source-position-table.h", "src/compiler/constant-folding-reducer.h", "src/compiler/control-equivalence.h", @@ -2448,18 +2513,17 @@ v8_header_set("v8_internal_headers") { "src/compiler/persistent-map.h", "src/compiler/pipeline-statistics.h", "src/compiler/pipeline.h", + "src/compiler/processed-feedback.h", "src/compiler/property-access-builder.h", 
"src/compiler/raw-machine-assembler.h", "src/compiler/redundancy-elimination.h", "src/compiler/refs-map.h", "src/compiler/representation-change.h", "src/compiler/schedule.h", - "src/compiler/scheduled-machine-lowering.h", "src/compiler/scheduler.h", "src/compiler/select-lowering.h", "src/compiler/serializer-for-background-compilation.h", "src/compiler/serializer-hints.h", - "src/compiler/simd-scalar-lowering.h", "src/compiler/simplified-lowering.h", "src/compiler/simplified-operator-reducer.h", "src/compiler/simplified-operator.h", @@ -2517,6 +2581,7 @@ v8_header_set("v8_internal_headers") { "src/execution/interrupts-scope.h", "src/execution/isolate-data.h", "src/execution/isolate-inl.h", + "src/execution/isolate-utils-inl.h", "src/execution/isolate-utils.h", "src/execution/isolate.h", "src/execution/local-isolate-inl.h", @@ -2557,6 +2622,7 @@ v8_header_set("v8_internal_headers") { "src/heap/base-space.h", "src/heap/basic-memory-chunk.h", "src/heap/code-object-registry.h", + "src/heap/code-range.h", "src/heap/code-stats.h", "src/heap/collection-barrier.h", "src/heap/combined-heap.h", @@ -2592,11 +2658,13 @@ v8_header_set("v8_internal_headers") { "src/heap/list.h", "src/heap/local-allocator-inl.h", "src/heap/local-allocator.h", + "src/heap/local-factory-inl.h", "src/heap/local-factory.h", "src/heap/local-heap-inl.h", "src/heap/local-heap.h", "src/heap/mark-compact-inl.h", "src/heap/mark-compact.h", + "src/heap/marking-barrier-inl.h", "src/heap/marking-barrier.h", "src/heap/marking-visitor-inl.h", "src/heap/marking-visitor.h", @@ -2679,13 +2747,13 @@ v8_header_set("v8_internal_headers") { "src/libsampler/sampler.h", "src/logging/code-events.h", "src/logging/counters-definitions.h", - "src/logging/counters-inl.h", "src/logging/counters.h", "src/logging/local-logger.h", "src/logging/log-inl.h", "src/logging/log-utils.h", "src/logging/log.h", "src/logging/metrics.h", + "src/logging/runtime-call-stats.h", "src/logging/tracing-flags.h", "src/numbers/bignum-dtoa.h", "src/numbers/bignum.h", @@ -2766,6 +2834,7 @@ v8_header_set("v8_internal_headers") { "src/objects/js-array-inl.h", "src/objects/js-array.h", "src/objects/js-collection-inl.h", + "src/objects/js-collection-iterator-inl.h", "src/objects/js-collection-iterator.h", "src/objects/js-collection.h", "src/objects/js-function-inl.h", @@ -2782,6 +2851,8 @@ v8_header_set("v8_internal_headers") { "src/objects/js-regexp-string-iterator-inl.h", "src/objects/js-regexp-string-iterator.h", "src/objects/js-regexp.h", + "src/objects/js-segments-inl.h", + "src/objects/js-segments.h", "src/objects/js-weak-refs-inl.h", "src/objects/js-weak-refs.h", "src/objects/keys.h", @@ -2797,6 +2868,8 @@ v8_header_set("v8_internal_headers") { "src/objects/map.h", "src/objects/maybe-object-inl.h", "src/objects/maybe-object.h", + "src/objects/megadom-handler-inl.h", + "src/objects/megadom-handler.h", "src/objects/microtask-inl.h", "src/objects/microtask.h", "src/objects/module-inl.h", @@ -2831,6 +2904,7 @@ v8_header_set("v8_internal_headers") { "src/objects/property.h", "src/objects/prototype-info-inl.h", "src/objects/prototype-info.h", + "src/objects/prototype-inl.h", "src/objects/prototype.h", "src/objects/regexp-match-info.h", "src/objects/scope-info-inl.h", @@ -2842,6 +2916,7 @@ v8_header_set("v8_internal_headers") { "src/objects/slots-atomic-inl.h", "src/objects/slots-inl.h", "src/objects/slots.h", + "src/objects/source-text-module-inl.h", "src/objects/source-text-module.h", "src/objects/stack-frame-info-inl.h", "src/objects/stack-frame-info.h", @@ -2880,6 +2955,7 @@ 
v8_header_set("v8_internal_headers") { "src/parsing/expression-scope.h", "src/parsing/func-name-inferrer.h", "src/parsing/import-assertions.h", + "src/parsing/keywords-gen.h", "src/parsing/literal-buffer.h", "src/parsing/parse-info.h", "src/parsing/parser-base.h", @@ -2892,6 +2968,7 @@ v8_header_set("v8_internal_headers") { "src/parsing/preparser.h", "src/parsing/rewriter.h", "src/parsing/scanner-character-streams.h", + "src/parsing/scanner-inl.h", "src/parsing/scanner.h", "src/parsing/token.h", "src/profiler/allocation-tracker.h", @@ -2939,10 +3016,6 @@ v8_header_set("v8_internal_headers") { "src/roots/roots.h", "src/runtime/runtime-utils.h", "src/runtime/runtime.h", - "src/sanitizer/asan.h", - "src/sanitizer/lsan-page-allocator.h", - "src/sanitizer/msan.h", - "src/sanitizer/tsan.h", "src/snapshot/code-serializer.h", "src/snapshot/context-deserializer.h", "src/snapshot/context-serializer.h", @@ -2980,6 +3053,7 @@ v8_header_set("v8_internal_headers") { "src/tasks/task-utils.h", "src/third_party/siphash/halfsiphash.h", "src/third_party/utf8-decoder/utf8-decoder.h", + "src/torque/runtime-macro-shims.h", "src/tracing/trace-event.h", "src/tracing/traced-value.h", "src/tracing/tracing-category-observer.h", @@ -3046,6 +3120,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/jump-table-assembler.h", "src/wasm/leb-helper.h", "src/wasm/local-decl-encoder.h", + "src/wasm/memory-protection-key.h", "src/wasm/memory-tracing.h", "src/wasm/module-compiler.h", "src/wasm/module-decoder.h", @@ -3063,6 +3138,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/wasm-feature-flags.h", "src/wasm/wasm-features.h", "src/wasm/wasm-import-wrapper-cache.h", + "src/wasm/wasm-init-expr.h", "src/wasm/wasm-js.h", "src/wasm/wasm-linkage.h", "src/wasm/wasm-module-builder.h", @@ -3070,6 +3146,7 @@ v8_header_set("v8_internal_headers") { "src/wasm/wasm-module.h", "src/wasm/wasm-objects-inl.h", "src/wasm/wasm-objects.h", + "src/wasm/wasm-opcodes-inl.h", "src/wasm/wasm-opcodes.h", "src/wasm/wasm-result.h", "src/wasm/wasm-serialization.h", @@ -3140,6 +3217,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/ia32/assembler-ia32-inl.h", "src/codegen/ia32/assembler-ia32.h", "src/codegen/ia32/constants-ia32.h", + "src/codegen/ia32/interface-descriptors-ia32-inl.h", "src/codegen/ia32/macro-assembler-ia32.h", "src/codegen/ia32/register-ia32.h", "src/codegen/ia32/sse-instr.h", @@ -3158,6 +3236,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/x64/assembler-x64.h", "src/codegen/x64/constants-x64.h", "src/codegen/x64/fma-instr.h", + "src/codegen/x64/interface-descriptors-x64-inl.h", "src/codegen/x64/macro-assembler-x64.h", "src/codegen/x64/register-x64.h", "src/codegen/x64/sse-instr.h", @@ -3187,6 +3266,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/arm/assembler-arm-inl.h", "src/codegen/arm/assembler-arm.h", "src/codegen/arm/constants-arm.h", + "src/codegen/arm/interface-descriptors-arm-inl.h", "src/codegen/arm/macro-assembler-arm.h", "src/codegen/arm/register-arm.h", "src/compiler/backend/arm/instruction-codes-arm.h", @@ -3206,6 +3286,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/arm64/decoder-arm64-inl.h", "src/codegen/arm64/decoder-arm64.h", "src/codegen/arm64/instructions-arm64.h", + "src/codegen/arm64/interface-descriptors-arm64-inl.h", "src/codegen/arm64/macro-assembler-arm64-inl.h", "src/codegen/arm64/macro-assembler-arm64.h", "src/codegen/arm64/register-arm64.h", @@ -3259,11 +3340,10 @@ v8_header_set("v8_internal_headers") { ] } else if (v8_current_cpu == "ppc") { sources += [ ### 
gcmole(arch:ppc) ### - "src/baseline/ppc/baseline-assembler-ppc-inl.h", - "src/baseline/ppc/baseline-compiler-ppc-inl.h", "src/codegen/ppc/assembler-ppc-inl.h", "src/codegen/ppc/assembler-ppc.h", "src/codegen/ppc/constants-ppc.h", + "src/codegen/ppc/interface-descriptors-ppc-inl.h", "src/codegen/ppc/macro-assembler-ppc.h", "src/codegen/ppc/register-ppc.h", "src/compiler/backend/ppc/instruction-codes-ppc.h", @@ -3275,11 +3355,10 @@ v8_header_set("v8_internal_headers") { ] } else if (v8_current_cpu == "ppc64") { sources += [ ### gcmole(arch:ppc64) ### - "src/baseline/ppc/baseline-assembler-ppc-inl.h", - "src/baseline/ppc/baseline-compiler-ppc-inl.h", "src/codegen/ppc/assembler-ppc-inl.h", "src/codegen/ppc/assembler-ppc.h", "src/codegen/ppc/constants-ppc.h", + "src/codegen/ppc/interface-descriptors-ppc-inl.h", "src/codegen/ppc/macro-assembler-ppc.h", "src/codegen/ppc/register-ppc.h", "src/compiler/backend/ppc/instruction-codes-ppc.h", @@ -3296,6 +3375,7 @@ v8_header_set("v8_internal_headers") { "src/codegen/s390/assembler-s390-inl.h", "src/codegen/s390/assembler-s390.h", "src/codegen/s390/constants-s390.h", + "src/codegen/s390/interface-descriptors-s390-inl.h", "src/codegen/s390/macro-assembler-s390.h", "src/codegen/s390/register-s390.h", "src/compiler/backend/s390/instruction-codes-s390.h", @@ -3384,6 +3464,7 @@ v8_compiler_sources = [ "src/compiler/graph-trimmer.cc", "src/compiler/graph-visualizer.cc", "src/compiler/graph.cc", + "src/compiler/heap-refs.cc", "src/compiler/js-call-reducer.cc", "src/compiler/js-context-specialization.cc", "src/compiler/js-create-lowering.cc", @@ -3430,7 +3511,6 @@ v8_compiler_sources = [ "src/compiler/refs-map.cc", "src/compiler/representation-change.cc", "src/compiler/schedule.cc", - "src/compiler/scheduled-machine-lowering.cc", "src/compiler/scheduler.cc", "src/compiler/select-lowering.cc", "src/compiler/serializer-for-background-compilation.cc", @@ -3452,7 +3532,6 @@ v8_compiler_sources = [ if (v8_enable_webassembly) { v8_compiler_sources += [ "src/compiler/int64-lowering.cc", - "src/compiler/simd-scalar-lowering.cc", "src/compiler/wasm-compiler.cc", ] } @@ -3676,6 +3755,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/base-space.cc", "src/heap/basic-memory-chunk.cc", "src/heap/code-object-registry.cc", + "src/heap/code-range.cc", "src/heap/code-stats.cc", "src/heap/collection-barrier.cc", "src/heap/combined-heap.cc", @@ -3763,6 +3843,7 @@ v8_source_set("v8_base_without_compiler") { "src/logging/log-utils.cc", "src/logging/log.cc", "src/logging/metrics.cc", + "src/logging/runtime-call-stats.cc", "src/logging/tracing-flags.cc", "src/numbers/bignum-dtoa.cc", "src/numbers/bignum.cc", @@ -3908,7 +3989,6 @@ v8_source_set("v8_base_without_compiler") { "src/runtime/runtime-typedarray.cc", "src/runtime/runtime-weak-refs.cc", "src/runtime/runtime.cc", - "src/sanitizer/lsan-page-allocator.cc", "src/snapshot/code-serializer.cc", "src/snapshot/context-deserializer.cc", "src/snapshot/context-serializer.cc", @@ -3977,6 +4057,7 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/graph-builder-interface.cc", "src/wasm/jump-table-assembler.cc", "src/wasm/local-decl-encoder.cc", + "src/wasm/memory-protection-key.cc", "src/wasm/memory-tracing.cc", "src/wasm/module-compiler.cc", "src/wasm/module-decoder.cc", @@ -3988,10 +4069,12 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/value-type.cc", "src/wasm/wasm-code-manager.cc", "src/wasm/wasm-debug.cc", + "src/wasm/wasm-debug.h", "src/wasm/wasm-engine.cc", "src/wasm/wasm-external-refs.cc", 
"src/wasm/wasm-features.cc", "src/wasm/wasm-import-wrapper-cache.cc", + "src/wasm/wasm-init-expr.cc", "src/wasm/wasm-js.cc", "src/wasm/wasm-module-builder.cc", "src/wasm/wasm-module-sourcemap.cc", @@ -4007,7 +4090,10 @@ v8_source_set("v8_base_without_compiler") { if (v8_enable_third_party_heap) { sources += v8_third_party_heap_files } else { - sources += [ "src/heap/third-party/heap-api-stub.cc" ] + sources += [ + "src/heap/third-party/heap-api-stub.cc", + "src/heap/third-party/heap-api.h", + ] } if (v8_enable_conservative_stack_scanning) { @@ -4027,24 +4113,15 @@ v8_source_set("v8_base_without_compiler") { ] } - if (v8_check_header_includes) { - # This file will be generated by tools/generate-header-include-checks.py - # if the "check_v8_header_includes" gclient variable is set. - import("check-header-includes/sources.gni") - sources += check_header_includes_sources - } - if (v8_current_cpu == "x86") { sources += [ ### gcmole(arch:ia32) ### "src/codegen/ia32/assembler-ia32.cc", "src/codegen/ia32/cpu-ia32.cc", - "src/codegen/ia32/interface-descriptors-ia32.cc", "src/codegen/ia32/macro-assembler-ia32.cc", "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc", "src/compiler/backend/ia32/code-generator-ia32.cc", "src/compiler/backend/ia32/instruction-scheduler-ia32.cc", "src/compiler/backend/ia32/instruction-selector-ia32.cc", - "src/debug/ia32/debug-ia32.cc", "src/deoptimizer/ia32/deoptimizer-ia32.cc", "src/diagnostics/ia32/disasm-ia32.cc", "src/diagnostics/ia32/unwinder-ia32.cc", @@ -4056,13 +4133,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc", "src/codegen/x64/assembler-x64.cc", "src/codegen/x64/cpu-x64.cc", - "src/codegen/x64/interface-descriptors-x64.cc", "src/codegen/x64/macro-assembler-x64.cc", "src/compiler/backend/x64/code-generator-x64.cc", "src/compiler/backend/x64/instruction-scheduler-x64.cc", "src/compiler/backend/x64/instruction-selector-x64.cc", "src/compiler/backend/x64/unwinding-info-writer-x64.cc", - "src/debug/x64/debug-x64.cc", "src/deoptimizer/x64/deoptimizer-x64.cc", "src/diagnostics/x64/disasm-x64.cc", "src/diagnostics/x64/eh-frame-x64.cc", @@ -4091,13 +4166,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/arm/assembler-arm.cc", "src/codegen/arm/constants-arm.cc", "src/codegen/arm/cpu-arm.cc", - "src/codegen/arm/interface-descriptors-arm.cc", "src/codegen/arm/macro-assembler-arm.cc", "src/compiler/backend/arm/code-generator-arm.cc", "src/compiler/backend/arm/instruction-scheduler-arm.cc", "src/compiler/backend/arm/instruction-selector-arm.cc", "src/compiler/backend/arm/unwinding-info-writer-arm.cc", - "src/debug/arm/debug-arm.cc", "src/deoptimizer/arm/deoptimizer-arm.cc", "src/diagnostics/arm/disasm-arm.cc", "src/diagnostics/arm/eh-frame-arm.cc", @@ -4113,7 +4186,6 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/arm64/decoder-arm64.cc", "src/codegen/arm64/instructions-arm64-constants.cc", "src/codegen/arm64/instructions-arm64.cc", - "src/codegen/arm64/interface-descriptors-arm64.cc", "src/codegen/arm64/macro-assembler-arm64.cc", "src/codegen/arm64/register-arm64.cc", "src/codegen/arm64/utils-arm64.cc", @@ -4121,7 +4193,6 @@ v8_source_set("v8_base_without_compiler") { "src/compiler/backend/arm64/instruction-scheduler-arm64.cc", "src/compiler/backend/arm64/instruction-selector-arm64.cc", "src/compiler/backend/arm64/unwinding-info-writer-arm64.cc", - "src/debug/arm64/debug-arm64.cc", "src/deoptimizer/arm64/deoptimizer-arm64.cc", "src/diagnostics/arm64/disasm-arm64.cc", 
"src/diagnostics/arm64/eh-frame-arm64.cc", @@ -4146,12 +4217,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/mips/assembler-mips.cc", "src/codegen/mips/constants-mips.cc", "src/codegen/mips/cpu-mips.cc", - "src/codegen/mips/interface-descriptors-mips.cc", + "src/codegen/mips/interface-descriptors-mips-inl.h", "src/codegen/mips/macro-assembler-mips.cc", "src/compiler/backend/mips/code-generator-mips.cc", "src/compiler/backend/mips/instruction-scheduler-mips.cc", "src/compiler/backend/mips/instruction-selector-mips.cc", - "src/debug/mips/debug-mips.cc", "src/deoptimizer/mips/deoptimizer-mips.cc", "src/diagnostics/mips/disasm-mips.cc", "src/diagnostics/mips/unwinder-mips.cc", @@ -4164,12 +4234,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/mips64/assembler-mips64.cc", "src/codegen/mips64/constants-mips64.cc", "src/codegen/mips64/cpu-mips64.cc", - "src/codegen/mips64/interface-descriptors-mips64.cc", + "src/codegen/mips64/interface-descriptors-mips64-inl.h", "src/codegen/mips64/macro-assembler-mips64.cc", "src/compiler/backend/mips64/code-generator-mips64.cc", "src/compiler/backend/mips64/instruction-scheduler-mips64.cc", "src/compiler/backend/mips64/instruction-selector-mips64.cc", - "src/debug/mips64/debug-mips64.cc", "src/deoptimizer/mips64/deoptimizer-mips64.cc", "src/diagnostics/mips64/disasm-mips64.cc", "src/diagnostics/mips64/unwinder-mips64.cc", @@ -4182,13 +4251,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/ppc/assembler-ppc.cc", "src/codegen/ppc/constants-ppc.cc", "src/codegen/ppc/cpu-ppc.cc", - "src/codegen/ppc/interface-descriptors-ppc.cc", "src/codegen/ppc/macro-assembler-ppc.cc", "src/compiler/backend/ppc/code-generator-ppc.cc", "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", "src/compiler/backend/ppc/instruction-selector-ppc.cc", "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", - "src/debug/ppc/debug-ppc.cc", "src/deoptimizer/ppc/deoptimizer-ppc.cc", "src/diagnostics/ppc/disasm-ppc.cc", "src/diagnostics/ppc/eh-frame-ppc.cc", @@ -4202,13 +4269,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/ppc/assembler-ppc.cc", "src/codegen/ppc/constants-ppc.cc", "src/codegen/ppc/cpu-ppc.cc", - "src/codegen/ppc/interface-descriptors-ppc.cc", "src/codegen/ppc/macro-assembler-ppc.cc", "src/compiler/backend/ppc/code-generator-ppc.cc", "src/compiler/backend/ppc/instruction-scheduler-ppc.cc", "src/compiler/backend/ppc/instruction-selector-ppc.cc", "src/compiler/backend/ppc/unwinding-info-writer-ppc.cc", - "src/debug/ppc/debug-ppc.cc", "src/deoptimizer/ppc/deoptimizer-ppc.cc", "src/diagnostics/ppc/disasm-ppc.cc", "src/diagnostics/ppc/eh-frame-ppc.cc", @@ -4222,13 +4287,11 @@ v8_source_set("v8_base_without_compiler") { "src/codegen/s390/assembler-s390.cc", "src/codegen/s390/constants-s390.cc", "src/codegen/s390/cpu-s390.cc", - "src/codegen/s390/interface-descriptors-s390.cc", "src/codegen/s390/macro-assembler-s390.cc", "src/compiler/backend/s390/code-generator-s390.cc", "src/compiler/backend/s390/instruction-scheduler-s390.cc", "src/compiler/backend/s390/instruction-selector-s390.cc", "src/compiler/backend/s390/unwinding-info-writer-s390.cc", - "src/debug/s390/debug-s390.cc", "src/deoptimizer/s390/deoptimizer-s390.cc", "src/diagnostics/s390/disasm-s390.cc", "src/diagnostics/s390/eh-frame-s390.cc", @@ -4239,15 +4302,17 @@ v8_source_set("v8_base_without_compiler") { ] } else if (v8_current_cpu == "riscv64") { sources += [ ### gcmole(arch:riscv64) ### + "src/baseline/riscv64/baseline-assembler-riscv64-inl.h", + 
"src/baseline/riscv64/baseline-compiler-riscv64-inl.h", + "src/codegen/riscv64/assembler-riscv64-inl.h", "src/codegen/riscv64/assembler-riscv64.cc", "src/codegen/riscv64/constants-riscv64.cc", "src/codegen/riscv64/cpu-riscv64.cc", - "src/codegen/riscv64/interface-descriptors-riscv64.cc", + "src/codegen/riscv64/interface-descriptors-riscv64-inl.h", "src/codegen/riscv64/macro-assembler-riscv64.cc", "src/compiler/backend/riscv64/code-generator-riscv64.cc", "src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc", "src/compiler/backend/riscv64/instruction-selector-riscv64.cc", - "src/debug/riscv64/debug-riscv64.cc", "src/deoptimizer/riscv64/deoptimizer-riscv64.cc", "src/diagnostics/riscv64/disasm-riscv64.cc", "src/diagnostics/riscv64/unwinder-riscv64.cc", @@ -4497,8 +4562,6 @@ v8_component("v8_libbase") { "src/base/atomic-utils.h", "src/base/atomicops.h", "src/base/atomicops_internals_atomicword_compat.h", - "src/base/atomicops_internals_portable.h", - "src/base/atomicops_internals_std.h", "src/base/base-export.h", "src/base/bit-field.h", "src/base/bits-iterator.h", @@ -4532,7 +4595,6 @@ v8_component("v8_libbase") { "src/base/lazy-instance.h", "src/base/logging.cc", "src/base/logging.h", - "src/base/lsan.h", "src/base/macros.h", "src/base/memory.h", "src/base/once.cc", @@ -4558,6 +4620,11 @@ v8_component("v8_libbase") { "src/base/safe_conversions.h", "src/base/safe_conversions_arm_impl.h", "src/base/safe_conversions_impl.h", + "src/base/sanitizer/asan.h", + "src/base/sanitizer/lsan-page-allocator.cc", + "src/base/sanitizer/lsan-page-allocator.h", + "src/base/sanitizer/lsan.h", + "src/base/sanitizer/msan.h", "src/base/small-vector.h", "src/base/sys-info.cc", "src/base/sys-info.h", @@ -4694,9 +4761,11 @@ v8_component("v8_libbase") { if (is_tsan && !build_with_chromium) { data += [ "tools/sanitizers/tsan_suppressions.txt" ] + } - # llvm-symbolizer uses libstdc++ from the clang package. - data += [ "//third_party/llvm-build/Release+Asserts/lib/libstdc++.so.6" ] + if (using_sanitizer && !build_with_chromium) { + data_deps += + [ "//build/config/clang:llvm-symbolizer_data($host_toolchain)" ] } # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris. @@ -4720,8 +4789,6 @@ v8_component("v8_libplatform") { "src/libplatform/delayed-task-queue.h", "src/libplatform/task-queue.cc", "src/libplatform/task-queue.h", - "src/libplatform/tracing/recorder-default.cc", - "src/libplatform/tracing/recorder.h", "src/libplatform/tracing/trace-buffer.cc", "src/libplatform/tracing/trace-buffer.h", "src/libplatform/tracing/trace-config.cc", @@ -4752,8 +4819,6 @@ v8_component("v8_libplatform") { if (v8_use_perfetto) { sources -= [ "//base/trace_event/common/trace_event_common.h", - "src/libplatform/tracing/recorder-default.cc", - "src/libplatform/tracing/recorder.h", "src/libplatform/tracing/trace-buffer.cc", "src/libplatform/tracing/trace-buffer.h", "src/libplatform/tracing/trace-object.cc", @@ -4768,9 +4833,15 @@ v8_component("v8_libplatform") { # TODO(skyostil): Switch TraceEventListener to protozero. 
"//third_party/perfetto/protos/perfetto/trace:lite", ] - } else if (is_win) { - sources -= [ "src/libplatform/tracing/recorder-default.cc" ] - sources += [ "src/libplatform/tracing/recorder-win.cc" ] + } + + if (v8_enable_system_instrumentation) { + sources += [ "src/libplatform/tracing/recorder.h" ] + if (is_mac) { + sources += [ "src/libplatform/tracing/recorder-mac.cc" ] + } else if (is_win) { + sources += [ "src/libplatform/tracing/recorder-win.cc" ] + } } } @@ -4794,8 +4865,13 @@ v8_source_set("fuzzer_support") { v8_source_set("v8_bigint") { sources = [ + "src/bigint/bigint-internal.cc", + "src/bigint/bigint-internal.h", "src/bigint/bigint.h", + "src/bigint/digit-arithmetic.h", + "src/bigint/mul-schoolbook.cc", "src/bigint/vector-arithmetic.cc", + "src/bigint/vector-arithmetic.h", ] configs = [ ":internal_config" ] @@ -4807,7 +4883,6 @@ v8_source_set("v8_cppgc_shared") { "src/heap/base/stack.h", "src/heap/base/worklist.cc", "src/heap/base/worklist.h", - "src/heap/cppgc/sanitizers.h", ] if (is_clang || !is_win) { @@ -4954,6 +5029,8 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/marking-visitor.h", "src/heap/cppgc/marking-worklists.cc", "src/heap/cppgc/marking-worklists.h", + "src/heap/cppgc/memory.cc", + "src/heap/cppgc/memory.h", "src/heap/cppgc/metric-recorder.h", "src/heap/cppgc/name-trait.cc", "src/heap/cppgc/object-allocator.cc", @@ -4961,6 +5038,7 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/object-poisoner.h", "src/heap/cppgc/object-size-trait.cc", "src/heap/cppgc/object-start-bitmap.h", + "src/heap/cppgc/object-view.h", "src/heap/cppgc/page-memory.cc", "src/heap/cppgc/page-memory.h", "src/heap/cppgc/persistent-node.cc", @@ -5032,6 +5110,35 @@ v8_source_set("cppgc_base_for_testing") { public_deps = [ ":cppgc_base" ] } +if (v8_check_header_includes) { + # This file will be generated by tools/generate-header-include-checks.py + # if the "check_v8_header_includes" gclient variable is set. + import("check-header-includes/sources.gni") + v8_source_set("check_headers") { + configs = [ ":internal_config" ] + sources = check_header_includes_sources + + # Any rules that contain headers files should be added here either directly + # or indirectly by including something that has it transitively in its + # public_deps. + deps = [ + ":d8", + ":mksnapshot", + ":torque_base", + ":torque_ls_base", + ":v8_base_without_compiler", + ":v8_bigint", + ":v8_initializers", + ":v8_internal_headers", + ":v8_libbase", + ":v8_maybe_icu", + ":wee8", + "src/inspector:inspector", + "src/inspector:inspector_string_conversions", + ] + } +} + ############################################################################### # Produce a single static library for embedders # @@ -5284,6 +5391,10 @@ group("gn_all") { if (want_v8_shell) { deps += [ ":v8_shell" ] } + + if (v8_check_header_includes) { + deps += [ ":check_headers" ] + } } group("v8_python_base") { @@ -6199,9 +6310,7 @@ if (!build_with_chromium && v8_use_perfetto) { configs = [ ":v8_tracing_config" ] public_configs = [ "//third_party/perfetto/gn:public_config" ] deps = [ - "//third_party/perfetto/src/trace_processor:export_json", "//third_party/perfetto/src/trace_processor:storage_minimal", - "//third_party/perfetto/src/tracing:client_api", "//third_party/perfetto/src/tracing/core", # TODO(skyostil): Support non-POSIX platforms. 
@@ -6210,5 +6319,11 @@ if (!build_with_chromium && v8_use_perfetto) { "//third_party/perfetto/src/tracing:in_process_backend", "//third_party/perfetto/src/tracing:platform_impl", ] + + public_deps = [ + "//third_party/perfetto/include/perfetto/trace_processor", + "//third_party/perfetto/src/trace_processor:export_json", + "//third_party/perfetto/src/tracing:client_api", + ] } } # if (!build_with_chromium && v8_use_perfetto) diff --git a/deps/v8/DEPS b/deps/v8/DEPS index b27a4e8e8fada7..d3de2c5ddc9eaf 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -9,7 +9,6 @@ gclient_gn_args = [ # TODO(https://crbug.com/1137662, https://crbug.com/1080854) # Remove when migration is complete. 'checkout_fuchsia_for_arm64_host', - 'checkout_google_benchmark', ] vars = { @@ -44,13 +43,11 @@ vars = { 'download_jsfunfuzz': False, 'check_v8_header_includes': False, - 'checkout_google_benchmark' : False, - # GN CIPD package version. - 'gn_version': 'git_revision:dba01723a441c358d843a575cb7720d54ddcdf92', + 'gn_version': 'git_revision:39a87c0b36310bdf06b692c098f199a0d97fc810', # luci-go CIPD package version. - 'luci_go': 'git_revision:d6d24b11ecded4d89f3dfd1b2e5a0072a3d4ab15', + 'luci_go': 'git_revision:22d464e2f8f3bd2bd33f69fe819326d63f881008', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version @@ -88,15 +85,15 @@ vars = { deps = { 'build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '77edba11e25386aa719d4f08c3ce2d8c4f868c15', + Var('chromium_url') + '/chromium/src/build.git' + '@' + '4036cf1b17581f5668b487a25e252d56e0321a7f', 'third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '98a52e2e312dd10d7fcf281e322039a6b706b86b', + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '6b0a611c2c692684f94c0c3629f793feebd16b39', 'third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '81d656878ec611cb0b42d52c82e9dae93920d9ba', + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f022e298b4f4a782486bb6d5ce6589c998b51fe2', 'third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '084aee04777db574038af9e9d33ca5caed577462', + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '4ae2535e8e894c3cd81d46aacdaf151b5df30709', 'buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '5dbd89c9d9c0b0ff47cefdc2bc421b8c9a1c5a21', + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '20b1d0fc13ebaa263a1248f08814f523a86e6bed', 'buildtools/clang_format/script': Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94', 'buildtools/linux64': { @@ -122,9 +119,9 @@ deps = { 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '8fa87946779682841e21e2da977eccfb6cb3bded', 'buildtools/third_party/libc++abi/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + 'd0f33885a2ffa7d5af74af6065b60eb48e3c70f5', + Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '767de317f97343db64af048e3d198ab8b10fee5d', 'buildtools/third_party/libunwind/trunk': - Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '08f35c8514a74817103121def05351186830d4b7', + Var('chromium_url') + 
'/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '7846d256355e40273f7cc192c8f5893e8665a1f9', 'buildtools/win': { 'packages': [ { @@ -136,7 +133,7 @@ deps = { 'condition': 'host_os == "win"', }, 'base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'cab90cbdaaf4444d67aef6ce3cef09fc5fdeb560', + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'd5bb24e5d9802c8c917fcaa4375d5239a586c168', 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '401019bf85744311b26c88ced255cd53401af8b7', 'condition': 'checkout_android', @@ -184,7 +181,7 @@ deps = { 'dep_type': 'cipd', }, 'third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + '41a5e5e465ad93d6e08224613d3544334a6278bc', + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'c1e1d559b46476584ec0eb1d83bd7f43fa5a1b36', 'condition': 'checkout_android', }, 'third_party/colorama/src': { @@ -196,10 +193,9 @@ deps = { 'condition': 'checkout_fuchsia', }, 'third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '07f4869221012b16b7f9ee685d94856e1fc9f361', + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '23ef29555ef4789f555f1ba8c51b4c52975f0907', 'third_party/google_benchmark/src': { - 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7f27afe83b82f3a98baf58ef595814b9d42a5b2b', - 'condition': 'checkout_google_benchmark', + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7d0d9061d83b663ce05d9de5da3d5865a3845b79', }, 'third_party/jinja2': Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '11b6b3e5971d760bd2d310f77643f55a818a6d25', @@ -212,7 +208,7 @@ deps = { 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', 'test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '31126581e7290f9233c29cefd93f66c6ac78f1c9', + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '6d353a4436747e2de8820efac27ae5ef7e601b60', 'test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b', 'third_party/qemu-linux-x64': { @@ -239,7 +235,7 @@ deps = { 'packages': [ { 'package': 'fuchsia/third_party/aemu/linux-amd64', - 'version': 'SeLS6a0f6IL-PCOUKbMTN5LYgjjJbDSnb3DGf5q9pwsC' + 'version': '-Sz2gSN_5yVSHDlitjxUlmZpHuz-F2kFDW6TnmggCZoC' }, ], 'condition': 'host_os == "linux" and checkout_fuchsia', @@ -256,7 +252,7 @@ deps = { 'dep_type': 'cipd', }, 'tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a387faa2a6741f565e45d78804a49a0e55de5909', + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'a38f01b956e091d5e698d2af484c81cd4e9a2a2d', 'tools/luci-go': { 'packages': [ { @@ -290,7 +286,7 @@ deps = { 'third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + '6a59a2ad1f61d9696092f79b6d74368b4d7970a3', 'third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '09490503d0f201b81e03f5ca0ab8ba8ee76d4a8e', + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '5b8d433953beb2a75a755ba321a3076b95f7cdb9', 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + 
'9059f5cad030ba11d37818847443a53918c327b1', 'third_party/ittapi': { diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS index 2a478dbdc5c689..2ad1949b515935 100644 --- a/deps/v8/OWNERS +++ b/deps/v8/OWNERS @@ -21,7 +21,7 @@ per-file PRESUBMIT.py=file:INFRA_OWNERS per-file codereview.settings=file:INFRA_OWNERS per-file AUTHORS=file:COMMON_OWNERS -per-file WATCHLIST=file:COMMON_OWNERS +per-file WATCHLISTS=file:COMMON_OWNERS per-file *-mips*=file:MIPS_OWNERS per-file *-mips64*=file:MIPS_OWNERS diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 2ee14d545ee66e..61963c62f62009 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -279,7 +279,7 @@ def PathToGuardMacro(path): for line in f.NewContents(): for i in range(len(guard_patterns)): if guard_patterns[i].match(line): - found_patterns[i] = True + found_patterns[i] = True if skip_check_pattern.match(line): file_omitted = True break @@ -485,7 +485,9 @@ def FilterFile(affected_file): files_to_check=(r'src[\\\/].*', r'test[\\\/].*'), # Skip api.cc since we cannot easily add the 'noexcept' annotation to # public methods. - files_to_skip=(r'src[\\\/]api[\\\/]api\.cc',)) + # Skip src/bigint/ because it's meant to be V8-independent. + files_to_skip=(r'src[\\\/]api[\\\/]api\.cc', + r'src[\\\/]bigint[\\\/].*')) # matches any class name. class_name = r'\b([A-Z][A-Za-z0-9_:]*)(?:::\1)?' diff --git a/deps/v8/RISCV_OWNERS b/deps/v8/RISCV_OWNERS index f3240b500b196f..8f8e15a40a0cd3 100644 --- a/deps/v8/RISCV_OWNERS +++ b/deps/v8/RISCV_OWNERS @@ -1,3 +1,3 @@ brice.dobry@futurewei.com -lazyparser@gmail.com peng.w@rioslab.org +qiuji@iscas.ac.cn diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS index fa95f144cb73cb..c54f15ad797078 100644 --- a/deps/v8/WATCHLISTS +++ b/deps/v8/WATCHLISTS @@ -51,6 +51,9 @@ '|test/cctest/interpreter/' \ '|test/unittests/interpreter/', }, + 'baseline': { + 'filepath': 'src/baseline/' + }, 'feature_shipping_status': { 'filepath': 'src/flags/flag-definitions.h', }, @@ -91,6 +94,9 @@ 'filepath': 'src/base/ieee754\.(cc|h)' \ '|src/base/overflowing-math.h' \ '|LICENSE.fdlibm', + }, + 'regexp': { + 'filepath': 'src/.*regexp', } }, @@ -110,6 +116,10 @@ 'interpreter': [ 'rmcilroy@chromium.org', ], + 'baseline': [ + 'leszeks+watch@chromium.org', + 'verwaest+watch@chromium.org', + ], 'feature_shipping_status': [ 'hablich@chromium.org', ], @@ -142,5 +152,9 @@ 'rtoy+watch@chromium.org', 'hongchan+watch@chromium.org' ], + 'regexp': [ + 'jgruber+watch@chromium.org', + 'pthier+watch@chromium.org' + ], }, } diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index dcbb09bb663b0c..dff2f9b2773c68 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -256,10 +256,8 @@ namespace perfetto { namespace legacy { template <> -bool BASE_EXPORT ConvertThreadId(const ::base::PlatformThreadId& thread, - uint64_t* track_uuid_out, - int32_t* pid_override_out, - int32_t* tid_override_out); +perfetto::ThreadTrack BASE_EXPORT +ConvertThreadId(const ::base::PlatformThreadId& thread); } // namespace legacy diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 9325baf996e666..8741e86c087069 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -75,13 +75,16 @@ declare_args() { # executed as standard JavaScript instead. v8_enable_webassembly = "" + # Enable runtime call stats. + v8_enable_runtime_call_stats = true + # Add fuzzilli fuzzer support. 
v8_fuzzilli = false # Scan the call stack conservatively during garbage collection. v8_enable_conservative_stack_scanning = false - v8_enable_google_benchmark = checkout_google_benchmark + v8_enable_google_benchmark = false cppgc_is_standalone = false } diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h index f4f0e72bd512ae..7a803cf2cc43b3 100644 --- a/deps/v8/include/cppgc/allocation.h +++ b/deps/v8/include/cppgc/allocation.h @@ -8,6 +8,7 @@ #include #include +#include #include "cppgc/custom-space.h" #include "cppgc/garbage-collected.h" @@ -103,6 +104,10 @@ class MakeGarbageCollectedTraitBase * \returns the memory to construct an object of type T on. */ V8_INLINE static void* Allocate(AllocationHandle& handle, size_t size) { + static_assert( + std::is_base_of::value, + "U of GarbageCollected must be a base of T. Check " + "GarbageCollected base class inheritance."); return SpacePolicy< typename internal::GCInfoFolding< T, typename T::ParentMostGarbageCollectedType>::ResultType, diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h index 9cfcd23fdf8e3b..fe61e9acbc3815 100644 --- a/deps/v8/include/cppgc/cross-thread-persistent.h +++ b/deps/v8/include/cppgc/cross-thread-persistent.h @@ -28,19 +28,19 @@ class BasicCrossThreadPersistent final : public PersistentBase, ~BasicCrossThreadPersistent() { Clear(); } - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( const SourceLocation& loc = SourceLocation::Current()) : LocationPolicy(loc) {} - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( std::nullptr_t, const SourceLocation& loc = SourceLocation::Current()) : LocationPolicy(loc) {} - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( SentinelPointer s, const SourceLocation& loc = SourceLocation::Current()) : PersistentBase(s), LocationPolicy(loc) {} - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( T* raw, const SourceLocation& loc = SourceLocation::Current()) : PersistentBase(raw), LocationPolicy(loc) { if (!IsValid(raw)) return; @@ -58,7 +58,7 @@ class BasicCrossThreadPersistent final : public PersistentBase, friend class BasicCrossThreadPersistent; }; - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( UnsafeCtorTag, T* raw, const SourceLocation& loc = SourceLocation::Current()) : PersistentBase(raw), LocationPolicy(loc) { @@ -68,14 +68,14 @@ class BasicCrossThreadPersistent final : public PersistentBase, this->CheckPointer(raw); } - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( T& raw, const SourceLocation& loc = SourceLocation::Current()) : BasicCrossThreadPersistent(&raw, loc) {} template ::value>> - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( internal::BasicMember member, @@ -94,7 +94,7 @@ class BasicCrossThreadPersistent final : public PersistentBase, template ::value>> - BasicCrossThreadPersistent( // NOLINT + BasicCrossThreadPersistent( const BasicCrossThreadPersistent& other, @@ -139,7 +139,7 @@ class BasicCrossThreadPersistent final : public PersistentBase, GetNode()->UpdateOwner(this); other.SetValue(nullptr); other.SetNode(nullptr); - this->CheckPointer(GetValue()); + this->CheckPointer(Get()); return *this; } @@ -236,7 +236,7 @@ class BasicCrossThreadPersistent final : public PersistentBase, * * \returns the object. */ - operator T*() const { return Get(); } // NOLINT + operator T*() const { return Get(); } /** * Dereferences the stored object. 
diff --git a/deps/v8/include/cppgc/explicit-management.h b/deps/v8/include/cppgc/explicit-management.h index 8fb321c08ca5e4..cdb6af48586e02 100644 --- a/deps/v8/include/cppgc/explicit-management.h +++ b/deps/v8/include/cppgc/explicit-management.h @@ -12,9 +12,12 @@ #include "cppgc/type-traits.h" namespace cppgc { + +class HeapHandle; + namespace internal { -V8_EXPORT void FreeUnreferencedObject(void*); +V8_EXPORT void FreeUnreferencedObject(HeapHandle&, void*); V8_EXPORT bool Resize(void*, size_t); } // namespace internal @@ -30,15 +33,19 @@ namespace subtle { * to `object` after calling `FreeUnreferencedObject()`. In case such a * reference exists, it's use results in a use-after-free. * + * To aid in using the API, `FreeUnreferencedObject()` may be called from + * destructors on objects that would be reclaimed in the same garbage collection + * cycle. + * + * \param heap_handle The corresponding heap. * \param object Reference to an object that is of type `GarbageCollected` and * should be immediately reclaimed. */ template -void FreeUnreferencedObject(T* object) { +void FreeUnreferencedObject(HeapHandle& heap_handle, T& object) { static_assert(IsGarbageCollectedTypeV, "Object must be of type GarbageCollected."); - if (!object) return; - internal::FreeUnreferencedObject(object); + internal::FreeUnreferencedObject(heap_handle, &object); } /** @@ -53,6 +60,8 @@ void FreeUnreferencedObject(T* object) { * object down, the reclaimed area is not used anymore. Any subsequent use * results in a use-after-free. * + * The `object` must be live when calling `Resize()`. + * * \param object Reference to an object that is of type `GarbageCollected` and * should be resized. * \param additional_bytes Bytes in addition to sizeof(T) that the object should diff --git a/deps/v8/include/cppgc/heap-statistics.h b/deps/v8/include/cppgc/heap-statistics.h index cf8d6633cc2751..2fe6e1ae58abbb 100644 --- a/deps/v8/include/cppgc/heap-statistics.h +++ b/deps/v8/include/cppgc/heap-statistics.h @@ -57,7 +57,7 @@ struct HeapStatistics final { }; /** - * Stastistics of the freelist (used only in non-large object spaces). For + * Statistics of the freelist (used only in non-large object spaces). For * each bucket in the freelist the statistics record the bucket size, the * number of freelist entries in the bucket, and the overall allocated memory * consumed by these freelist entries. @@ -67,7 +67,7 @@ struct HeapStatistics final { std::vector bucket_size; /** number of freelist entries per bucket. */ std::vector free_count; - /** memory size concumed by freelist entries per size. */ + /** memory size consumed by freelist entries per size. */ std::vector free_size; }; diff --git a/deps/v8/include/cppgc/internal/compiler-specific.h b/deps/v8/include/cppgc/internal/compiler-specific.h index c580894b35d0fe..595b6398cb720a 100644 --- a/deps/v8/include/cppgc/internal/compiler-specific.h +++ b/deps/v8/include/cppgc/internal/compiler-specific.h @@ -21,13 +21,13 @@ namespace cppgc { // [[no_unique_address]] comes in C++20 but supported in clang with -std >= // c++11. 
-#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address) // NOLINTNEXTLINE +#if CPPGC_HAS_CPP_ATTRIBUTE(no_unique_address) #define CPPGC_NO_UNIQUE_ADDRESS [[no_unique_address]] #else #define CPPGC_NO_UNIQUE_ADDRESS #endif -#if CPPGC_HAS_ATTRIBUTE(unused) // NOLINTNEXTLINE +#if CPPGC_HAS_ATTRIBUTE(unused) #define CPPGC_UNUSED __attribute__((unused)) #else #define CPPGC_UNUSED diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h index ceb002f02d555f..e09b86199f46b3 100644 --- a/deps/v8/include/cppgc/internal/pointer-policies.h +++ b/deps/v8/include/cppgc/internal/pointer-policies.h @@ -9,12 +9,15 @@ #include #include "cppgc/internal/write-barrier.h" +#include "cppgc/sentinel-pointer.h" #include "cppgc/source-location.h" +#include "cppgc/type-traits.h" #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { namespace internal { +class HeapBase; class PersistentRegion; class CrossThreadPersistentRegion; @@ -50,11 +53,31 @@ struct NoWriteBarrierPolicy { class V8_EXPORT EnabledCheckingPolicy { protected: - EnabledCheckingPolicy(); - void CheckPointer(const void* ptr); + template + void CheckPointer(const T* ptr) { + if (!ptr || (kSentinelPointer == ptr)) return; + + CheckPointersImplTrampoline::Call(this, ptr); + } private: - void* impl_; + void CheckPointerImpl(const void* ptr, bool points_to_payload); + + template > + struct CheckPointersImplTrampoline { + static void Call(EnabledCheckingPolicy* policy, const T* ptr) { + policy->CheckPointerImpl(ptr, false); + } + }; + + template + struct CheckPointersImplTrampoline { + static void Call(EnabledCheckingPolicy* policy, const T* ptr) { + policy->CheckPointerImpl(ptr, IsGarbageCollectedTypeV); + } + }; + + const HeapBase* heap_ = nullptr; }; class DisabledCheckingPolicy { @@ -63,9 +86,11 @@ class DisabledCheckingPolicy { }; #if V8_ENABLE_CHECKS -using DefaultCheckingPolicy = EnabledCheckingPolicy; +using DefaultMemberCheckingPolicy = EnabledCheckingPolicy; +using DefaultPersistentCheckingPolicy = EnabledCheckingPolicy; #else -using DefaultCheckingPolicy = DisabledCheckingPolicy; +using DefaultMemberCheckingPolicy = DisabledCheckingPolicy; +using DefaultPersistentCheckingPolicy = DisabledCheckingPolicy; #endif class KeepLocationPolicy { @@ -133,10 +158,10 @@ template + typename CheckingPolicy = DefaultPersistentCheckingPolicy> class BasicPersistent; template + typename CheckingPolicy = DefaultMemberCheckingPolicy> class BasicMember; } // namespace internal diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h index 7b76bc4f7556fa..16aed060226ea3 100644 --- a/deps/v8/include/cppgc/member.h +++ b/deps/v8/include/cppgc/member.h @@ -24,8 +24,11 @@ namespace internal { // BasicMember on casting to the right type as needed. class MemberBase { protected: + struct AtomicInitializerTag {}; + MemberBase() = default; explicit MemberBase(const void* value) : raw_(value) {} + MemberBase(const void* value, AtomicInitializerTag) { SetRawAtomic(value); } const void** GetRawSlot() const { return &raw_; } const void* GetRaw() const { return raw_; } @@ -61,6 +64,20 @@ class BasicMember final : private MemberBase, private CheckingPolicy { this->CheckPointer(Get()); } BasicMember(T& raw) : BasicMember(&raw) {} // NOLINT + // Atomic ctor. Using the AtomicInitializerTag forces BasicMember to + // initialize using atomic assignments. This is required for preventing + // data races with concurrent marking. 
+ using AtomicInitializerTag = MemberBase::AtomicInitializerTag; + BasicMember(std::nullptr_t, AtomicInitializerTag atomic) + : MemberBase(nullptr, atomic) {} + BasicMember(SentinelPointer s, AtomicInitializerTag atomic) + : MemberBase(s, atomic) {} + BasicMember(T* raw, AtomicInitializerTag atomic) : MemberBase(raw, atomic) { + InitializingWriteBarrier(); + this->CheckPointer(Get()); + } + BasicMember(T& raw, AtomicInitializerTag atomic) + : BasicMember(&raw, atomic) {} // Copy ctor. BasicMember(const BasicMember& other) : BasicMember(other.Get()) {} // Allow heterogeneous construction. @@ -79,9 +96,8 @@ class BasicMember final : private MemberBase, private CheckingPolicy { template ::value>> - BasicMember( // NOLINT - BasicMember&& other) noexcept + BasicMember(BasicMember&& other) noexcept : BasicMember(other.Get()) { other.Clear(); } @@ -90,10 +106,9 @@ class BasicMember final : private MemberBase, private CheckingPolicy { typename PersistentLocationPolicy, typename PersistentCheckingPolicy, typename = std::enable_if_t::value>> - BasicMember( // NOLINT - const BasicPersistent& - p) + BasicMember(const BasicPersistent& p) : BasicMember(p.Get()) {} // Copy assignment. @@ -161,7 +176,7 @@ class BasicMember final : private MemberBase, private CheckingPolicy { } explicit operator bool() const { return Get(); } - operator T*() const { return Get(); } // NOLINT + operator T*() const { return Get(); } T* operator->() const { return Get(); } T& operator*() const { return *Get(); } diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h index d7aac723c0d24d..22cda7c6e8fcb7 100644 --- a/deps/v8/include/cppgc/persistent.h +++ b/deps/v8/include/cppgc/persistent.h @@ -95,7 +95,7 @@ class BasicPersistent final : public PersistentBase, template ::value>> - BasicPersistent( // NOLINT + BasicPersistent( const BasicPersistent& other, const SourceLocation& loc = SourceLocation::Current()) @@ -118,7 +118,7 @@ class BasicPersistent final : public PersistentBase, template ::value>> - BasicPersistent(internal::BasicMember member, const SourceLocation& loc = SourceLocation::Current()) @@ -181,7 +181,7 @@ class BasicPersistent final : public PersistentBase, } explicit operator bool() const { return Get(); } - operator T*() const { return Get(); } // NOLINT + operator T*() const { return Get(); } T* operator->() const { return Get(); } T& operator*() const { return *Get(); } diff --git a/deps/v8/include/cppgc/sentinel-pointer.h b/deps/v8/include/cppgc/sentinel-pointer.h index f7915834e5ac2c..b049d1a2b34632 100644 --- a/deps/v8/include/cppgc/sentinel-pointer.h +++ b/deps/v8/include/cppgc/sentinel-pointer.h @@ -14,7 +14,7 @@ namespace internal { // sentinel is defined by the embedder. struct SentinelPointer { template - operator T*() const { // NOLINT + operator T*() const { static constexpr intptr_t kSentinelValue = 1; return reinterpret_cast(kSentinelValue); } diff --git a/deps/v8/include/cppgc/type-traits.h b/deps/v8/include/cppgc/type-traits.h index 2b50a2164b2267..56cd55d61e2a5a 100644 --- a/deps/v8/include/cppgc/type-traits.h +++ b/deps/v8/include/cppgc/type-traits.h @@ -7,6 +7,7 @@ // This file should stay with minimal dependencies to allow embedder to check // against Oilpan types without including any other parts. 
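The member.h additions above give BasicMember constructors that take an AtomicInitializerTag, so the initializing store is atomic and cannot race with the concurrent marker. A hypothetical sketch, assuming the tag alias is reachable through the public Member<T> interface:

#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/visitor.h"

class Child final : public cppgc::GarbageCollected<Child> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Parent final : public cppgc::GarbageCollected<Parent> {
 public:
  explicit Parent(Child* child)
      // Atomic initialization: safe even if the marker scans this object
      // while it is still being constructed.
      : child_(child, cppgc::Member<Child>::AtomicInitializerTag{}) {}

  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }

 private:
  cppgc::Member<Child> child_;
};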
+#include #include namespace cppgc { @@ -164,6 +165,18 @@ struct IsUntracedMemberType : std::false_type {}; template struct IsUntracedMemberType : std::true_type {}; +template +struct IsComplete { + private: + template + static std::true_type IsSizeOfKnown(U*); + static std::false_type IsSizeOfKnown(...); + + public: + static constexpr bool value = + decltype(IsSizeOfKnown(std::declval()))::value; +}; + } // namespace internal /** @@ -223,6 +236,12 @@ constexpr bool IsWeakMemberTypeV = internal::IsWeakMemberType::value; template constexpr bool IsWeakV = internal::IsWeak::value; +/** + * Value is true for types that are complete, and false otherwise. + */ +template +constexpr bool IsCompleteV = internal::IsComplete::value; + } // namespace cppgc #endif // INCLUDE_CPPGC_TYPE_TRAITS_H_ diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index 666952f27b97f6..9c0483ae7059c2 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -267,7 +267,7 @@ domain Debugger BreakpointId breakpointId # Restarts particular call frame from the beginning. - command restartFrame + deprecated command restartFrame parameters # Call frame identifier to evaluate on. CallFrameId callFrameId @@ -707,6 +707,8 @@ experimental domain HeapProfiler # when the tracking is stopped. optional boolean reportProgress optional boolean treatGlobalObjectsAsRoots + # If true, numerical values are included in the snapshot + optional boolean captureNumericValue command takeHeapSnapshot parameters @@ -714,6 +716,8 @@ experimental domain HeapProfiler optional boolean reportProgress # If true, a raw snapshot without artifical roots will be generated optional boolean treatGlobalObjectsAsRoots + # If true, numerical values are included in the snapshot + optional boolean captureNumericValue event addHeapSnapshotChunk parameters @@ -1563,7 +1567,10 @@ domain Runtime # execution context. If omitted and `executionContextName` is not set, # the binding is exposed to all execution contexts of the target. # This parameter is mutually exclusive with `executionContextName`. - optional ExecutionContextId executionContextId + # Deprecated in favor of `executionContextName` due to an unclear use case + # and bugs in implementation (crbug.com/1169639). `executionContextId` will be + # removed in the future. + deprecated optional ExecutionContextId executionContextId # If specified, the binding is exposed to the executionContext with # matching name, even for contexts created after the binding is added. # See also `ExecutionContext.name` and `worldName` parameter to diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h index fba35f71c9ae07..745fb04347ee4c 100644 --- a/deps/v8/include/v8-cppgc.h +++ b/deps/v8/include/v8-cppgc.h @@ -28,6 +28,8 @@ namespace internal { class CppHeap; } // namespace internal +class CustomSpaceStatisticsReceiver; + /** * Describes how V8 wrapper objects maintain references to garbage-collected C++ * objects. @@ -119,6 +121,16 @@ class V8_EXPORT CppHeap { cppgc::HeapStatistics CollectStatistics( cppgc::HeapStatistics::DetailLevel detail_level); + /** + * Collects statistics for the given spaces and reports them to the receiver. + * + * \param custom_spaces a collection of custom space indicies. + * \param receiver an object that gets the results. + */ + void CollectCustomSpaceStatisticsAtLastGC( + std::vector custom_spaces, + std::unique_ptr receiver); + /** * Enables a detached mode that allows testing garbage collection using * `cppgc::testing` APIs. 
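The IsComplete probe added to cppgc/type-traits.h above is the usual sizeof-based detection idiom, and IsCompleteV<T> simply exposes its result. A small illustration (the two class names are placeholders):

#include "cppgc/type-traits.h"

class OnlyDeclared;   // forward declaration, sizeof unknown
class Defined {};     // complete type

static_assert(!cppgc::IsCompleteV<OnlyDeclared>,
              "incomplete types report false");
static_assert(cppgc::IsCompleteV<Defined>, "complete types report true");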
Once used, the heap cannot be attached to an @@ -277,6 +289,26 @@ class V8_EXPORT JSHeapConsistency final { const TracedReferenceBase& ref); }; +/** + * Provided as input to `CppHeap::CollectCustomSpaceStatisticsAtLastGC()`. + * + * Its method is invoked with the results of the statistic collection. + */ +class CustomSpaceStatisticsReceiver { + public: + virtual ~CustomSpaceStatisticsReceiver() = default; + /** + * Reports the size of a space at the last GC. It is called for each space + * that was requested in `CollectCustomSpaceStatisticsAtLastGC()`. + * + * \param space_index The index of the space. + * \param bytes The total size of live objects in the space at the last GC. + * It is zero if there was no GC yet. + */ + virtual void AllocatedBytes(cppgc::CustomSpaceIndex space_index, + size_t bytes) = 0; +}; + } // namespace v8 namespace cppgc { diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index f8b5acb093456e..cda1959c866548 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -70,8 +70,7 @@ * return GetInternalField(wrapper); * } - * static void FastMethod(v8::ApiObject receiver_obj, int param) { - * v8::Object* v8_object = reinterpret_cast(&api_object); + * static void FastMethod(v8::Local receiver_obj, int param) { * CustomEmbedderType* receiver = static_cast( * receiver_obj->GetAlignedPointerFromInternalField( * kV8EmbedderWrapperObjectIndex)); @@ -190,10 +189,13 @@ #include #include +#include "v8.h" // NOLINT(build/include_directory) #include "v8config.h" // NOLINT(build/include_directory) namespace v8 { +class Isolate; + class CTypeInfo { public: enum class Type : uint8_t { @@ -206,6 +208,8 @@ class CTypeInfo { kFloat32, kFloat64, kV8Value, + kApiObject, // This will be deprecated once all users have + // migrated from v8::ApiObject to v8::Local. }; // kCallbackOptionsType is not part of the Type enum @@ -321,6 +325,14 @@ struct ApiObject { * \endcode */ struct FastApiCallbackOptions { + /** + * Creates a new instance of FastApiCallbackOptions for testing purpose. The + * returned instance may be filled with mock data. + */ + static FastApiCallbackOptions CreateForTesting(Isolate* isolate) { + return {false, {0}}; + } + /** * If the callback wants to signal an error condition or to perform an * allocation, it must set options.fallback to true and do an early return @@ -336,8 +348,12 @@ struct FastApiCallbackOptions { /** * The `data` passed to the FunctionTemplate constructor, or `undefined`. + * `data_ptr` allows for default constructing FastApiCallbackOptions. */ - const ApiObject data; + union { + uintptr_t data_ptr; + v8::Value data; + }; }; namespace internal { @@ -398,16 +414,22 @@ struct TypeInfoHelper { static constexpr CTypeInfo::Type Type() { return CTypeInfo::Type::Enum; } \ }; -#define BASIC_C_TYPES(V) \ - V(void, kVoid) \ - V(bool, kBool) \ - V(int32_t, kInt32) \ - V(uint32_t, kUint32) \ - V(int64_t, kInt64) \ - V(uint64_t, kUint64) \ - V(float, kFloat32) \ - V(double, kFloat64) \ - V(ApiObject, kV8Value) +#define BASIC_C_TYPES(V) \ + V(void, kVoid) \ + V(bool, kBool) \ + V(int32_t, kInt32) \ + V(uint32_t, kUint32) \ + V(int64_t, kInt64) \ + V(uint64_t, kUint64) \ + V(float, kFloat32) \ + V(double, kFloat64) \ + V(ApiObject, kApiObject) \ + V(v8::Local, kV8Value) \ + V(v8::Local, kV8Value) + +// ApiObject was a temporary solution to wrap the pointer to the v8::Value. 
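The v8-cppgc.h additions pair CppHeap::CollectCustomSpaceStatisticsAtLastGC() with the CustomSpaceStatisticsReceiver interface shown above. A sketch of a receiver that just logs the reported sizes (the receiver name and the logging are illustrative):

#include <cstddef>
#include <cstdio>
#include <memory>

#include "v8-cppgc.h"

class LoggingReceiver final : public v8::CustomSpaceStatisticsReceiver {
 public:
  void AllocatedBytes(cppgc::CustomSpaceIndex space_index,
                      size_t bytes) override {
    std::printf("custom space %zu: %zu live bytes at last GC\n",
                space_index.value, bytes);
  }
};

void ReportSpace(v8::CppHeap& heap, cppgc::CustomSpaceIndex index) {
  heap.CollectCustomSpaceStatisticsAtLastGC(
      {index}, std::make_unique<LoggingReceiver>());
}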
+// Please use v8::Local in new code for the arguments and +// v8::Local for the receiver, as ApiObject will be deprecated. BASIC_C_TYPES(SPECIALIZE_GET_TYPE_INFO_HELPER_FOR) diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h index a55518e45930e1..852b39d7252295 100644 --- a/deps/v8/include/v8-inspector.h +++ b/deps/v8/include/v8-inspector.h @@ -105,8 +105,9 @@ class V8_EXPORT V8StackTrace { virtual StringView topSourceURL() const = 0; virtual int topLineNumber() const = 0; virtual int topColumnNumber() const = 0; - virtual StringView topScriptId() const = 0; - virtual int topScriptIdAsInteger() const = 0; + virtual int topScriptId() const = 0; + V8_DEPRECATE_SOON("Use V8::StackTrace::topScriptId() instead.") + int topScriptIdAsInteger() const { return topScriptId(); } virtual StringView topFunctionName() const = 0; virtual ~V8StackTrace() = default; @@ -130,6 +131,10 @@ class V8_EXPORT V8InspectorSession { virtual v8::Local get(v8::Local) = 0; virtual ~Inspectable() = default; }; + class V8_EXPORT CommandLineAPIScope { + public: + virtual ~CommandLineAPIScope() = default; + }; virtual void addInspectedObject(std::unique_ptr) = 0; // Dispatching protocol messages. @@ -139,6 +144,9 @@ class V8_EXPORT V8InspectorSession { virtual std::vector> supportedDomains() = 0; + virtual std::unique_ptr + initializeCommandLineAPIScope(int executionContextId) = 0; + // Debugger actions. virtual void schedulePauseOnNextStatement(StringView breakReason, StringView breakDetails) = 0; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index eb18f76504d6fa..5f0177182b598f 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -40,6 +40,13 @@ const int kWeakHeapObjectTag = 3; const int kHeapObjectTagSize = 2; const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1; +// Tag information for fowarding pointers stored in object headers. +// 0b00 at the lowest 2 bits in the header indicates that the map word is a +// forwarding pointer. +const int kForwardingTag = 0; +const int kForwardingTagSize = 2; +const intptr_t kForwardingTagMask = (1 << kForwardingTagSize) - 1; + // Tag information for Smi. const int kSmiTag = 0; const int kSmiTagSize = 1; @@ -120,23 +127,28 @@ constexpr bool HeapSandboxIsEnabled() { using ExternalPointer_t = Address; -// If the heap sandbox is enabled, these tag values will be XORed with the +// If the heap sandbox is enabled, these tag values will be ORed with the // external pointers in the external pointer table to prevent use of pointers of -// the wrong type. -enum ExternalPointerTag : Address { - kExternalPointerNullTag = static_cast
<Address>(0ULL), - kArrayBufferBackingStoreTag = static_cast<Address>(1ULL << 48), - kTypedArrayExternalPointerTag = static_cast<Address>(2ULL << 48), - kDataViewDataPointerTag = static_cast<Address>(3ULL << 48), - kExternalStringResourceTag = static_cast<Address>(4ULL << 48), - kExternalStringResourceDataTag = static_cast<Address>(5ULL << 48), - kForeignForeignAddressTag = static_cast<Address>(6ULL << 48), - kNativeContextMicrotaskQueueTag = static_cast<Address>(7ULL << 48), - // TODO(v8:10391, saelo): Currently has to be zero so that raw zero values are - // also nullptr - kEmbedderDataSlotPayloadTag = static_cast<Address>
(0ULL << 48), +// the wrong type. When a pointer is loaded, it is ANDed with the inverse of the +// expected type's tag. The tags are constructed in a way that guarantees that a +// failed type check will result in one or more of the top bits of the pointer +// to be set, rendering the pointer inacessible. This construction allows +// performing the type check and removing GC marking bits from the pointer at +// the same time. +enum ExternalPointerTag : uint64_t { + kExternalPointerNullTag = 0x0000000000000000, + kArrayBufferBackingStoreTag = 0x00ff000000000000, // 0b000000011111111 + kTypedArrayExternalPointerTag = 0x017f000000000000, // 0b000000101111111 + kDataViewDataPointerTag = 0x01bf000000000000, // 0b000000110111111 + kExternalStringResourceTag = 0x01df000000000000, // 0b000000111011111 + kExternalStringResourceDataTag = 0x01ef000000000000, // 0b000000111101111 + kForeignForeignAddressTag = 0x01f7000000000000, // 0b000000111110111 + kNativeContextMicrotaskQueueTag = 0x01fb000000000000, // 0b000000111111011 + kEmbedderDataSlotPayloadTag = 0x01fd000000000000, // 0b000000111111101 }; +constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000; + #ifdef V8_31BIT_SMIS_ON_64BIT_ARCH using PlatformSmiTagging = SmiTagging; #else @@ -177,6 +189,14 @@ V8_EXPORT bool ShouldThrowOnError(v8::internal::Isolate* isolate); * depend on functions and constants defined here. */ class Internals { +#ifdef V8_MAP_PACKING + V8_INLINE static constexpr internal::Address UnpackMapWord( + internal::Address mapword) { + // TODO(wenyuzhao): Clear header metadata. + return mapword ^ kMapWordXorMask; + } +#endif + public: // These values match non-compiler-dependent values defined within // the implementation of v8. @@ -253,6 +273,17 @@ class Internals { // incremental GC once the external memory reaches this limit. static constexpr int kExternalAllocationSoftLimit = 64 * 1024 * 1024; +#ifdef V8_MAP_PACKING + static const uintptr_t kMapWordMetadataMask = 0xffffULL << 48; + // The lowest two bits of mapwords are always `0b10` + static const uintptr_t kMapWordSignature = 0b10; + // XORing a (non-compressed) map with this mask ensures that the two + // low-order bits are 0b10. The 0 at the end makes this look like a Smi, + // although real Smis have all lower 32 bits unset. We only rely on these + // values passing as Smis in very few places. + static const int kMapWordXorMask = 0b11; +#endif + V8_EXPORT static void CheckInitializedImpl(v8::Isolate* isolate); V8_INLINE static void CheckInitialized(v8::Isolate* isolate) { #ifdef V8_ENABLE_CHECKS @@ -279,6 +310,9 @@ class Internals { V8_INLINE static int GetInstanceType(const internal::Address obj) { typedef internal::Address A; A map = ReadTaggedPointerField(obj, kHeapObjectMapOffset); +#ifdef V8_MAP_PACKING + map = UnpackMapWord(map); +#endif return ReadRawField(map, kMapInstanceTypeOffset); } diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h index 85d3f8a4821ab1..49ada0a7bf68e5 100644 --- a/deps/v8/include/v8-profiler.h +++ b/deps/v8/include/v8-profiler.h @@ -492,7 +492,7 @@ class V8_EXPORT HeapGraphNode { /** * An interface for exporting data from V8, using "push" model. */ -class V8_EXPORT OutputStream { // NOLINT +class V8_EXPORT OutputStream { public: enum WriteResult { kContinue = 0, @@ -519,7 +519,6 @@ class V8_EXPORT OutputStream { // NOLINT } }; - /** * HeapSnapshots record the state of the JS heap at some moment. 
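The v8-internal.h comment above describes the heap-sandbox type check: the tag is ORed into the top bits when an external pointer is stored, and a load ANDs with the inverse of the expected tag, so a mismatch leaves high bits set and yields an unusable address. A stand-alone arithmetic sketch of that idea (not V8 code), using two of the tag values defined above:

#include <cstdint>

constexpr uint64_t kTagMask = 0xffff000000000000;

// Store: tag the raw pointer (whose top 16 bits are zero).
constexpr uint64_t Tag(uint64_t pointer, uint64_t tag) { return pointer | tag; }

// Load: strip the expected tag. If the stored tag differs, the stored tag
// bits not covered by the expected tag survive in the top 16 bits.
constexpr uint64_t Untag(uint64_t entry, uint64_t expected_tag) {
  return entry & ~expected_tag;
}

static_assert(Untag(Tag(0x1000, 0x01df000000000000), 0x01df000000000000) ==
                  0x1000,
              "matching tag restores the original pointer");
static_assert((Untag(Tag(0x1000, 0x01df000000000000), 0x01f7000000000000) &
               kTagMask) != 0,
              "mismatched tag leaves top bits set");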
*/ @@ -586,7 +585,7 @@ class V8_EXPORT HeapSnapshot { * An interface for reporting progress and controlling long-running * activities. */ -class V8_EXPORT ActivityControl { // NOLINT +class V8_EXPORT ActivityControl { public: enum ControlOption { kContinue = 0, @@ -600,7 +599,6 @@ class V8_EXPORT ActivityControl { // NOLINT virtual ControlOption ReportProgressValue(int done, int total) = 0; }; - /** * AllocationProfile is a sampled profile of allocations done by the program. * This is structured as a call-graph. @@ -902,6 +900,15 @@ class V8_EXPORT HeapProfiler { ObjectNameResolver* global_object_name_resolver = nullptr, bool treat_global_objects_as_roots = true); + /** + * Takes a heap snapshot and returns it. + */ + const HeapSnapshot* TakeHeapSnapshotV8_92( + ActivityControl* control = nullptr, + ObjectNameResolver* global_object_name_resolver = nullptr, + bool treat_global_objects_as_roots = true, + bool capture_numeric_value = false); + /** * Starts tracking of heap objects population statistics. After calling * this method, all heap objects relocations done by the garbage collector diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h index 89ec4f6a789c03..8e4d66153d1f22 100644 --- a/deps/v8/include/v8-util.h +++ b/deps/v8/include/v8-util.h @@ -43,7 +43,7 @@ class StdMapTraits { static bool Empty(Impl* impl) { return impl->empty(); } static size_t Size(Impl* impl) { return impl->size(); } - static void Swap(Impl& a, Impl& b) { std::swap(a, b); } // NOLINT + static void Swap(Impl& a, Impl& b) { std::swap(a, b); } static Iterator Begin(Impl* impl) { return impl->begin(); } static Iterator End(Impl* impl) { return impl->end(); } static K Key(Iterator it) { return it->first; } diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index 4bdb66b2bf6847..1cf4d7e28448ce 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 9 -#define V8_MINOR_VERSION 1 -#define V8_BUILD_NUMBER 269 -#define V8_PATCH_LEVEL 38 +#define V8_MINOR_VERSION 2 +#define V8_BUILD_NUMBER 230 +#define V8_PATCH_LEVEL 21 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 840dd2c2258b4f..cbdca19367d766 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -596,7 +596,7 @@ template class PersistentBase { */ V8_INLINE uint16_t WrapperClassId() const; - PersistentBase(const PersistentBase& other) = delete; // NOLINT + PersistentBase(const PersistentBase& other) = delete; void operator=(const PersistentBase&) = delete; private: @@ -708,7 +708,7 @@ template class Persistent : public PersistentBase { return *this; } template - V8_INLINE Persistent& operator=(const Persistent& that) { // NOLINT + V8_INLINE Persistent& operator=(const Persistent& that) { Copy(that); return *this; } @@ -723,7 +723,7 @@ template class Persistent : public PersistentBase { // TODO(dcarney): this is pretty useless, fix or remove template - V8_INLINE static Persistent& Cast(const Persistent& that) { // NOLINT + V8_INLINE static Persistent& Cast(const Persistent& that) { #ifdef V8_ENABLE_CHECKS // If we're going to perform the type check then we have to check // that the handle isn't empty before doing the checked cast. 
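The transitional TakeHeapSnapshotV8_92() entry point above adds the capture_numeric_value flag that mirrors the new captureNumericValue protocol option. A minimal call sketch:

#include "v8-profiler.h"
#include "v8.h"

const v8::HeapSnapshot* SnapshotWithNumbers(v8::Isolate* isolate) {
  v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
  return profiler->TakeHeapSnapshotV8_92(
      /*control=*/nullptr,
      /*global_object_name_resolver=*/nullptr,
      /*treat_global_objects_as_roots=*/true,
      /*capture_numeric_value=*/true);
}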
@@ -734,7 +734,7 @@ template class Persistent : public PersistentBase { // TODO(dcarney): this is pretty useless, fix or remove template - V8_INLINE Persistent& As() const { // NOLINT + V8_INLINE Persistent& As() const { return Persistent::Cast(*this); } @@ -803,7 +803,7 @@ class Global : public PersistentBase { /** * Pass allows returning uniques from functions, etc. */ - Global Pass() { return static_cast(*this); } // NOLINT + Global Pass() { return static_cast(*this); } /* * For compatibility with Chromium's base::Bind (base::Passed). @@ -905,8 +905,8 @@ class TracedReferenceBase { * The exact semantics are: * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc. * - Non-tracing garbage collections refer to - * |v8::EmbedderHeapTracer::IsRootForNonTracingGC()| whether the handle should - * be treated as root or not. + * |v8::EmbedderRootsHandler::IsRoot()| whether the handle should + * be treated as root or not. * * Note that the base class cannot be instantiated itself. Choose from * - TracedGlobal @@ -1678,10 +1678,12 @@ class V8_EXPORT Module : public Data { /** * Evaluates the module and its dependencies. * - * If status is kInstantiated, run the module's code. On success, set status - * to kEvaluated and return the completion value; on failure, set status to - * kErrored and propagate the thrown exception (which is then also available - * via |GetException|). + * If status is kInstantiated, run the module's code and return a Promise + * object. On success, set status to kEvaluated and resolve the Promise with + * the completion value; on failure, set status to kErrored and reject the + * Promise with the error. + * + * If IsGraphAsync() is false, the returned Promise is settled. */ V8_WARN_UNUSED_RESULT MaybeLocal Evaluate(Local context); @@ -3282,7 +3284,7 @@ class V8_EXPORT String : public Name { */ bool IsExternalOneByte() const; - class V8_EXPORT ExternalStringResourceBase { // NOLINT + class V8_EXPORT ExternalStringResourceBase { public: virtual ~ExternalStringResourceBase() = default; @@ -3633,6 +3635,7 @@ class V8_EXPORT Symbol : public Name { * Returns the description string of the symbol, or undefined if none. */ Local Description() const; + Local Description(Isolate* isolate) const; V8_DEPRECATED("Use Symbol::Description()") Local Name() const { return Description(); } @@ -3986,8 +3989,7 @@ class V8_EXPORT Object : public Value { // // Returns true on success. V8_WARN_UNUSED_RESULT Maybe DefineProperty( - Local context, Local key, - PropertyDescriptor& descriptor); // NOLINT(runtime/references) + Local context, Local key, PropertyDescriptor& descriptor); V8_WARN_UNUSED_RESULT MaybeLocal Get(Local context, Local key); @@ -5364,7 +5366,7 @@ class V8_EXPORT ArrayBuffer : public Object { * Note that it is unsafe to call back into V8 from any of the allocator * functions. 
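Per the updated Module::Evaluate() documentation above, evaluation now always yields a Promise, which is already settled when IsGraphAsync() is false. A sketch of consuming that contract (error handling kept minimal):

#include "v8.h"

v8::MaybeLocal<v8::Promise> EvaluateModule(v8::Local<v8::Context> context,
                                           v8::Local<v8::Module> module) {
  v8::Local<v8::Value> result;
  if (!module->Evaluate(context).ToLocal(&result)) return {};
  // The completion value is a Promise; for a synchronous graph it is already
  // fulfilled or rejected at this point.
  v8::Local<v8::Promise> promise = result.As<v8::Promise>();
  if (promise->State() == v8::Promise::kRejected) {
    // promise->Result() holds the exception that caused the rejection.
  }
  return promise;
}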
*/ - class V8_EXPORT Allocator { // NOLINT + class V8_EXPORT Allocator { public: virtual ~Allocator() = default; @@ -6756,6 +6758,15 @@ class V8_EXPORT FunctionTemplate : public Template { SideEffectType side_effect_type = SideEffectType::kHasSideEffect, const CFunction* c_function = nullptr); + /** Creates a function template for multiple overloaded fast API calls.*/ + static Local NewWithCFunctionOverloads( + Isolate* isolate, FunctionCallback callback = nullptr, + Local data = Local(), + Local signature = Local(), int length = 0, + ConstructorBehavior behavior = ConstructorBehavior::kAllow, + SideEffectType side_effect_type = SideEffectType::kHasSideEffect, + const MemorySpan& c_function_overloads = {}); + /** * Creates a function template backed/cached by a private property. */ @@ -6787,7 +6798,7 @@ class V8_EXPORT FunctionTemplate : public Template { void SetCallHandler( FunctionCallback callback, Local data = Local(), SideEffectType side_effect_type = SideEffectType::kHasSideEffect, - const CFunction* c_function = nullptr); + const MemorySpan& c_function_overloads = {}); /** Set the predefined length property for the FunctionTemplate. */ void SetLength(int length); @@ -6848,6 +6859,15 @@ class V8_EXPORT FunctionTemplate : public Template { */ bool HasInstance(Local object); + /** + * Returns true if the given value is an API object that was constructed by an + * instance of this function template (without checking for inheriting + * function templates). + * + * This is an experimental feature and may still change significantly. + */ + bool IsLeafTemplateForApiObject(v8::Local value) const; + V8_INLINE static FunctionTemplate* Cast(Data* data); private: @@ -7281,7 +7301,7 @@ class V8_EXPORT AccessorSignature : public Data { /** * Ignore */ -class V8_EXPORT Extension { // NOLINT +class V8_EXPORT Extension { public: // Note that the strings passed into this constructor must live as long // as the Extension itself. @@ -7383,6 +7403,11 @@ class V8_EXPORT ResourceConstraints { /** * The amount of virtual memory reserved for generated code. This is relevant * for 64-bit architectures that rely on code range for calls in code. + * + * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared + * process-wide code range that is lazily initialized. This value is used to + * configure that shared code range when the first Isolate is + * created. Subsequent Isolates ignore this value. */ size_t code_range_size_in_bytes() const { return code_range_size_; } void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; } @@ -7832,6 +7857,10 @@ using WasmSimdEnabledCallback = bool (*)(Local context); // --- Callback for checking if WebAssembly exceptions are enabled --- using WasmExceptionsEnabledCallback = bool (*)(Local context); +// --- Callback for checking if the SharedArrayBuffer constructor is enabled --- +using SharedArrayBufferConstructorEnabledCallback = + bool (*)(Local context); + // --- Garbage Collection Callbacks --- /** @@ -8153,17 +8182,16 @@ using UnhandledExceptionCallback = /** * Interface for iterating through all external resources in the heap. */ -class V8_EXPORT ExternalResourceVisitor { // NOLINT +class V8_EXPORT ExternalResourceVisitor { public: virtual ~ExternalResourceVisitor() = default; virtual void VisitExternalString(Local string) {} }; - /** * Interface for iterating through all the persistent handles in the heap. 
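FunctionTemplate::NewWithCFunctionOverloads() and the MemorySpan-based SetCallHandler() above accept several fast-path candidates for a single JS function. A hedged sketch, assuming the two-argument MemorySpan constructor and CFunction::Make() as declared in v8-fast-api-calls.h (the callback names are made up):

#include "v8-fast-api-calls.h"
#include "v8.h"

// Two hypothetical fast-path overloads for the same API function.
void FastAdd(v8::Local<v8::Object> receiver, int32_t delta) {}
void FastAddWithOptions(v8::Local<v8::Object> receiver, int32_t delta,
                        v8::FastApiCallbackOptions& options) {}

v8::Local<v8::FunctionTemplate> MakeAddTemplate(v8::Isolate* isolate,
                                                v8::FunctionCallback slow_add) {
  static const v8::CFunction kOverloads[] = {
      v8::CFunction::Make(FastAdd),
      v8::CFunction::Make(FastAddWithOptions),
  };
  return v8::FunctionTemplate::NewWithCFunctionOverloads(
      isolate, slow_add, v8::Local<v8::Value>(), v8::Local<v8::Signature>(), 1,
      v8::ConstructorBehavior::kThrow, v8::SideEffectType::kHasSideEffect,
      {kOverloads, 2});
}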
*/ -class V8_EXPORT PersistentHandleVisitor { // NOLINT +class V8_EXPORT PersistentHandleVisitor { public: virtual ~PersistentHandleVisitor() = default; virtual void VisitPersistentHandle(Persistent* value, @@ -8180,6 +8208,45 @@ class V8_EXPORT PersistentHandleVisitor { // NOLINT */ enum class MemoryPressureLevel { kNone, kModerate, kCritical }; +/** + * Handler for embedder roots on non-unified heap garbage collections. + */ +class V8_EXPORT EmbedderRootsHandler { + public: + virtual ~EmbedderRootsHandler() = default; + + /** + * Returns true if the TracedGlobal handle should be considered as root for + * the currently running non-tracing garbage collection and false otherwise. + * The default implementation will keep all TracedGlobal references as roots. + * + * If this returns false, then V8 may decide that the object referred to by + * such a handle is reclaimed. In that case: + * - No action is required if handles are used with destructors, i.e., by just + * using |TracedGlobal|. + * - When run without destructors, i.e., by using |TracedReference|, V8 calls + * |ResetRoot|. + * + * Note that the |handle| is different from the handle that the embedder holds + * for retaining the object. The embedder may use |WrapperClassId()| to + * distinguish cases where it wants handles to be treated as roots from not + * being treated as roots. + */ + virtual bool IsRoot(const v8::TracedReference& handle) = 0; + virtual bool IsRoot(const v8::TracedGlobal& handle) = 0; + + /** + * Used in combination with |IsRoot|. Called by V8 when an + * object that is backed by a handle is reclaimed by a non-tracing garbage + * collection. It is up to the embedder to reset the original handle. + * + * Note that the |handle| is different from the handle that the embedder holds + * for retaining the object. It is up to the embedder to find the original + * handle via the object or class id. + */ + virtual void ResetRoot(const v8::TracedReference& handle) = 0; +}; + /** * Interface for tracing through the embedder heap. During a V8 garbage * collection, V8 collects hidden fields of all potential wrappers, and at the @@ -8242,6 +8309,9 @@ class V8_EXPORT EmbedderHeapTracer { /** * Called by the embedder to notify V8 of an empty execution stack. */ + V8_DEPRECATE_SOON( + "This call only optimized internal caches which V8 is able to figure out " + "on its own now.") void NotifyEmptyEmbedderStack(); /** @@ -8305,34 +8375,14 @@ class V8_EXPORT EmbedderHeapTracer { void FinalizeTracing(); /** - * Returns true if the TracedGlobal handle should be considered as root for - * the currently running non-tracing garbage collection and false otherwise. - * The default implementation will keep all TracedGlobal references as roots. - * - * If this returns false, then V8 may decide that the object referred to by - * such a handle is reclaimed. In that case: - * - No action is required if handles are used with destructors, i.e., by just - * using |TracedGlobal|. - * - When run without destructors, i.e., by using - * |TracedReference|, V8 calls |ResetHandleInNonTracingGC|. - * - * Note that the |handle| is different from the handle that the embedder holds - * for retaining the object. The embedder may use |WrapperClassId()| to - * distinguish cases where it wants handles to be treated as roots from not - * being treated as roots. + * See documentation on EmbedderRootsHandler. 
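EmbedderRootsHandler above factors the root-handling hooks out of EmbedderHeapTracer; it is registered through Isolate::SetEmbedderRootsHandler(), added further down in this patch. A hypothetical handler that drops handles carrying a particular wrapper class id:

#include <cstdint>

#include "v8.h"

class DroppableRootsHandler final : public v8::EmbedderRootsHandler {
 public:
  // Assumed embedder convention for wrappers that may be dropped.
  static constexpr uint16_t kDroppableClassId = 1;

  bool IsRoot(const v8::TracedReference<v8::Value>& handle) override {
    return handle.WrapperClassId() != kDroppableClassId;
  }
  bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) override {
    return handle.WrapperClassId() != kDroppableClassId;
  }
  void ResetRoot(const v8::TracedReference<v8::Value>& handle) override {
    // Look up the embedder-side handle for the wrapped object and reset it;
    // V8 has already decided to reclaim the backing object.
  }
};

After AttachCppHeap(), the embedder would pass an instance via isolate->SetEmbedderRootsHandler(&handler); V8 does not take ownership of it.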
*/ virtual bool IsRootForNonTracingGC( const v8::TracedReference& handle); virtual bool IsRootForNonTracingGC(const v8::TracedGlobal& handle); /** - * Used in combination with |IsRootForNonTracingGC|. Called by V8 when an - * object that is backed by a handle is reclaimed by a non-tracing garbage - * collection. It is up to the embedder to reset the original handle. - * - * Note that the |handle| is different from the handle that the embedder holds - * for retaining the object. It is up to the embedder to find the original - * handle via the object or class id. + * See documentation on EmbedderRootsHandler. */ virtual void ResetHandleInNonTracingGC( const v8::TracedReference& handle); @@ -8791,6 +8841,7 @@ class V8_EXPORT Isolate { kWasmBulkMemory = 109, // Unused. kWasmMultiValue = 110, kWasmExceptionHandling = 111, + kInvalidatedMegaDOMProtector = 112, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to @@ -9126,6 +9177,17 @@ class V8_EXPORT Isolate { */ Local GetIncumbentContext(); + /** + * Schedules a v8::Exception::Error with the given message. + * See ThrowException for more details. Templatized to provide compile-time + * errors in case of too long strings (see v8::String::NewFromUtf8Literal). + */ + template + Local ThrowError(const char (&message)[N]) { + return ThrowError(String::NewFromUtf8Literal(this, message)); + } + Local ThrowError(Local message); + /** * Schedules an exception to be thrown when returning to JavaScript. When an * exception has been scheduled it is illegal to invoke any JavaScript @@ -9170,6 +9232,18 @@ class V8_EXPORT Isolate { */ EmbedderHeapTracer* GetEmbedderHeapTracer(); + /** + * Sets an embedder roots handle that V8 should consider when performing + * non-unified heap garbage collections. + * + * Using only EmbedderHeapTracer automatically sets up a default handler. + * The intended use case is for setting a custom handler after invoking + * `AttachCppHeap()`. + * + * V8 does not take ownership of the handler. + */ + void SetEmbedderRootsHandler(EmbedderRootsHandler* handler); + /** * Attaches a managed C++ heap as an extension to the JavaScript heap. The * embedder maintains ownership of the CppHeap. At most one C++ heap can be @@ -9739,6 +9813,9 @@ class V8_EXPORT Isolate { void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback); + void SetSharedArrayBufferConstructorEnabledCallback( + SharedArrayBufferConstructorEnabledCallback callback); + /** * This function can be called by the embedder to signal V8 that the dynamic * enabling of features has finished. V8 can now set up dynamically added @@ -10113,6 +10190,9 @@ class V8_EXPORT V8 { * Notifies V8 that the process is cross-origin-isolated, which enables * defining the SharedArrayBuffer function on the global object of Contexts. */ + V8_DEPRECATED( + "Use the command line argument --enable-sharedarraybuffer-per-context " + "together with SetSharedArrayBufferConstructorEnabledCallback") static void SetIsCrossOriginIsolated(); private: diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index acd34d7a1f255d..c1bb691f8789e5 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -310,10 +310,6 @@ path. 
Add it with -I to the command line // GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html # define V8_HAS_COMPUTED_GOTO 1 -// Whether constexpr has full C++14 semantics, in particular that non-constexpr -// code is allowed as long as it's not executed for any constexpr instantiation. -# define V8_HAS_CXX14_CONSTEXPR 1 - #elif defined(__GNUC__) # define V8_CC_GNU 1 @@ -336,7 +332,10 @@ path. Add it with -I to the command line # define V8_HAS_ATTRIBUTE_UNUSED 1 # define V8_HAS_ATTRIBUTE_VISIBILITY 1 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT (!V8_CC_INTEL) -# define V8_HAS_CPP_ATTRIBUTE_NODISCARD (V8_HAS_CPP_ATTRIBUTE(nodiscard)) + +// [[nodiscard]] does not work together with with +// __attribute__((visibility(""))) on GCC 7.4 which is why there is no define +// for V8_HAS_CPP_ATTRIBUTE_NODISCARD. See https://crbug.com/v8/11707. # define V8_HAS_BUILTIN_ASSUME_ALIGNED 1 # define V8_HAS_BUILTIN_CLZ 1 @@ -348,11 +347,6 @@ path. Add it with -I to the command line // GCC doc: https://gcc.gnu.org/onlinedocs/gcc/Labels-as-Values.html #define V8_HAS_COMPUTED_GOTO 1 -// Whether constexpr has full C++14 semantics, in particular that non-constexpr -// code is allowed as long as it's not executed for any constexpr instantiation. -// GCC only supports this since version 6. -# define V8_HAS_CXX14_CONSTEXPR (V8_GNUC_PREREQ(6, 0, 0)) - #endif #if defined(_MSC_VER) diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index c87192896cec33..9c0c933cda7d12 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -65,6 +65,7 @@ 'V8 Linux64 - debug builder': 'debug_x64', 'V8 Linux64 - dict tracking - debug - builder': 'debug_x64_dict_tracking_trybot', 'V8 Linux64 - custom snapshot - debug builder': 'debug_x64_custom', + 'V8 Linux64 - heap sandbox - debug - builder': 'debug_x64_heap_sandbox', 'V8 Linux64 - internal snapshot': 'release_x64_internal', 'V8 Linux64 - debug - header includes': 'debug_x64_header_includes', 'V8 Linux64 - shared': 'release_x64_shared_verify_heap', @@ -101,6 +102,7 @@ # FYI. 
'V8 iOS - sim': 'release_x64_ios_simulator', 'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto', + 'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation', 'V8 Linux64 - pointer compression': 'release_x64_pointer_compression', 'V8 Linux64 - pointer compression without dchecks': 'release_x64_pointer_compression_without_dchecks', @@ -136,9 +138,13 @@ 'V8 Clusterfuzz Linux64 - debug builder': 'debug_x64', 'V8 Clusterfuzz Linux64 ASAN no inline - release builder': 'release_x64_asan_symbolized_verify_heap', + 'V8 Clusterfuzz Linux ASAN no inline - release builder': + 'release_x86_asan_symbolized_verify_heap', 'V8 Clusterfuzz Linux64 ASAN - debug builder': 'debug_x64_asan', + 'V8 Clusterfuzz Linux ASAN - debug builder': 'debug_x86_asan', 'V8 Clusterfuzz Linux64 ASAN arm64 - debug builder': 'debug_simulate_arm64_asan', + 'V8 Clusterfuzz Linux - debug builder': 'debug_x86', 'V8 Clusterfuzz Linux ASAN arm - debug builder': 'debug_simulate_arm_asan', 'V8 Clusterfuzz Linux64 CFI - release builder': @@ -202,19 +208,23 @@ 'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols', 'v8_linux_gcc_rel_ng': 'release_x86_gcc_minimal_symbols', 'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap', + 'v8_linux_vtunejit': 'debug_x86_vtunejit', 'v8_linux64_arm64_pointer_compression_rel_ng': 'release_simulate_arm64_pointer_compression', 'v8_linux64_dbg_ng': 'debug_x64_trybot', 'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot', 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', + 'v8_linux64_gcov_coverage': 'release_x64_gcc_coverage', 'v8_linux64_header_includes_dbg': 'debug_x64_header_includes', + 'v8_linux64_heap_sandbox_dbg_ng': 'debug_x64_heap_sandbox', 'v8_linux64_fyi_rel_ng': 'release_x64_test_features_trybot', 'v8_linux64_nodcheck_rel_ng': 'release_x64', 'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto', 'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression', 'v8_linux64_rel_ng': 'release_x64_test_features_trybot', 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap', + 'v8_linux64_single_generation_dbg_ng': 'debug_x64_single_generation', 'v8_linux64_no_wasm_compile_rel': 'release_x64_webassembly_disabled', 'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa', 'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols', @@ -464,8 +474,9 @@ 'release_x64_cfi_clusterfuzz': [ 'release_bot', 'x64', 'cfi_clusterfuzz'], 'release_x64_fuzzilli': [ - 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap', - 'v8_verify_csa', 'v8_enable_verify_predictable', 'fuzzilli'], + 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', + 'v8_verify_heap', 'v8_verify_csa', 'v8_enable_verify_predictable', + 'fuzzilli'], 'release_x64_msvc': [ 'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'], 'release_x64_correctness_fuzzer' : [ @@ -475,8 +486,8 @@ 'release_x64_fuchsia_trybot': [ 'release_trybot', 'x64', 'fuchsia'], 'release_x64_gcc_coverage': [ - 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'no_custom_libcxx', - 'no_sysroot'], + 'release_bot_no_goma', 'x64', 'coverage', 'gcc', 'lld', + 'no_custom_libcxx', 'no_sysroot'], 'release_x64_ios_simulator': [ 'release_bot', 'x64', 'ios_simulator'], 'release_x64_internal': [ @@ -529,13 +540,17 @@ 'debug_x64_fuchsia': [ 'debug_bot', 'x64', 'fuchsia'], 'debug_x64_gcc': [ - 'debug_bot_no_goma', 'x64', 'gcc', 'v8_check_header_includes'], + 'debug_bot_no_goma', 
'x64', 'gcc', 'lld', 'v8_check_header_includes'], 'debug_x64_header_includes': [ 'debug_bot', 'x64', 'v8_check_header_includes'], + 'debug_x64_heap_sandbox': [ + 'debug_bot', 'x64', 'v8_enable_heap_sandbox'], 'debug_x64_minimal_symbols': [ 'debug_bot', 'x64', 'minimal_symbols'], 'debug_x64_perfetto': [ 'debug_bot', 'x64', 'perfetto'], + 'debug_x64_single_generation': [ + 'debug_bot', 'x64', 'v8_enable_single_generation'], 'debug_x64_trybot': [ 'debug_trybot', 'x64'], 'debug_x64_dict_tracking_trybot': [ @@ -548,6 +563,8 @@ # Debug configs for x86. 'debug_x86': [ 'debug_bot', 'x86'], + 'debug_x86_asan': [ + 'debug_bot', 'x86', 'asan', 'lsan'], 'debug_x86_minimal_symbols': [ 'debug_bot', 'x86', 'minimal_symbols'], 'debug_x86_no_i18n': [ @@ -560,10 +577,13 @@ 'debug', 'x86', 'goma', 'v8_enable_slow_dchecks', 'v8_full_debug'], # Release configs for x86. + 'release_x86_asan_symbolized_verify_heap': [ + 'release_bot', 'x86', 'asan', 'lsan', 'symbolized', + 'v8_verify_heap'], 'release_x86_gcc': [ - 'release_bot_no_goma', 'x86', 'gcc', 'v8_check_header_includes'], + 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'v8_check_header_includes'], 'release_x86_gcc_minimal_symbols': [ - 'release_bot_no_goma', 'x86', 'gcc', 'minimal_symbols', + 'release_bot_no_goma', 'x86', 'gcc', 'lld', 'minimal_symbols', 'v8_check_header_includes'], 'release_x86_gcmole': [ 'release_bot', 'x86', 'gcmole'], @@ -645,7 +665,7 @@ 'debug_bot': { 'mixins': [ 'debug', 'shared', 'goma', 'v8_enable_slow_dchecks', - 'v8_optimized_debug'], + 'v8_optimized_debug', 'v8_enable_google_benchmark'], }, 'debug_bot_no_goma': { @@ -701,6 +721,10 @@ 'gn_args': 'target_cpu="x64" target_os="ios"', }, + 'lld': { + 'gn_args': 'use_lld=true', + }, + 'lsan': { 'mixins': ['v8_enable_test_features'], 'gn_args': 'is_lsan=true', @@ -745,7 +769,7 @@ }, 'release_bot': { - 'mixins': ['release', 'static', 'goma'], + 'mixins': ['release', 'static', 'goma', 'v8_enable_google_benchmark'], }, 'release_bot_no_goma': { @@ -834,6 +858,10 @@ 'gn_args': 'v8_control_flow_integrity=true', }, + 'v8_enable_heap_sandbox': { + 'gn_args': 'v8_enable_heap_sandbox=true', + }, + 'v8_enable_lite_mode': { 'gn_args': 'v8_enable_lite_mode=true', }, @@ -842,6 +870,10 @@ 'gn_args': 'v8_enable_slow_dchecks=true', }, + 'v8_enable_google_benchmark': { + 'gn_args': 'v8_enable_google_benchmark=true', + }, + 'webassembly_disabled': { 'gn_args': 'v8_enable_webassembly=false', }, @@ -853,7 +885,10 @@ 'v8_disable_pointer_compression': { 'gn_args': 'v8_enable_pointer_compression=false', }, - + 'v8_enable_single_generation': { + 'gn_args': 'v8_enable_single_generation=true ' + 'v8_disable_write_barriers=true', + }, 'v8_enable_test_features': { 'gn_args': 'v8_enable_test_features=true', }, diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index fc0d1c55b114a6..8fe8872ed9b0fa 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -49,7 +49,7 @@ # Fuchsia 'v8_fuchsia_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'fuchsia-unittests'}, @@ -57,7 +57,7 @@ }, 'V8 Fuchsia': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'fuchsia-unittests'}, @@ -68,7 +68,7 @@ 'v8_linux_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -102,7 +102,7 @@ }, 'v8_linux_gc_stress_dbg_ng_triggered': { 'swarming_dimensions' : { - 
'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit', 'variant': 'slow_path', 'test_args': ['--gc-stress'], 'shards': 2}, @@ -111,7 +111,7 @@ }, 'v8_linux_gcc_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, @@ -120,7 +120,7 @@ 'v8_linux_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -135,7 +135,7 @@ }, 'v8_linux_noi18n_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -146,7 +146,7 @@ 'v8_linux_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -166,7 +166,7 @@ 'v8_linux_optional_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ # Code serializer. @@ -268,7 +268,7 @@ }, 'v8_linux_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -278,7 +278,7 @@ # Linux32 with arm simulators 'v8_linux_arm_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -291,7 +291,7 @@ }, 'v8_linux_arm_lite_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -299,7 +299,7 @@ }, 'v8_linux_arm_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 2}, @@ -314,7 +314,7 @@ # Linux64 'v8_linux64_asan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'shards': 7}, @@ -325,7 +325,7 @@ }, 'v8_linux64_cfi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -338,7 +338,7 @@ 'v8_linux64_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -360,7 +360,7 @@ 'v8_linux64_dict_tracking_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -368,26 +368,14 @@ }, 'v8_linux64_fuzzilli_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, # TODO(almuthanna): Add a new test config for the fuzzilli suite. 'tests': [], }, - 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - { - 'name': 'mjsunit', - 'test_args': ['--gc-stress', '--no-harness'], - 'shards': 3, - }, - ], - }, 'v8_linux64_fyi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ # Infra staging. @@ -401,11 +389,41 @@ {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Concurrent inlining. {'name': 'mjsunit', 'variant': 'concurrent_inlining'}, + # Wasm write protect code space. 
+ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, + ], + }, + 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + { + 'name': 'mjsunit', + 'test_args': ['--gc-stress', '--no-harness'], + 'shards': 3, + }, + ], + }, + 'v8_linux64_gcov_coverage': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing'}, + ], + }, + 'v8_linux64_heap_sandbox_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 2}, ], }, 'v8_linux64_msan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 2}, @@ -415,7 +433,7 @@ 'v8_linux64_nodcheck_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -438,7 +456,7 @@ }, 'v8_linux64_perfetto_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -446,7 +464,15 @@ }, 'v8_linux64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 3}, + ], + }, + 'v8_linux64_single_generation_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -455,7 +481,7 @@ 'v8_linux64_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ # TODO(machenbach): Add benchmarks. @@ -475,7 +501,7 @@ }, 'v8_linux64_tsan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -489,7 +515,7 @@ }, 'v8_linux64_tsan_no_cm_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ { @@ -507,7 +533,7 @@ }, 'v8_linux64_tsan_isolates_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -515,7 +541,7 @@ }, 'v8_linux64_ubsan_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -523,7 +549,7 @@ }, 'v8_linux64_verify_csa_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -533,7 +559,7 @@ # Linux64 with arm64 simulators 'v8_linux_arm64_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -546,7 +572,7 @@ }, 'v8_linux_arm64_gc_stress_dbg_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 12}, @@ -554,7 +580,7 @@ }, 'v8_linux_arm64_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 4}, @@ -567,7 +593,7 @@ }, 'v8_linux_arm64_cfi_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 3}, @@ -576,7 +602,7 @@ }, 
'v8_linux64_arm64_pointer_compression_rel_ng_triggered': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -586,7 +612,7 @@ # Linux64 with RISC-V simulators 'v8_linux64_riscv64_rel_ng_triggered': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -598,7 +624,7 @@ 'swarming_dimensions' : { 'cores': '8', 'cpu': 'armv7l-32-ODROID-XU4', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu', }, 'swarming_task_attrs': { # Use same prio as CI due to limited resources. @@ -760,6 +786,7 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'v8_mac_arm64_sim_dbg_ng_triggered': { @@ -770,6 +797,7 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'v8_mac_arm64_sim_nodcheck_rel_ng_triggered': { @@ -780,6 +808,7 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, ############################################################################## @@ -788,7 +817,7 @@ # Main. 'V8 Fuzzer': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -802,7 +831,7 @@ 'V8 Linux': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -873,7 +902,7 @@ }, 'V8 Linux - arm64 - sim - CFI': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 3}, @@ -882,7 +911,7 @@ }, 'V8 Linux - arm64 - sim - MSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'variant': 'default', 'shards': 3}, @@ -892,7 +921,7 @@ 'V8 Linux - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -991,7 +1020,7 @@ }, 'V8 Linux - full debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -999,7 +1028,7 @@ }, 'V8 Linux - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ { @@ -1017,7 +1046,7 @@ }, 'V8 Linux - noi18n - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mozilla', 'variant': 'default'}, @@ -1027,7 +1056,7 @@ }, 'V8 Linux - predictable': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1037,7 +1066,7 @@ }, 'V8 Linux - shared': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1047,7 +1076,7 @@ }, 'V8 Linux - verify csa': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1055,7 +1084,7 @@ }, 'V8 Linux gcc': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1064,7 +1093,7 @@ 'V8 Linux64': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1108,7 +1137,7 @@ }, 'V8 Linux64 - cfi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 
'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1120,7 +1149,7 @@ }, 'V8 Linux64 - custom snapshot - debug': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit', 'test_args': ['--no-harness']}, @@ -1129,7 +1158,7 @@ 'V8 Linux64 - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1171,7 +1200,7 @@ 'V8 Linux64 - dict tracking - debug': { 'swarming_dimensions': { 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 3}, @@ -1179,7 +1208,7 @@ }, 'V8 Linux64 - debug - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ # Infra staging. @@ -1193,11 +1222,13 @@ {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Concurrent inlining. {'name': 'mjsunit', 'variant': 'concurrent_inlining'}, + # Wasm write protect code space. + {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, 'V8 Linux64 - debug - perfetto': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1208,9 +1239,22 @@ {'name': 'v8testing', 'shards': 2}, ], }, + 'V8 Linux64 - debug - single generation': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'swarming_task_attrs': { + 'expiration': 14400, + 'hard_timeout': 3600, + 'priority': 35, + }, + 'tests': [ + {'name': 'v8testing', 'shards': 3}, + ], + }, 'V8 Linux64 - fyi': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ # Infra staging. @@ -1224,19 +1268,29 @@ {'name': 'mjsunit', 'variant': 'experimental_regexp'}, # Concurrent inlining. {'name': 'mjsunit', 'variant': 'concurrent_inlining'}, + # Wasm write protect code space. 
+ {'name': 'mjsunit', 'variant': 'wasm_write_protect_code'}, ], }, 'V8 Linux64 - gcov coverage': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, ], }, + 'V8 Linux64 - heap sandbox - debug': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-18.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 2}, + ], + }, 'V8 Linux64 - internal snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1244,7 +1298,7 @@ }, 'V8 Linux64 - pointer compression': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'shards': 2}, @@ -1252,7 +1306,7 @@ }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1262,7 +1316,7 @@ }, 'V8 Linux64 - verify csa': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing'}, @@ -1270,7 +1324,7 @@ }, 'V8 Linux64 ASAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'test262', 'shards': 5}, @@ -1281,7 +1335,7 @@ }, 'V8 Linux64 GC Stress - custom snapshot': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ { @@ -1293,7 +1347,7 @@ }, 'V8 Linux64 TSAN': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'benchmarks'}, @@ -1307,7 +1361,7 @@ }, 'V8 Linux64 TSAN - stress-incremental-marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1338,7 +1392,7 @@ }, 'V8 Linux64 TSAN - isolates': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'test_args': ['--isolates'], 'shards': 7}, @@ -1346,7 +1400,7 @@ }, 'V8 Linux64 TSAN - no-concurrent-marking': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ { @@ -1364,7 +1418,7 @@ }, 'V8 Linux64 UBSan': { 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mozilla'}, @@ -1427,8 +1481,8 @@ 'priority': 35, }, 'tests': [ - {'name': 'd8testing'}, - {'name': 'd8testing', 'variant': 'extra'}, + {'name': 'v8testing'}, + {'name': 'v8testing', 'variant': 'extra'}, ], }, 'V8 Mac - arm64 - debug': { @@ -1460,6 +1514,7 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'V8 Mac - arm64 - sim - release': { @@ -1475,6 +1530,7 @@ }, 'tests': [ {'name': 'v8testing', 'shards': 8}, + {'name': 'v8testing', 'variant': 'future', 'shards': 2}, ], }, 'V8 Win32': { @@ -1558,7 +1614,7 @@ 'swarming_dimensions': { 'cores': '8', 'cpu': 'armv7l-32-ODROID-XU4', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu', }, 'swarming_task_attrs': { 'expiration': 21600, @@ -1587,7 +1643,7 @@ 'swarming_dimensions': { 'cores': '8', 'cpu': 'armv7l-32-ODROID-XU4', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu', }, 'swarming_task_attrs': { 'expiration': 21600, @@ -1615,7 +1671,7 @@ 'swarming_dimensions': { 'cores': '8', 'cpu': 'armv7l-32-ODROID-XU4', - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu', }, 'swarming_task_attrs': { 'expiration': 21600, @@ -1633,7 +1689,7 @@ }, 'V8 Linux - arm - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1682,7 +1738,7 @@ }, 'V8 Linux - 
arm - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access', 'shards': 6}, @@ -1735,7 +1791,7 @@ }, 'V8 Linux - arm - sim - lite': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 2}, @@ -1743,7 +1799,7 @@ }, 'V8 Linux - arm - sim - lite - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'v8testing', 'variant': 'default', 'shards': 4}, @@ -1751,7 +1807,7 @@ }, 'V8 Linux - arm64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'tests': [ {'name': 'mjsunit_sp_frame_access'}, @@ -1764,7 +1820,7 @@ }, 'V8 Linux - arm64 - sim - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, # TODO(machenbach): Remove longer timeout when this builder scales better. 'swarming_task_attrs': { @@ -1781,7 +1837,7 @@ }, 'V8 Linux - arm64 - sim - gc stress': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1798,7 +1854,7 @@ }, 'V8 Linux - mips64el - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1812,7 +1868,7 @@ }, 'V8 Linux - mipsel - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1826,7 +1882,7 @@ }, 'V8 Linux - ppc64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1839,7 +1895,7 @@ }, 'V8 Linux - riscv64 - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1852,7 +1908,7 @@ }, 'V8 Linux - s390x - sim': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1865,7 +1921,7 @@ }, 'V8 Linux64 - arm64 - sim - pointer compression': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 14400, @@ -1880,7 +1936,7 @@ # Clusterfuzz. 'V8 NumFuzz': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1897,7 +1953,7 @@ }, 'V8 NumFuzz - TSAN': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1945,7 +2001,7 @@ }, 'V8 NumFuzz - debug': { 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', + 'os': 'Ubuntu-18.04', }, 'swarming_task_attrs': { 'expiration': 13800, @@ -1998,282 +2054,4 @@ }, ], }, - ############################################################################## - # Branches. 
- 'V8 Linux - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux - beta branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 Linux - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux - stable branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 Linux - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux - previous branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 Linux64 - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux64 - beta branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 Linux64 - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux64 - stable branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 Linux64 - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing'}, - ], - }, - 'V8 Linux64 - previous branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'V8 arm - sim - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 arm - sim - beta branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla', 'shards': 2}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, - {'name': 'v8testing', 'shards': 10}, - ], - }, - 'V8 arm - sim - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 arm - sim - stable branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla', 'shards': 2}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, - {'name': 'v8testing', 'shards': 10}, - ], - }, - 'V8 arm - sim - 
previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla'}, - {'name': 'test262', 'variant': 'default'}, - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 arm - sim - previous branch - debug': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'mozilla', 'shards': 2}, - {'name': 'test262', 'variant': 'default', 'shards': 2}, - {'name': 'v8testing', 'shards': 10}, - ], - }, - 'V8 mips64el - sim - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 mips64el - sim - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 mips64el - sim - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 mipsel - sim - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 mipsel - sim - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 mipsel - sim - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 4}, - ], - }, - 'V8 ppc64 - sim - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 ppc64 - sim - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 ppc64 - sim - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 s390x - sim - beta branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 s390x - sim - stable branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, - 'V8 s390x - sim - previous branch': { - 'swarming_dimensions': { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'unittests'}, - ], - }, } diff --git a/deps/v8/samples/OWNERS b/deps/v8/samples/OWNERS index 9c4f2439aa5ef3..6df8720dc57afb 100644 --- a/deps/v8/samples/OWNERS +++ b/deps/v8/samples/OWNERS @@ -1,2 +1,2 @@ mathias@chromium.org -yangguo@chromium.org +cbruni@chromium.org diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index e844ca51bf3157..7de600a88fd310 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -147,20 +147,17 @@ void Print(const v8::FunctionCallbackInfo& args) { // the argument into a JavaScript string. 
void Read(const v8::FunctionCallbackInfo& args) { if (args.Length() != 1) { - args.GetIsolate()->ThrowException( - v8::String::NewFromUtf8Literal(args.GetIsolate(), "Bad parameters")); + args.GetIsolate()->ThrowError("Bad parameters"); return; } v8::String::Utf8Value file(args.GetIsolate(), args[0]); if (*file == NULL) { - args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal( - args.GetIsolate(), "Error loading file")); + args.GetIsolate()->ThrowError("Error loading file"); return; } v8::Local source; if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) { - args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal( - args.GetIsolate(), "Error loading file")); + args.GetIsolate()->ThrowError("Error loading file"); return; } @@ -175,19 +172,16 @@ void Load(const v8::FunctionCallbackInfo& args) { v8::HandleScope handle_scope(args.GetIsolate()); v8::String::Utf8Value file(args.GetIsolate(), args[i]); if (*file == NULL) { - args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal( - args.GetIsolate(), "Error loading file")); + args.GetIsolate()->ThrowError("Error loading file"); return; } v8::Local source; if (!ReadFile(args.GetIsolate(), *file).ToLocal(&source)) { - args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal( - args.GetIsolate(), "Error loading file")); + args.GetIsolate()->ThrowError("Error loading file"); return; } if (!ExecuteString(args.GetIsolate(), source, args[i], false, false)) { - args.GetIsolate()->ThrowException(v8::String::NewFromUtf8Literal( - args.GetIsolate(), "Error executing file")); + args.GetIsolate()->ThrowError("Error executing file"); return; } } diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index c3394e4b7a8590..3c5dca663f61e2 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -5,6 +5,7 @@ include_rules = [ "+src/asmjs/asm-js.h", "-src/baseline", "+src/baseline/baseline.h", + "+src/baseline/baseline-osr-inl.h", "+src/baseline/bytecode-offset-iterator.h", "-src/bigint", "+src/bigint/bigint.h", @@ -14,6 +15,7 @@ include_rules = [ "+src/compiler/wasm-compiler.h", "-src/heap", "+src/heap/basic-memory-chunk.h", + "+src/heap/code-range.h", "+src/heap/combined-heap.h", "+src/heap/embedder-tracing.h", "+src/heap/factory.h", diff --git a/deps/v8/src/api/api-arguments-inl.h b/deps/v8/src/api/api-arguments-inl.h index 57b533c7c9fb11..4edd0dad29d9da 100644 --- a/deps/v8/src/api/api-arguments-inl.h +++ b/deps/v8/src/api/api-arguments-inl.h @@ -96,8 +96,7 @@ inline JSReceiver FunctionCallbackArguments::holder() { Handle interceptor, Handle name) { \ DCHECK_NAME_COMPATIBLE(interceptor, name); \ Isolate* isolate = this->isolate(); \ - RuntimeCallTimerScope timer( \ - isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \ + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamed##FUNCTION##Callback); \ Handle receiver_check_unsupported; \ GenericNamedProperty##FUNCTION##Callback f = \ ToCData( \ @@ -120,8 +119,7 @@ FOR_EACH_CALLBACK(CREATE_NAMED_CALLBACK) Handle interceptor, uint32_t index) { \ DCHECK(!interceptor->is_named()); \ Isolate* isolate = this->isolate(); \ - RuntimeCallTimerScope timer( \ - isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \ + RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexed##FUNCTION##Callback); \ Handle receiver_check_unsupported; \ IndexedProperty##FUNCTION##Callback f = \ ToCData(interceptor->TYPE()); \ @@ -142,7 +140,7 @@ FOR_EACH_CALLBACK(CREATE_INDEXED_CALLBACK) Handle FunctionCallbackArguments::Call(CallHandlerInfo handler) { Isolate* isolate = this->isolate(); LOG(isolate, 
ApiObjectAccess("call", holder())); - RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback); v8::FunctionCallback f = v8::ToCData(handler.callback()); Handle receiver_check_unsupported; @@ -163,8 +161,7 @@ Handle PropertyCallbackArguments::CallNamedEnumerator( Handle interceptor) { DCHECK(interceptor->is_named()); LOG(isolate(), ApiObjectAccess("interceptor-named-enumerator", holder())); - RuntimeCallTimerScope timer(isolate(), - RuntimeCallCounterId::kNamedEnumeratorCallback); + RCS_SCOPE(isolate(), RuntimeCallCounterId::kNamedEnumeratorCallback); return CallPropertyEnumerator(interceptor); } @@ -172,8 +169,7 @@ Handle PropertyCallbackArguments::CallIndexedEnumerator( Handle interceptor) { DCHECK(!interceptor->is_named()); LOG(isolate(), ApiObjectAccess("interceptor-indexed-enumerator", holder())); - RuntimeCallTimerScope timer(isolate(), - RuntimeCallCounterId::kIndexedEnumeratorCallback); + RCS_SCOPE(isolate(), RuntimeCallCounterId::kIndexedEnumeratorCallback); return CallPropertyEnumerator(interceptor); } @@ -181,8 +177,7 @@ Handle PropertyCallbackArguments::CallNamedGetter( Handle interceptor, Handle name) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kNamedGetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback); LOG(isolate, ApiNamedPropertyAccess("interceptor-named-getter", holder(), *name)); GenericNamedPropertyGetterCallback f = @@ -194,8 +189,7 @@ Handle PropertyCallbackArguments::CallNamedDescriptor( Handle interceptor, Handle name) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kNamedDescriptorCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDescriptorCallback); LOG(isolate, ApiNamedPropertyAccess("interceptor-named-descriptor", holder(), *name)); GenericNamedPropertyDescriptorCallback f = @@ -222,8 +216,7 @@ Handle PropertyCallbackArguments::CallNamedSetter( GenericNamedPropertySetterCallback f = ToCData(interceptor->setter()); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kNamedSetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedSetterCallback); PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, v8::Value); LOG(isolate, @@ -237,8 +230,7 @@ Handle PropertyCallbackArguments::CallNamedDefiner( const v8::PropertyDescriptor& desc) { DCHECK_NAME_COMPATIBLE(interceptor, name); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kNamedDefinerCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedDefinerCallback); GenericNamedPropertyDefinerCallback f = ToCData(interceptor->definer()); PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, @@ -253,8 +245,7 @@ Handle PropertyCallbackArguments::CallIndexedSetter( Handle interceptor, uint32_t index, Handle value) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kIndexedSetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedSetterCallback); IndexedPropertySetterCallback f = ToCData(interceptor->setter()); PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, @@ -270,8 +261,7 @@ Handle PropertyCallbackArguments::CallIndexedDefiner( const v8::PropertyDescriptor& desc) { 
DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kIndexedDefinerCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDefinerCallback); IndexedPropertyDefinerCallback f = ToCData(interceptor->definer()); PREPARE_CALLBACK_INFO_FAIL_SIDE_EFFECT_CHECK(isolate, f, Handle, @@ -286,8 +276,7 @@ Handle PropertyCallbackArguments::CallIndexedGetter( Handle interceptor, uint32_t index) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kNamedGetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kNamedGetterCallback); LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-getter", holder(), index)); IndexedPropertyGetterCallback f = @@ -299,8 +288,7 @@ Handle PropertyCallbackArguments::CallIndexedDescriptor( Handle interceptor, uint32_t index) { DCHECK(!interceptor->is_named()); Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kIndexedDescriptorCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kIndexedDescriptorCallback); LOG(isolate, ApiIndexedPropertyAccess("interceptor-indexed-descriptor", holder(), index)); IndexedPropertyDescriptorCallback f = @@ -338,8 +326,7 @@ Handle PropertyCallbackArguments::CallPropertyEnumerator( Handle PropertyCallbackArguments::CallAccessorGetter( Handle info, Handle name) { Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kAccessorGetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback); LOG(isolate, ApiNamedPropertyAccess("accessor-getter", holder(), *name)); AccessorNameGetterCallback f = ToCData(info->getter()); @@ -351,8 +338,7 @@ Handle PropertyCallbackArguments::CallAccessorSetter( Handle accessor_info, Handle name, Handle value) { Isolate* isolate = this->isolate(); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kAccessorSetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorSetterCallback); AccessorNameSetterCallback f = ToCData(accessor_info->setter()); PREPARE_CALLBACK_INFO(isolate, f, Handle, void, accessor_info, diff --git a/deps/v8/src/api/api-inl.h b/deps/v8/src/api/api-inl.h index 849364e655cd69..84b9b288bb09e3 100644 --- a/deps/v8/src/api/api-inl.h +++ b/deps/v8/src/api/api-inl.h @@ -139,6 +139,7 @@ class V8_NODISCARD CallDepthScope { CallDepthScope(i::Isolate* isolate, Local context) : isolate_(isolate), context_(context), + did_enter_context_(false), escaped_(false), safe_for_termination_(isolate->next_v8_call_is_safe_for_termination()), interrupts_scope_(isolate_, i::StackGuard::TERMINATE_EXECUTION, @@ -152,12 +153,11 @@ class V8_NODISCARD CallDepthScope { if (!context.IsEmpty()) { i::Handle env = Utils::OpenHandle(*context); i::HandleScopeImplementer* impl = isolate->handle_scope_implementer(); - if (!isolate->context().is_null() && - isolate->context().native_context() == env->native_context()) { - context_ = Local(); - } else { + if (isolate->context().is_null() || + isolate->context().native_context() != env->native_context()) { impl->SaveContext(isolate->context()); isolate->set_context(*env); + did_enter_context_ = true; } } if (do_callback) isolate_->FireBeforeCallEnteredCallback(); @@ -165,16 +165,17 @@ class V8_NODISCARD CallDepthScope { ~CallDepthScope() { i::MicrotaskQueue* microtask_queue = isolate_->default_microtask_queue(); if (!context_.IsEmpty()) { - i::HandleScopeImplementer* impl = 
isolate_->handle_scope_implementer(); - isolate_->set_context(impl->RestoreContext()); + if (did_enter_context_) { + i::HandleScopeImplementer* impl = isolate_->handle_scope_implementer(); + isolate_->set_context(impl->RestoreContext()); + } i::Handle env = Utils::OpenHandle(*context_); microtask_queue = env->native_context().microtask_queue(); } if (!escaped_) isolate_->thread_local_top()->DecrementCallDepth(this); if (do_callback) isolate_->FireCallCompletedCallback(microtask_queue); -// TODO(jochen): This should be #ifdef DEBUG -#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY +#ifdef DEBUG if (do_callback) { if (microtask_queue && microtask_queue->microtasks_policy() == v8::MicrotasksPolicy::kScoped) { @@ -213,9 +214,9 @@ class V8_NODISCARD CallDepthScope { i::Isolate* const isolate_; Local context_; - bool escaped_; - bool do_callback_; - bool safe_for_termination_; + bool did_enter_context_ : 1; + bool escaped_ : 1; + bool safe_for_termination_ : 1; i::InterruptsScope interrupts_scope_; i::Address previous_stack_height_; diff --git a/deps/v8/src/api/api-macros.h b/deps/v8/src/api/api-macros.h index b126e1cd5a06fd..9b339321e7a430 100644 --- a/deps/v8/src/api/api-macros.h +++ b/deps/v8/src/api/api-macros.h @@ -30,9 +30,9 @@ * TODO(jochen): Remove calls form API methods to DO_NOT_USE macros. */ -#define LOG_API(isolate, class_name, function_name) \ - i::RuntimeCallTimerScope _runtime_timer( \ - isolate, i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \ +#define LOG_API(isolate, class_name, function_name) \ + RCS_SCOPE(isolate, \ + i::RuntimeCallCounterId::kAPI_##class_name##_##function_name); \ LOG(isolate, ApiEntryCall("v8::" #class_name "::" #function_name)) #define ENTER_V8_DO_NOT_USE(isolate) i::VMState __state__((isolate)) @@ -126,7 +126,3 @@ EXCEPTION_BAILOUT_CHECK_SCOPED_DO_NOT_USE(isolate, Nothing()) #define RETURN_ESCAPED(value) return handle_scope.Escape(value); - -// TODO(jochen): This should be #ifdef DEBUG -#ifdef V8_CHECK_MICROTASKS_SCOPES_CONSISTENCY -#endif diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 56bf5bd47c4e09..46d54f6f5872ae 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -74,9 +74,9 @@ MaybeHandle DefineAccessorProperty(Isolate* isolate, Handle setter, PropertyAttributes attributes) { DCHECK(!getter->IsFunctionTemplateInfo() || - !FunctionTemplateInfo::cast(*getter).do_not_cache()); + FunctionTemplateInfo::cast(*getter).should_cache()); DCHECK(!setter->IsFunctionTemplateInfo() || - !FunctionTemplateInfo::cast(*setter).do_not_cache()); + FunctionTemplateInfo::cast(*setter).should_cache()); if (getter->IsFunctionTemplateInfo() && FunctionTemplateInfo::cast(*getter).BreakAtEntry()) { ASSIGN_RETURN_ON_EXCEPTION( @@ -184,8 +184,7 @@ Object GetIntrinsic(Isolate* isolate, v8::Intrinsic intrinsic) { template MaybeHandle ConfigureInstance(Isolate* isolate, Handle obj, Handle data) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kConfigureInstance); + RCS_SCOPE(isolate, RuntimeCallCounterId::kConfigureInstance); HandleScope scope(isolate); // Disable access checks while instantiating the object. 
AccessCheckDisableScope access_check_scope(isolate, obj); @@ -288,16 +287,20 @@ enum class CachingMode { kLimited, kUnlimited }; MaybeHandle ProbeInstantiationsCache( Isolate* isolate, Handle native_context, int serial_number, CachingMode caching_mode) { - DCHECK_LE(1, serial_number); - if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { + DCHECK_NE(serial_number, TemplateInfo::kDoNotCache); + if (serial_number == TemplateInfo::kUncached) { + return {}; + } + + if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { FixedArray fast_cache = native_context->fast_template_instantiations_cache(); - Handle object{fast_cache.get(serial_number - 1), isolate}; + Handle object{fast_cache.get(serial_number), isolate}; if (object->IsTheHole(isolate)) return {}; return Handle::cast(object); } if (caching_mode == CachingMode::kUnlimited || - (serial_number <= TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { + (serial_number < TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { SimpleNumberDictionary slow_cache = native_context->slow_template_instantiations_cache(); InternalIndex entry = slow_cache.FindEntry(isolate, serial_number); @@ -310,19 +313,27 @@ MaybeHandle ProbeInstantiationsCache( void CacheTemplateInstantiation(Isolate* isolate, Handle native_context, - int serial_number, CachingMode caching_mode, + Handle data, + CachingMode caching_mode, Handle object) { - DCHECK_LE(1, serial_number); - if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { + DCHECK_NE(TemplateInfo::kDoNotCache, data->serial_number()); + + int serial_number = data->serial_number(); + if (serial_number == TemplateInfo::kUncached) { + serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + } + + if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { Handle fast_cache = handle(native_context->fast_template_instantiations_cache(), isolate); Handle new_cache = - FixedArray::SetAndGrow(isolate, fast_cache, serial_number - 1, object); + FixedArray::SetAndGrow(isolate, fast_cache, serial_number, object); if (*new_cache != *fast_cache) { native_context->set_fast_template_instantiations_cache(*new_cache); } + data->set_serial_number(serial_number); } else if (caching_mode == CachingMode::kUnlimited || - (serial_number <= + (serial_number < TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { Handle cache = handle(native_context->slow_template_instantiations_cache(), isolate); @@ -331,20 +342,28 @@ void CacheTemplateInstantiation(Isolate* isolate, if (*new_cache != *cache) { native_context->set_slow_template_instantiations_cache(*new_cache); } + data->set_serial_number(serial_number); + } else { + // we've overflowed the cache limit, no more caching + data->set_serial_number(TemplateInfo::kDoNotCache); } } void UncacheTemplateInstantiation(Isolate* isolate, Handle native_context, - int serial_number, CachingMode caching_mode) { - DCHECK_LE(1, serial_number); - if (serial_number <= TemplateInfo::kFastTemplateInstantiationsCacheSize) { + Handle data, + CachingMode caching_mode) { + int serial_number = data->serial_number(); + if (serial_number < 0) return; + + if (serial_number < TemplateInfo::kFastTemplateInstantiationsCacheSize) { FixedArray fast_cache = native_context->fast_template_instantiations_cache(); - DCHECK(!fast_cache.get(serial_number - 1).IsUndefined(isolate)); - fast_cache.set_undefined(serial_number - 1); + DCHECK(!fast_cache.get(serial_number).IsUndefined(isolate)); + fast_cache.set_undefined(serial_number); + 
data->set_serial_number(TemplateInfo::kUncached); } else if (caching_mode == CachingMode::kUnlimited || - (serial_number <= + (serial_number < TemplateInfo::kSlowTemplateInstantiationsCacheSize)) { Handle cache = handle(native_context->slow_template_instantiations_cache(), isolate); @@ -352,6 +371,7 @@ void UncacheTemplateInstantiation(Isolate* isolate, DCHECK(entry.is_found()); cache = SimpleNumberDictionary::DeleteEntry(isolate, cache, entry); native_context->set_slow_template_instantiations_cache(*cache); + data->set_serial_number(TemplateInfo::kUncached); } } @@ -371,23 +391,22 @@ MaybeHandle InstantiateObject(Isolate* isolate, Handle info, Handle new_target, bool is_prototype) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kInstantiateObject); + RCS_SCOPE(isolate, RuntimeCallCounterId::kInstantiateObject); Handle constructor; - int serial_number = info->serial_number(); + bool should_cache = info->should_cache(); if (!new_target.is_null()) { if (IsSimpleInstantiation(isolate, *info, *new_target)) { constructor = Handle::cast(new_target); } else { // Disable caching for subclass instantiation. - serial_number = 0; + should_cache = false; } } // Fast path. Handle result; - if (serial_number) { + if (should_cache && info->is_cached()) { if (ProbeInstantiationsCache(isolate, isolate->native_context(), - serial_number, CachingMode::kLimited) + info->serial_number(), CachingMode::kLimited) .ToHandle(&result)) { return isolate->factory()->CopyJSObject(result); } @@ -430,9 +449,9 @@ MaybeHandle InstantiateObject(Isolate* isolate, // TODO(dcarney): is this necessary? JSObject::MigrateSlowToFast(result, 0, "ApiNatives::InstantiateObject"); // Don't cache prototypes. - if (serial_number) { - CacheTemplateInstantiation(isolate, isolate->native_context(), - serial_number, CachingMode::kLimited, result); + if (should_cache) { + CacheTemplateInstantiation(isolate, isolate->native_context(), info, + CachingMode::kLimited, result); result = isolate->factory()->CopyJSObject(result); } } @@ -465,12 +484,11 @@ MaybeHandle GetInstancePrototype(Isolate* isolate, MaybeHandle InstantiateFunction( Isolate* isolate, Handle native_context, Handle data, MaybeHandle maybe_name) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kInstantiateFunction); - int serial_number = data->serial_number(); - if (serial_number) { + RCS_SCOPE(isolate, RuntimeCallCounterId::kInstantiateFunction); + bool should_cache = data->should_cache(); + if (should_cache && data->is_cached()) { Handle result; - if (ProbeInstantiationsCache(isolate, native_context, serial_number, + if (ProbeInstantiationsCache(isolate, native_context, data->serial_number(), CachingMode::kUnlimited) .ToHandle(&result)) { return Handle::cast(result); @@ -504,7 +522,7 @@ MaybeHandle InstantiateFunction( GetInstancePrototype(isolate, parent), JSFunction); CHECK(parent_prototype->IsHeapObject()); - JSObject::ForceSetPrototype(Handle::cast(prototype), + JSObject::ForceSetPrototype(isolate, Handle::cast(prototype), Handle::cast(parent_prototype)); } } @@ -517,18 +535,16 @@ MaybeHandle InstantiateFunction( Handle function = ApiNatives::CreateApiFunction( isolate, native_context, data, prototype, function_type, maybe_name); - if (serial_number) { + if (should_cache) { // Cache the function. 
- CacheTemplateInstantiation(isolate, native_context, serial_number, + CacheTemplateInstantiation(isolate, native_context, data, CachingMode::kUnlimited, function); } MaybeHandle result = ConfigureInstance(isolate, function, data); if (result.is_null()) { // Uncache on error. - if (serial_number) { - UncacheTemplateInstantiation(isolate, native_context, serial_number, - CachingMode::kUnlimited); - } + UncacheTemplateInstantiation(isolate, native_context, data, + CachingMode::kUnlimited); return MaybeHandle(); } data->set_published(true); @@ -596,7 +612,8 @@ MaybeHandle ApiNatives::InstantiateRemoteObject( object_map->set_may_have_interesting_symbols(true); Handle object = isolate->factory()->NewJSObjectFromMap(object_map); - JSObject::ForceSetPrototype(object, isolate->factory()->null_value()); + JSObject::ForceSetPrototype(isolate, object, + isolate->factory()->null_value()); return object; } @@ -653,8 +670,7 @@ Handle ApiNatives::CreateApiFunction( Isolate* isolate, Handle native_context, Handle obj, Handle prototype, InstanceType type, MaybeHandle maybe_name) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kCreateApiFunction); + RCS_SCOPE(isolate, RuntimeCallCounterId::kCreateApiFunction); Handle shared = FunctionTemplateInfo::GetOrCreateSharedFunctionInfo(isolate, obj, maybe_name); diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index 01833cafc61d96..2c1223afedc5c0 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -568,7 +568,7 @@ StartupData SnapshotCreator::CreateBlob( i::GarbageCollectionReason::kSnapshotCreator); { i::HandleScope scope(isolate); - isolate->heap()->CompactWeakArrayLists(internal::AllocationType::kOld); + isolate->heap()->CompactWeakArrayLists(); } i::Snapshot::ClearReconstructableDataForSerialization( @@ -1095,9 +1095,13 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) { // --- T e m p l a t e --- -static void InitializeTemplate(i::TemplateInfo that, int type) { +static void InitializeTemplate(i::TemplateInfo that, int type, + bool do_not_cache) { that.set_number_of_properties(0); that.set_tag(type); + int serial_number = + do_not_cache ? i::TemplateInfo::kDoNotCache : i::TemplateInfo::kUncached; + that.set_serial_number(serial_number); } void Template::Set(v8::Local name, v8::Local value, @@ -1107,15 +1111,18 @@ void Template::Set(v8::Local name, v8::Local value, ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); i::HandleScope scope(isolate); auto value_obj = Utils::OpenHandle(*value); + Utils::ApiCheck(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo(), "v8::Template::Set", "Invalid value, must be a primitive or a Template"); + + // The template cache only performs shallow clones, if we set an + // ObjectTemplate as a property value then we can not cache the receiver + // template. 
if (value_obj->IsObjectTemplateInfo()) { - templ->set_serial_number(0); - if (templ->IsFunctionTemplateInfo()) { - i::Handle::cast(templ)->set_do_not_cache(true); - } + templ->set_serial_number(i::TemplateInfo::kDoNotCache); } + i::ApiNatives::AddDataProperty(isolate, templ, Utils::OpenHandle(*name), value_obj, static_cast(attribute)); @@ -1147,8 +1154,9 @@ void Template::SetAccessorProperty(v8::Local name, } // --- F u n c t i o n T e m p l a t e --- -static void InitializeFunctionTemplate(i::FunctionTemplateInfo info) { - InitializeTemplate(info, Consts::FUNCTION_TEMPLATE); +static void InitializeFunctionTemplate(i::FunctionTemplateInfo info, + bool do_not_cache) { + InitializeTemplate(info, Consts::FUNCTION_TEMPLATE, do_not_cache); info.set_flag(0); } @@ -1179,7 +1187,7 @@ void FunctionTemplate::SetPrototypeProviderTemplate( Utils::OpenHandle(*prototype_provider); Utils::ApiCheck(self->GetPrototypeTemplate().IsUndefined(i_isolate), "v8::FunctionTemplate::SetPrototypeProviderTemplate", - "Protoype must be undefiend"); + "Protoype must be undefined"); Utils::ApiCheck(self->GetParentTemplate().IsUndefined(i_isolate), "v8::FunctionTemplate::SetPrototypeProviderTemplate", "Prototype provider must be empty"); @@ -1212,7 +1220,7 @@ static Local FunctionTemplateNew( bool do_not_cache, v8::Local cached_property_name = v8::Local(), SideEffectType side_effect_type = SideEffectType::kHasSideEffect, - const CFunction* c_function = nullptr) { + const MemorySpan& c_function_overloads = {}) { i::Handle struct_obj = isolate->factory()->NewStruct( i::FUNCTION_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); i::Handle obj = @@ -1221,14 +1229,8 @@ static Local FunctionTemplateNew( // Disallow GC until all fields of obj have acceptable types. i::DisallowGarbageCollection no_gc; i::FunctionTemplateInfo raw = *obj; - InitializeFunctionTemplate(raw); + InitializeFunctionTemplate(raw, do_not_cache); raw.set_length(length); - raw.set_do_not_cache(do_not_cache); - int next_serial_number = i::FunctionTemplateInfo::kInvalidSerialNumber; - if (!do_not_cache) { - next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); - } - raw.set_serial_number(next_serial_number); raw.set_undetectable(false); raw.set_needs_access_check(false); raw.set_accept_any_receiver(true); @@ -1243,7 +1245,7 @@ static Local FunctionTemplateNew( } if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type, - c_function); + c_function_overloads); } return Utils::ToLocal(obj); } @@ -1257,10 +1259,24 @@ Local FunctionTemplate::New( // function templates when the isolate is created for serialization. LOG_API(i_isolate, FunctionTemplate, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - auto templ = FunctionTemplateNew(i_isolate, callback, data, signature, length, - behavior, false, Local(), - side_effect_type, c_function); - return templ; + return FunctionTemplateNew( + i_isolate, callback, data, signature, length, behavior, false, + Local(), side_effect_type, + c_function ? 
MemorySpan{c_function, 1} + : MemorySpan{}); +} + +Local FunctionTemplate::NewWithCFunctionOverloads( + Isolate* isolate, FunctionCallback callback, v8::Local data, + v8::Local signature, int length, ConstructorBehavior behavior, + SideEffectType side_effect_type, + const MemorySpan& c_function_overloads) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + LOG_API(i_isolate, FunctionTemplate, New); + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + return FunctionTemplateNew(i_isolate, callback, data, signature, length, + behavior, false, Local(), + side_effect_type, c_function_overloads); } Local FunctionTemplate::NewWithCache( @@ -1291,10 +1307,10 @@ Local AccessorSignature::New( (obj)->setter(*foreign); \ } while (false) -void FunctionTemplate::SetCallHandler(FunctionCallback callback, - v8::Local data, - SideEffectType side_effect_type, - const CFunction* c_function) { +void FunctionTemplate::SetCallHandler( + FunctionCallback callback, v8::Local data, + SideEffectType side_effect_type, + const MemorySpan& c_function_overloads) { auto info = Utils::OpenHandle(this); EnsureNotPublished(info, "v8::FunctionTemplate::SetCallHandler"); i::Isolate* isolate = info->GetIsolate(); @@ -1308,15 +1324,28 @@ void FunctionTemplate::SetCallHandler(FunctionCallback callback, data = v8::Undefined(reinterpret_cast(isolate)); } obj->set_data(*Utils::OpenHandle(*data)); - // Blink passes CFunction's constructed with the default constructor - // for non-fast calls, so we should check the address too. - if (c_function != nullptr && c_function->GetAddress()) { - i::FunctionTemplateInfo::SetCFunction( - isolate, info, - i::handle(*FromCData(isolate, c_function->GetAddress()), isolate)); - i::FunctionTemplateInfo::SetCSignature( - isolate, info, - i::handle(*FromCData(isolate, c_function->GetTypeInfo()), isolate)); + if (c_function_overloads.size() > 0) { + // Stores the data for a sequence of CFunction overloads into a single + // FixedArray, as [address_0, signature_0, ... address_n-1, signature_n-1]. + i::Handle function_overloads = + isolate->factory()->NewFixedArray(static_cast( + c_function_overloads.size() * + i::FunctionTemplateInfo::kFunctionOverloadEntrySize)); + int function_count = static_cast(c_function_overloads.size()); + for (int i = 0; i < function_count; i++) { + const CFunction& c_function = c_function_overloads.data()[i]; + i::Handle address = + FromCData(isolate, c_function.GetAddress()); + function_overloads->set( + i::FunctionTemplateInfo::kFunctionOverloadEntrySize * i, *address); + i::Handle signature = + FromCData(isolate, c_function.GetTypeInfo()); + function_overloads->set( + i::FunctionTemplateInfo::kFunctionOverloadEntrySize * i + 1, + *signature); + } + i::FunctionTemplateInfo::SetCFunctionOverloads(isolate, info, + function_overloads); } info->set_call_code(*obj, kReleaseStore); } @@ -1444,13 +1473,8 @@ static Local ObjectTemplateNew( // Disallow GC until all fields of obj have acceptable types. 
i::DisallowGarbageCollection no_gc; i::ObjectTemplateInfo raw = *obj; - InitializeTemplate(raw, Consts::OBJECT_TEMPLATE); + InitializeTemplate(raw, Consts::OBJECT_TEMPLATE, do_not_cache); raw.set_data(0); - int next_serial_number = 0; - if (!do_not_cache) { - next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); - } - raw.set_serial_number(next_serial_number); if (!constructor.IsEmpty()) { raw.set_constructor(*Utils::OpenHandle(*constructor)); } @@ -1926,26 +1950,44 @@ MaybeLocal Script::Run(Local context) { ENTER_V8(isolate, context, Script, Run, MaybeLocal(), InternalEscapableScope); i::HistogramTimerScope execute_timer(isolate->counters()->execute(), true); - i::AggregatingHistogramTimerScope timer(isolate->counters()->compile_lazy()); + i::AggregatingHistogramTimerScope histogram_timer( + isolate->counters()->compile_lazy()); i::TimerEventScope timer_scope(isolate); auto fun = i::Handle::cast(Utils::OpenHandle(this)); // TODO(crbug.com/1193459): remove once ablation study is completed - if (i::FLAG_script_run_delay) { - v8::base::OS::Sleep( - v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay)); + base::ElapsedTimer timer; + base::TimeDelta delta; + if (i::FLAG_script_delay > 0) { + delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay); } - if (i::FLAG_script_run_delay_once && !isolate->did_run_script_delay()) { - v8::base::OS::Sleep( - v8::base::TimeDelta::FromMilliseconds(i::FLAG_script_run_delay_once)); + if (i::FLAG_script_delay_once > 0 && !isolate->did_run_script_delay()) { + delta = v8::base::TimeDelta::FromMillisecondsD(i::FLAG_script_delay_once); isolate->set_did_run_script_delay(true); } + if (i::FLAG_script_delay_fraction > 0.0) { + timer.Start(); + } else if (delta.InMicroseconds() > 0) { + timer.Start(); + while (timer.Elapsed() < delta) { + // Busy wait. + } + } i::Handle receiver = isolate->global_proxy(); Local result; has_pending_exception = !ToLocal( i::Execution::Call(isolate, fun, receiver, 0, nullptr), &result); + if (i::FLAG_script_delay_fraction > 0.0) { + delta = v8::base::TimeDelta::FromMillisecondsD( + timer.Elapsed().InMillisecondsF() * i::FLAG_script_delay_fraction); + timer.Restart(); + while (timer.Elapsed() < delta) { + // Busy wait. 
+ } + } + RETURN_ON_FAILED_EXECUTION(Value); RETURN_ESCAPED(result); } @@ -4062,34 +4104,56 @@ Maybe v8::Object::CreateDataProperty(v8::Local context, v8::Local key, v8::Local value) { auto isolate = reinterpret_cast(context->GetIsolate()); - ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), - i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle key_obj = Utils::OpenHandle(*key); i::Handle value_obj = Utils::OpenHandle(*value); - Maybe result = i::JSReceiver::CreateDataProperty( - isolate, self, key_obj, value_obj, Just(i::kDontThrow)); - has_pending_exception = result.IsNothing(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return result; + i::LookupIterator::Key lookup_key(isolate, key_obj); + i::LookupIterator it(isolate, self, lookup_key, i::LookupIterator::OWN); + if (self->IsJSProxy()) { + ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), + i::HandleScope); + Maybe result = + i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; + } else { + ENTER_V8_NO_SCRIPT(isolate, context, Object, CreateDataProperty, + Nothing(), i::HandleScope); + Maybe result = + i::JSObject::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; + } } Maybe v8::Object::CreateDataProperty(v8::Local context, uint32_t index, v8::Local value) { auto isolate = reinterpret_cast(context->GetIsolate()); - ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), - i::HandleScope); i::Handle self = Utils::OpenHandle(this); i::Handle value_obj = Utils::OpenHandle(*value); i::LookupIterator it(isolate, self, index, self, i::LookupIterator::OWN); - Maybe result = - i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); - has_pending_exception = result.IsNothing(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); - return result; + if (self->IsJSProxy()) { + ENTER_V8(isolate, context, Object, CreateDataProperty, Nothing(), + i::HandleScope); + Maybe result = + i::JSReceiver::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; + } else { + ENTER_V8_NO_SCRIPT(isolate, context, Object, CreateDataProperty, + Nothing(), i::HandleScope); + Maybe result = + i::JSObject::CreateDataProperty(&it, value_obj, Just(i::kDontThrow)); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + return result; + } } struct v8::PropertyDescriptor::PrivateData { @@ -4339,17 +4403,27 @@ Local v8::Object::GetPrototype() { Maybe v8::Object::SetPrototype(Local context, Local value) { auto isolate = reinterpret_cast(context->GetIsolate()); - ENTER_V8(isolate, context, Object, SetPrototype, Nothing(), - i::HandleScope); auto self = Utils::OpenHandle(this); auto value_obj = Utils::OpenHandle(*value); - // We do not allow exceptions thrown while setting the prototype - // to propagate outside. 
- TryCatch try_catch(reinterpret_cast(isolate)); - auto result = - i::JSReceiver::SetPrototype(self, value_obj, false, i::kThrowOnError); - has_pending_exception = result.IsNothing(); - RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + if (self->IsJSProxy()) { + ENTER_V8(isolate, context, Object, SetPrototype, Nothing(), + i::HandleScope); + // We do not allow exceptions thrown while setting the prototype + // to propagate outside. + TryCatch try_catch(reinterpret_cast(isolate)); + auto result = i::JSProxy::SetPrototype(i::Handle::cast(self), + value_obj, false, i::kThrowOnError); + has_pending_exception = result.IsNothing(); + RETURN_ON_FAILED_EXECUTION_PRIMITIVE(bool); + } else { + ENTER_V8_NO_SCRIPT_NO_EXCEPTION(isolate); + auto result = i::JSObject::SetPrototype(i::Handle::cast(self), + value_obj, false, i::kThrowOnError); + if (result.IsNothing()) { + isolate->clear_pending_exception(); + return Nothing(); + } + } return Just(true); } @@ -5605,19 +5679,33 @@ Local Symbol::Description() const { // RO_SPACE. Since RO_SPACE objects are immovable we can use the // Handle(Address*) constructor with the address of the description // field in the Symbol object without needing an isolate. - DCHECK(!COMPRESS_POINTERS_BOOL); + DCHECK(!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL); +#ifndef V8_COMPRESS_POINTERS_IN_SHARED_CAGE i::Handle ro_description(reinterpret_cast( sym->GetFieldAddress(i::Symbol::kDescriptionOffset))); return Utils::ToLocal(ro_description); +#else + isolate = reinterpret_cast(Isolate::GetCurrent()); +#endif } - i::Handle description(sym->description(), isolate); + return Description(reinterpret_cast(isolate)); +} +Local Symbol::Description(Isolate* isolate) const { + i::Handle sym = Utils::OpenHandle(this); + i::Handle description(sym->description(), + reinterpret_cast(isolate)); return Utils::ToLocal(description); } Local Private::Name() const { - return reinterpret_cast(this)->Description(); + const Symbol* sym = reinterpret_cast(this); + i::Handle i_sym = Utils::OpenHandle(sym); + // v8::Private symbols are created by API and are therefore writable, so we + // can always recover an Isolate. 
+ i::Isolate* isolate = i::GetIsolateFromWritableObject(*i_sym); + return sym->Description(reinterpret_cast(isolate)); } double Number::Value() const { @@ -5901,12 +5989,7 @@ void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) { i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics); } -void V8::SetIsCrossOriginIsolated() { - i::FLAG_harmony_sharedarraybuffer = true; -#if V8_ENABLE_WEBASSEMBLY - i::FLAG_experimental_wasm_threads = true; -#endif // V8_ENABLE_WEBASSEMBLY -} +void V8::SetIsCrossOriginIsolated() {} template struct InvokeBootstrapper; @@ -6406,6 +6489,16 @@ bool FunctionTemplate::HasInstance(v8::Local value) { return false; } +bool FunctionTemplate::IsLeafTemplateForApiObject( + v8::Local value) const { + i::DisallowGarbageCollection no_gc; + + i::Object object = *Utils::OpenHandle(*value); + + auto self = Utils::OpenHandle(this); + return self->IsLeafTemplateForApiObject(object); +} + Local v8::External::New(Isolate* isolate, void* value) { STATIC_ASSERT(sizeof(value) == sizeof(i::Address)); i::Isolate* i_isolate = reinterpret_cast(isolate); @@ -8326,6 +8419,10 @@ v8::Local Isolate::GetIncumbentContext() { return Utils::ToLocal(context); } +v8::Local Isolate::ThrowError(v8::Local message) { + return ThrowException(v8::Exception::Error(message)); +} + v8::Local Isolate::ThrowException(v8::Local value) { i::Isolate* isolate = reinterpret_cast(this); ENTER_V8_DO_NOT_USE(isolate); @@ -8398,6 +8495,11 @@ EmbedderHeapTracer* Isolate::GetEmbedderHeapTracer() { return isolate->heap()->GetEmbedderHeapTracer(); } +void Isolate::SetEmbedderRootsHandler(EmbedderRootsHandler* handler) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->heap()->SetEmbedderRootsHandler(handler); +} + void Isolate::AttachCppHeap(CppHeap* cpp_heap) { i::Isolate* isolate = reinterpret_cast(this); isolate->heap()->AttachCppHeap(cpp_heap); @@ -8816,10 +8918,11 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics, } } else { i::Space* space = heap->space(static_cast(index)); - space_statistics->space_size_ = space->CommittedMemory(); - space_statistics->space_used_size_ = space->SizeOfObjects(); - space_statistics->space_available_size_ = space->Available(); - space_statistics->physical_space_size_ = space->CommittedPhysicalMemory(); + space_statistics->space_size_ = space ? space->CommittedMemory() : 0; + space_statistics->space_used_size_ = space ? space->SizeOfObjects() : 0; + space_statistics->space_available_size_ = space ? space->Available() : 0; + space_statistics->physical_space_size_ = + space ? 
space->CommittedPhysicalMemory() : 0; } return true; } @@ -9167,10 +9270,9 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) { void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) { i::Isolate* isolate = reinterpret_cast(this); - const base::AddressRegion& code_range = - isolate->heap()->memory_allocator()->code_range(); - *start = reinterpret_cast(code_range.begin()); - *length_in_bytes = code_range.size(); + const base::AddressRegion& code_region = isolate->heap()->code_region(); + *start = reinterpret_cast(code_region.begin()); + *length_in_bytes = code_region.size(); } void Isolate::GetEmbeddedCodeRange(const void** start, @@ -9252,12 +9354,19 @@ CALLBACK_SETTER(WasmSimdEnabledCallback, WasmSimdEnabledCallback, CALLBACK_SETTER(WasmExceptionsEnabledCallback, WasmExceptionsEnabledCallback, wasm_exceptions_enabled_callback) +CALLBACK_SETTER(SharedArrayBufferConstructorEnabledCallback, + SharedArrayBufferConstructorEnabledCallback, + sharedarraybuffer_constructor_enabled_callback) + void Isolate::InstallConditionalFeatures(Local context) { -#if V8_ENABLE_WEBASSEMBLY v8::HandleScope handle_scope(this); v8::Context::Scope context_scope(context); - i::WasmJs::InstallConditionalFeatures(reinterpret_cast(this), - Utils::OpenHandle(*context)); + i::Isolate* isolate = reinterpret_cast(this); + isolate->InstallConditionalFeatures(Utils::OpenHandle(*context)); +#if V8_ENABLE_WEBASSEMBLY + if (i::FLAG_expose_wasm) { + i::WasmJs::InstallConditionalFeatures(isolate, Utils::OpenHandle(*context)); + } #endif // V8_ENABLE_WEBASSEMBLY } @@ -10021,7 +10130,16 @@ const HeapSnapshot* HeapProfiler::TakeHeapSnapshot( bool treat_global_objects_as_roots) { return reinterpret_cast( reinterpret_cast(this)->TakeSnapshot( - control, resolver, treat_global_objects_as_roots)); + control, resolver, treat_global_objects_as_roots, false)); +} + +const HeapSnapshot* HeapProfiler::TakeHeapSnapshotV8_92( + ActivityControl* control, ObjectNameResolver* resolver, + bool treat_global_objects_as_roots, bool capture_numeric_value) { + return reinterpret_cast( + reinterpret_cast(this)->TakeSnapshot( + control, resolver, treat_global_objects_as_roots, + capture_numeric_value)); } void HeapProfiler::StartTrackingHeapObjects(bool track_allocations) { @@ -10085,7 +10203,8 @@ void EmbedderHeapTracer::SetStackStart(void* stack_start) { void EmbedderHeapTracer::NotifyEmptyEmbedderStack() { CHECK(isolate_); reinterpret_cast(isolate_) - ->global_handles() + ->heap() + ->local_embedder_heap_tracer() ->NotifyEmptyEmbedderStack(); } @@ -10384,8 +10503,7 @@ void InvokeAccessorGetterCallback( v8::AccessorNameGetterCallback getter) { // Leaving JavaScript. Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kAccessorGetterCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kAccessorGetterCallback); Address getter_address = reinterpret_cast
(getter); VMState state(isolate); ExternalCallbackScope call_scope(isolate, getter_address); @@ -10395,7 +10513,7 @@ void InvokeAccessorGetterCallback( void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, v8::FunctionCallback callback) { Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, RuntimeCallCounterId::kFunctionCallback); + RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionCallback); Address callback_address = reinterpret_cast
(callback); VMState state(isolate); ExternalCallbackScope call_scope(isolate, callback_address); @@ -10407,8 +10525,8 @@ void InvokeFinalizationRegistryCleanupFromTask( Handle finalization_registry, Handle callback) { Isolate* isolate = finalization_registry->native_context().GetIsolate(); - RuntimeCallTimerScope timer( - isolate, RuntimeCallCounterId::kFinalizationRegistryCleanupFromTask); + RCS_SCOPE(isolate, + RuntimeCallCounterId::kFinalizationRegistryCleanupFromTask); // Do not use ENTER_V8 because this is always called from a running // FinalizationRegistryCleanupTask within V8 and we should not log it as an // API call. This method is implemented here to avoid duplication of the diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 6e454b22f1ded5..a300359a0a5900 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -61,8 +61,8 @@ class OneByteStringStream { } // namespace -template -void AstRawString::Internalize(LocalIsolate* isolate) { +template +void AstRawString::Internalize(IsolateT* isolate) { DCHECK(!has_string_); if (literal_bytes_.length() == 0) { set_string(isolate->factory()->empty_string()); @@ -185,8 +185,8 @@ int AstRawString::Compare(const AstRawString* lhs, const AstRawString* rhs) { return lhs->byte_length() - rhs->byte_length(); } -template -Handle AstConsString::Allocate(LocalIsolate* isolate) const { +template +Handle AstConsString::Allocate(IsolateT* isolate) const { DCHECK(string_.is_null()); if (IsEmpty()) { @@ -210,8 +210,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle AstConsString::Allocate( LocalIsolate* isolate) const; -template -Handle AstConsString::AllocateFlat(LocalIsolate* isolate) const { +template +Handle AstConsString::AllocateFlat(IsolateT* isolate) const { if (IsEmpty()) { return isolate->factory()->empty_string(); } @@ -370,8 +370,8 @@ AstConsString* AstValueFactory::NewConsString(const AstRawString* str1, return NewConsString()->AddString(zone(), str1)->AddString(zone(), str2); } -template -void AstValueFactory::Internalize(LocalIsolate* isolate) { +template +void AstValueFactory::Internalize(IsolateT* isolate) { if (!zone_) return; // Strings need to be internalized before values, because values refer to diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index b66e11f99faceb..290da7838f7338 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -65,8 +65,8 @@ class AstRawString final : public ZoneObject { V8_EXPORT_PRIVATE bool IsOneByteEqualTo(const char* data) const; uint16_t FirstCharacter() const; - template - void Internalize(LocalIsolate* isolate); + template + void Internalize(IsolateT* isolate); // Access the physical representation: bool is_one_byte() const { return is_one_byte_; } @@ -161,17 +161,17 @@ class AstConsString final : public ZoneObject { return segment_.string == nullptr; } - template - Handle GetString(LocalIsolate* isolate) { + template + Handle GetString(IsolateT* isolate) { if (string_.is_null()) { string_ = Allocate(isolate); } return string_; } - template + template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) - Handle AllocateFlat(LocalIsolate* isolate) const; + Handle AllocateFlat(IsolateT* isolate) const; std::forward_list ToRawStrings() const; @@ -181,9 +181,9 @@ class AstConsString final : public ZoneObject { AstConsString() : string_(), segment_({nullptr, nullptr}) {} - template + template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE) - Handle 
Allocate(LocalIsolate* isolate) const; + Handle Allocate(IsolateT* isolate) const; Handle string_; @@ -354,8 +354,8 @@ class AstValueFactory { // Internalize all the strings in the factory, and prevent any more from being // allocated. Multiple calls to Internalize are allowed, for simplicity, where // subsequent calls are a no-op. - template - void Internalize(LocalIsolate* isolate); + template + void Internalize(IsolateT* isolate); #define F(name, str) \ const AstRawString* name##_string() const { \ diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 5515a4a3fccc10..3054f99f1cdfe5 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -443,8 +443,8 @@ int ObjectLiteral::InitDepthAndFlags() { return depth_acc; } -template -void ObjectLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) { +template +void ObjectLiteral::BuildBoilerplateDescription(IsolateT* isolate) { if (!boilerplate_description_.is_null()) return; int index_keys = 0; @@ -597,8 +597,8 @@ int ArrayLiteral::InitDepthAndFlags() { return depth_acc; } -template -void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) { +template +void ArrayLiteral::BuildBoilerplateDescription(IsolateT* isolate) { if (!boilerplate_description_.is_null()) return; int constants_length = @@ -642,7 +642,7 @@ void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) { } // New handle scope here, needs to be after BuildContants(). - typename LocalIsolate::HandleScopeType scope(isolate); + typename IsolateT::HandleScopeType scope(isolate); Object boilerplate_value = *GetBoilerplateValue(element, isolate); // We shouldn't allocate after creating the boilerplate value. @@ -697,9 +697,9 @@ bool MaterializedLiteral::IsSimple() const { return false; } -template +template Handle MaterializedLiteral::GetBoilerplateValue(Expression* expression, - LocalIsolate* isolate) { + IsolateT* isolate) { if (expression->IsLiteral()) { return expression->AsLiteral()->BuildValue(isolate); } @@ -742,8 +742,8 @@ bool MaterializedLiteral::NeedsInitialAllocationSite() { return false; } -template -void MaterializedLiteral::BuildConstants(LocalIsolate* isolate) { +template +void MaterializedLiteral::BuildConstants(IsolateT* isolate) { if (IsArrayLiteral()) { AsArrayLiteral()->BuildBoilerplateDescription(isolate); return; @@ -760,9 +760,9 @@ template EXPORT_TEMPLATE_DEFINE( V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(LocalIsolate* isolate); -template +template Handle GetTemplateObject::GetOrBuildDescription( - LocalIsolate* isolate) { + IsolateT* isolate) { Handle raw_strings = isolate->factory()->NewFixedArray( this->raw_strings()->length(), AllocationType::kOld); bool raw_and_cooked_match = true; @@ -978,8 +978,8 @@ bool Literal::AsArrayIndex(uint32_t* value) const { return ToUint32(value) && *value != kMaxUInt32; } -template -Handle Literal::BuildValue(LocalIsolate* isolate) const { +template +Handle Literal::BuildValue(IsolateT* isolate) const { switch (type()) { case kSmi: return handle(Smi::FromInt(smi_), isolate); diff --git a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index e11e6c458ffcee..e9c85920014593 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -986,8 +986,8 @@ class Literal final : public Expression { // Returns an appropriate Object representing this Literal, allocating // a heap object if needed. - template - Handle BuildValue(LocalIsolate* isolate) const; + template + Handle BuildValue(IsolateT* isolate) const; // Support for using Literal as a HashMap key. 
NOTE: Currently, this works // only for string and number literals! @@ -1058,17 +1058,16 @@ class MaterializedLiteral : public Expression { bool NeedsInitialAllocationSite(); // Populate the constant properties/elements fixed array. - template - void BuildConstants(LocalIsolate* isolate); + template + void BuildConstants(IsolateT* isolate); // If the expression is a literal, return the literal value; // if the expression is a materialized literal and is_simple // then return an Array or Object Boilerplate Description // Otherwise, return undefined literal as the placeholder // in the object literal boilerplate. - template - Handle GetBoilerplateValue(Expression* expression, - LocalIsolate* isolate); + template + Handle GetBoilerplateValue(Expression* expression, IsolateT* isolate); }; // Node for capturing a regexp literal. @@ -1265,9 +1264,9 @@ class ObjectLiteral final : public AggregateLiteral { int InitDepthAndFlags(); // Get the boilerplate description, populating it if necessary. - template + template Handle GetOrBuildBoilerplateDescription( - LocalIsolate* isolate) { + IsolateT* isolate) { if (boilerplate_description_.is_null()) { BuildBoilerplateDescription(isolate); } @@ -1275,8 +1274,8 @@ class ObjectLiteral final : public AggregateLiteral { } // Populate the boilerplate description. - template - void BuildBoilerplateDescription(LocalIsolate* isolate); + template + void BuildBoilerplateDescription(IsolateT* isolate); // Mark all computed expressions that are bound to a key that // is shadowed by a later occurrence of the same key. For the @@ -1366,9 +1365,9 @@ class ArrayLiteral final : public AggregateLiteral { int InitDepthAndFlags(); // Get the boilerplate description, populating it if necessary. - template + template Handle GetOrBuildBoilerplateDescription( - LocalIsolate* isolate) { + IsolateT* isolate) { if (boilerplate_description_.is_null()) { BuildBoilerplateDescription(isolate); } @@ -1376,8 +1375,8 @@ class ArrayLiteral final : public AggregateLiteral { } // Populate the boilerplate description. - template - void BuildBoilerplateDescription(LocalIsolate* isolate); + template + void BuildBoilerplateDescription(IsolateT* isolate); // Determines whether the {CreateShallowArrayLiteral} builtin can be used. bool IsFastCloningSupported() const; @@ -2121,8 +2120,8 @@ class FunctionLiteral final : public Expression { // Empty handle means that the function does not have a shared name (i.e. // the name will be set dynamically after creation of the function closure). - template - MaybeHandle GetName(LocalIsolate* isolate) const { + template + MaybeHandle GetName(IsolateT* isolate) const { return raw_name_ ? 
raw_name_->AllocateFlat(isolate) : MaybeHandle(); } bool has_shared_name() const { return raw_name_ != nullptr; } @@ -2644,9 +2643,8 @@ class GetTemplateObject final : public Expression { return raw_strings_; } - template - Handle GetOrBuildDescription( - LocalIsolate* isolate); + template + Handle GetOrBuildDescription(IsolateT* isolate); private: friend class AstNodeFactory; diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 62dc619141414e..6d1bff226d93e8 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -116,17 +116,17 @@ void SourceTextModuleDescriptor::AddStarExport( } namespace { -template -Handle ToStringOrUndefined(LocalIsolate* isolate, +template +Handle ToStringOrUndefined(IsolateT* isolate, const AstRawString* s) { if (s == nullptr) return isolate->factory()->undefined_value(); return s->string(); } } // namespace -template +template Handle SourceTextModuleDescriptor::AstModuleRequest::Serialize( - LocalIsolate* isolate) const { + IsolateT* isolate) const { // The import assertions will be stored in this array in the form: // [key1, value1, location1, key2, value2, location2, ...] Handle import_assertions_array = @@ -151,9 +151,9 @@ template Handle SourceTextModuleDescriptor::AstModuleRequest::Serialize( LocalIsolate* isolate) const; -template +template Handle SourceTextModuleDescriptor::Entry::Serialize( - LocalIsolate* isolate) const { + IsolateT* isolate) const { CHECK(Smi::IsValid(module_request)); // TODO(neis): Check earlier? return SourceTextModuleInfoEntry::New( isolate, ToStringOrUndefined(isolate, export_name), @@ -166,9 +166,9 @@ SourceTextModuleDescriptor::Entry::Serialize(Isolate* isolate) const; template Handle SourceTextModuleDescriptor::Entry::Serialize(LocalIsolate* isolate) const; -template +template Handle SourceTextModuleDescriptor::SerializeRegularExports( - LocalIsolate* isolate, Zone* zone) const { + IsolateT* isolate, Zone* zone) const { // We serialize regular exports in a way that lets us later iterate over their // local names and for each local name immediately access all its export // names. (Regular exports have neither import name nor module request.) 
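The AST hunks above are part of a mechanical rename of the template parameter LocalIsolate to IsolateT, avoiding confusion with the concrete LocalIsolate class while keeping the functions templated over both the main-thread and background isolate types. A minimal sketch of the pattern follows; it is not part of the patch, and the types are hypothetical stand-ins rather than V8's real Isolate/LocalIsolate.

#include <iostream>
#include <string>

// Hypothetical stand-ins for the two isolate flavours.
struct MainIsolate  { std::string factory() const { return "main-thread factory"; } };
struct LocalIsolate { std::string factory() const { return "background factory"; } };

// Before the rename the parameter was spelled "LocalIsolate", shadowing the
// class of the same name; only the parameter name changes, call sites do not.
template <typename IsolateT>
void Internalize(IsolateT* isolate) {
  std::cout << "internalizing via " << isolate->factory() << "\n";
}

int main() {
  MainIsolate main_isolate;
  LocalIsolate local_isolate;
  Internalize(&main_isolate);   // instantiation for the main-thread isolate
  Internalize(&local_isolate);  // instantiation for the background isolate
  return 0;
}
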
diff --git a/deps/v8/src/ast/modules.h b/deps/v8/src/ast/modules.h index f776d2b522631c..f496a0bb85a3c3 100644 --- a/deps/v8/src/ast/modules.h +++ b/deps/v8/src/ast/modules.h @@ -115,8 +115,8 @@ class SourceTextModuleDescriptor : public ZoneObject { module_request(-1), cell_index(0) {} - template - Handle Serialize(LocalIsolate* isolate) const; + template + Handle Serialize(IsolateT* isolate) const; }; enum CellIndexKind { kInvalid, kExport, kImport }; @@ -132,8 +132,8 @@ class SourceTextModuleDescriptor : public ZoneObject { position_(position), index_(index) {} - template - Handle Serialize(LocalIsolate* isolate) const; + template + Handle Serialize(IsolateT* isolate) const; const AstRawString* specifier() const { return specifier_; } const ImportAssertions* import_assertions() const { @@ -225,8 +225,8 @@ class SourceTextModuleDescriptor : public ZoneObject { namespace_imports_.push_back(entry); } - template - Handle SerializeRegularExports(LocalIsolate* isolate, + template + Handle SerializeRegularExports(IsolateT* isolate, Zone* zone) const; private: diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index de9b25a5c597e3..e5b621a283a1c1 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -623,9 +623,9 @@ void DeclarationScope::HoistSloppyBlockFunctions(AstNodeFactory* factory) { } bool DeclarationScope::Analyze(ParseInfo* info) { - RuntimeCallTimerScope runtimeTimer( - info->runtime_call_stats(), RuntimeCallCounterId::kCompileScopeAnalysis, - RuntimeCallStats::kThreadSpecific); + RCS_SCOPE(info->runtime_call_stats(), + RuntimeCallCounterId::kCompileScopeAnalysis, + RuntimeCallStats::kThreadSpecific); DCHECK_NOT_NULL(info->literal()); DeclarationScope* scope = info->literal()->scope(); @@ -2532,8 +2532,8 @@ void Scope::AllocateVariablesRecursively() { }); } -template -void Scope::AllocateScopeInfosRecursively(LocalIsolate* isolate, +template +void Scope::AllocateScopeInfosRecursively(IsolateT* isolate, MaybeHandle outer_scope) { DCHECK(scope_info_.is_null()); MaybeHandle next_outer_scope = outer_scope; @@ -2603,9 +2603,8 @@ void DeclarationScope::RecordNeedsPrivateNameContextChainRecalc() { } // static -template -void DeclarationScope::AllocateScopeInfos(ParseInfo* info, - LocalIsolate* isolate) { +template +void DeclarationScope::AllocateScopeInfos(ParseInfo* info, IsolateT* isolate) { DeclarationScope* scope = info->literal()->scope(); // No one else should have allocated a scope info for this scope yet. diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 717c797383be5e..2aa0c2376795ab 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -716,8 +716,8 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { V8_INLINE void AllocateNonParameterLocalsAndDeclaredGlobals(); void AllocateVariablesRecursively(); - template - void AllocateScopeInfosRecursively(LocalIsolate* isolate, + template + void AllocateScopeInfosRecursively(IsolateT* isolate, MaybeHandle outer_scope); void AllocateDebuggerScopeInfos(Isolate* isolate, @@ -1151,9 +1151,9 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { // Allocate ScopeInfos for top scope and any inner scopes that need them. // Does nothing if ScopeInfo is already allocated. 
- template + template V8_EXPORT_PRIVATE static void AllocateScopeInfos(ParseInfo* info, - LocalIsolate* isolate); + IsolateT* isolate); Handle CollectNonLocals(Isolate* isolate, Handle non_locals); diff --git a/deps/v8/src/ast/variables.cc b/deps/v8/src/ast/variables.cc index da2d8387f5117c..4f1f46150ffde7 100644 --- a/deps/v8/src/ast/variables.cc +++ b/deps/v8/src/ast/variables.cc @@ -30,14 +30,15 @@ bool Variable::IsGlobalObjectProperty() const { scope_ != nullptr && scope_->is_script_scope(); } -bool Variable::IsReplGlobalLet() const { - return scope()->is_repl_mode_scope() && mode() == VariableMode::kLet; +bool Variable::IsReplGlobal() const { + return scope()->is_repl_mode_scope() && + (mode() == VariableMode::kLet || mode() == VariableMode::kConst); } void Variable::RewriteLocationForRepl() { DCHECK(scope_->is_repl_mode_scope()); - if (mode() == VariableMode::kLet) { + if (mode() == VariableMode::kLet || mode() == VariableMode::kConst) { DCHECK_EQ(location(), VariableLocation::CONTEXT); bit_field_ = LocationField::update(bit_field_, VariableLocation::REPL_GLOBAL); diff --git a/deps/v8/src/ast/variables.h b/deps/v8/src/ast/variables.h index 7c6ee4324e8153..ec31eb0689ed16 100644 --- a/deps/v8/src/ast/variables.h +++ b/deps/v8/src/ast/variables.h @@ -125,8 +125,9 @@ class Variable final : public ZoneObject { bool IsLookupSlot() const { return location() == VariableLocation::LOOKUP; } bool IsGlobalObjectProperty() const; - // True for 'let' variables declared in the script scope of a REPL script. - bool IsReplGlobalLet() const; + // True for 'let' and 'const' variables declared in the script scope of a REPL + // script. + bool IsReplGlobal() const; bool is_dynamic() const { return IsDynamicVariableMode(mode()); } diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 5d6422be520db6..cb6940ea70a39e 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -27,6 +27,8 @@ #include +#include + // Small C++ header which defines implementation specific macros used to // identify the STL implementation. // - libc++: captures __config for _LIBCPP_VERSION @@ -35,6 +37,7 @@ #include "src/base/base-export.h" #include "src/base/build_config.h" +#include "src/base/macros.h" #if defined(V8_OS_STARBOARD) #include "starboard/atomic.h" @@ -77,6 +80,21 @@ using AtomicWord = SbAtomicPtr; using AtomicWord = intptr_t; #endif +namespace helper { +template +volatile std::atomic* to_std_atomic(volatile T* ptr) { + return reinterpret_cast*>(ptr); +} +template +volatile const std::atomic* to_std_atomic_const(volatile const T* ptr) { + return reinterpret_cast*>(ptr); +} +} // namespace helper + +inline void SeqCst_MemoryFence() { + std::atomic_thread_fence(std::memory_order_seq_cst); +} + // Atomically execute: // result = *ptr; // if (result == old_value) @@ -86,75 +104,225 @@ using AtomicWord = intptr_t; // I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|. // Always return the value of |*ptr| before the operation. // Acquire, Relaxed, Release correspond to standard C++ memory orders. 
-Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value); -Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value, - Atomic16 new_value); -Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, - Atomic32 new_value); -Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value); -Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, - Atomic32 new_value); -Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, - Atomic32 new_value); -#ifdef V8_HOST_ARCH_64_BIT -Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, - Atomic64 new_value); -Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value); -Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, - Atomic64 new_value); -Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, - Atomic64 new_value); -#endif // V8_HOST_ARCH_64_BIT - -// Atomically store new_value into |*ptr|, returning the previous value held in -// |*ptr|. -Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value); -#ifdef V8_HOST_ARCH_64_BIT -Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value); -#endif // V8_HOST_ARCH_64_BIT - -// Atomically increment |*ptr| by |increment|. Returns the new value of -// |*ptr| with the increment applied. -Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment); - -#ifdef V8_HOST_ARCH_64_BIT -Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment); -#endif // V8_HOST_ARCH_64_BIT - -void SeqCst_MemoryFence(); - -void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value); -void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value); -void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value); -void Release_Store(volatile Atomic32* ptr, Atomic32 value); -#ifdef V8_HOST_ARCH_64_BIT -void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value); -void Release_Store(volatile Atomic64* ptr, Atomic64 value); -#endif // V8_HOST_ARCH_64_BIT - -Atomic8 Relaxed_Load(volatile const Atomic8* ptr); -Atomic16 Relaxed_Load(volatile const Atomic16* ptr); -Atomic32 Relaxed_Load(volatile const Atomic32* ptr); -Atomic32 Acquire_Load(volatile const Atomic32* ptr); -#ifdef V8_HOST_ARCH_64_BIT -Atomic64 Relaxed_Load(volatile const Atomic64* ptr); -Atomic64 Acquire_Load(volatile const Atomic64* ptr); -#endif // V8_HOST_ARCH_64_BIT +inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, + Atomic8 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_relaxed, std::memory_order_relaxed); + return old_value; +} + +inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, + Atomic16 old_value, Atomic16 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_relaxed, std::memory_order_relaxed); + return old_value; +} + +inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, Atomic32 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_relaxed, std::memory_order_relaxed); + return old_value; +} + +inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return 
std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, + std::memory_order_relaxed); +} + +inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), + increment, + std::memory_order_relaxed); +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, Atomic32 new_value) { + atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_acquire, std::memory_order_acquire); + return old_value; +} + +inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, + Atomic8 new_value) { + bool result = atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_release, std::memory_order_relaxed); + USE(result); // Make gcc compiler happy. + return old_value; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, Atomic32 new_value) { + atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_release, std::memory_order_relaxed); + return old_value; +} + +inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_acq_rel, std::memory_order_acquire); + return old_value; +} + +inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_relaxed); +} + +inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_relaxed); +} + +inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_relaxed); +} + +inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_release); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_release); +} + +inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_relaxed); +} + +inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_relaxed); +} + +inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_relaxed); +} + +inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_acquire); +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_acquire); +} + +#if defined(V8_HOST_ARCH_64_BIT) + +inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, Atomic64 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_relaxed, std::memory_order_relaxed); + return old_value; +} + +inline Atomic64 
Relaxed_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, + std::memory_order_relaxed); +} + +inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), + increment, + std::memory_order_relaxed); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, Atomic64 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_acquire, std::memory_order_acquire); + return old_value; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, Atomic64 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_release, std::memory_order_relaxed); + return old_value; +} + +inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_acq_rel, std::memory_order_acquire); + return old_value; +} + +inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_relaxed); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + std::atomic_store_explicit(helper::to_std_atomic(ptr), value, + std::memory_order_release); +} + +inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_relaxed); +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), + std::memory_order_acquire); +} + +#endif // defined(V8_HOST_ARCH_64_BIT) + +inline void Relaxed_Memcpy(volatile Atomic8* dst, volatile const Atomic8* src, + size_t bytes) { + constexpr size_t kAtomicWordSize = sizeof(AtomicWord); + while (bytes > 0 && + !IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { + Relaxed_Store(dst++, Relaxed_Load(src++)); + --bytes; + } + if (IsAligned(reinterpret_cast(src), kAtomicWordSize) && + IsAligned(reinterpret_cast(dst), kAtomicWordSize)) { + while (bytes >= kAtomicWordSize) { + Relaxed_Store( + reinterpret_cast(dst), + Relaxed_Load(reinterpret_cast(src))); + dst += kAtomicWordSize; + src += kAtomicWordSize; + bytes -= kAtomicWordSize; + } + } + while (bytes > 0) { + Relaxed_Store(dst++, Relaxed_Load(src++)); + --bytes; + } +} } // namespace base } // namespace v8 -#if defined(V8_OS_WIN) || defined(V8_OS_STARBOARD) -#include "src/base/atomicops_internals_std.h" -#else -// TODO(ulan): Switch to std version after performance regression with Wheezy -// sysroot is no longer relevant. Debian Wheezy LTS ends on 31st of May 2018. -#include "src/base/atomicops_internals_portable.h" -#endif - // On some platforms we need additional declarations to make // AtomicWord compatible with our other Atomic* types. #if defined(V8_OS_MACOSX) || defined(V8_OS_OPENBSD) || defined(V8_OS_AIX) diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h deleted file mode 100644 index ac162e2a8257ca..00000000000000 --- a/deps/v8/src/base/atomicops_internals_portable.h +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright 2016 the V8 project authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -// This file is an internal atomic implementation, use atomicops.h instead. -// -// This implementation uses C++11 atomics' member functions. The code base is -// currently written assuming atomicity revolves around accesses instead of -// C++11's memory locations. The burden is on the programmer to ensure that all -// memory locations accessed atomically are never accessed non-atomically (tsan -// should help with this). -// -// Of note in this implementation: -// * All NoBarrier variants are implemented as relaxed. -// * All Barrier variants are implemented as sequentially-consistent. -// * Compare exchange's failure ordering is always the same as the success one -// (except for release, which fails as relaxed): using a weaker ordering is -// only valid under certain uses of compare exchange. -// * Acquire store doesn't exist in the C11 memory model, it is instead -// implemented as a relaxed store followed by a sequentially consistent -// fence. -// * Release load doesn't exist in the C11 memory model, it is instead -// implemented as sequentially consistent fence followed by a relaxed load. -// * Atomic increment is expected to return the post-incremented value, whereas -// C11 fetch add returns the previous value. The implementation therefore -// needs to increment twice (which the compiler should be able to detect and -// optimize). - -#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ -#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ - -#include - -#include "src/base/build_config.h" -#include "src/base/macros.h" - -namespace v8 { -namespace base { - -// This implementation is transitional and maintains the original API for -// atomicops.h. - -inline void SeqCst_MemoryFence() { -#if defined(__GLIBCXX__) - // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but - // not defined, leading to the linker complaining about undefined references. - __atomic_thread_fence(std::memory_order_seq_cst); -#else - std::atomic_thread_fence(std::memory_order_seq_cst); -#endif -} - -inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); - USE(result); // Make gcc compiler happy. 
- return old_value; -} - -inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, - Atomic16 old_value, Atomic16 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); - return old_value; -} - -inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); - return old_value; -} - -inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); -} - -inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); - return old_value; -} - -inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELEASE, __ATOMIC_RELAXED); - USE(result); // Make gcc compiler happy. - return old_value; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELEASE, __ATOMIC_RELAXED); - return old_value; -} - -inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELAXED); -} - -inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELAXED); -} - -inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELAXED); -} - -inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELEASE); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELEASE); -} - -inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { - return __atomic_load_n(ptr, __ATOMIC_RELAXED); -} - -inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) { - return __atomic_load_n(ptr, __ATOMIC_RELAXED); -} - -inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { - return __atomic_load_n(ptr, __ATOMIC_RELAXED); -} - -inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) { - return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); -} - -#if defined(V8_HOST_ARCH_64_BIT) - -inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELAXED, __ATOMIC_RELAXED); - return old_value; -} - -inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); -} - -inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return 
increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); - return old_value; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_RELEASE, __ATOMIC_RELAXED); - return old_value; -} - -inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - __atomic_compare_exchange_n(ptr, &old_value, new_value, false, - __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELAXED); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - __atomic_store_n(ptr, value, __ATOMIC_RELEASE); -} - -inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { - return __atomic_load_n(ptr, __ATOMIC_RELAXED); -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); -} - -#endif // defined(V8_HOST_ARCH_64_BIT) -} // namespace base -} // namespace v8 - -#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h deleted file mode 100644 index 1638b8b52a0b40..00000000000000 --- a/deps/v8/src/base/atomicops_internals_std.h +++ /dev/null @@ -1,224 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#ifndef V8_BASE_ATOMICOPS_INTERNALS_STD_H_ -#define V8_BASE_ATOMICOPS_INTERNALS_STD_H_ - -#include - -#include "src/base/build_config.h" -#include "src/base/macros.h" - -namespace v8 { -namespace base { - -namespace helper { -template -volatile std::atomic* to_std_atomic(volatile T* ptr) { - return reinterpret_cast*>(ptr); -} -template -volatile const std::atomic* to_std_atomic_const(volatile const T* ptr) { - return reinterpret_cast*>(ptr); -} -} // namespace helper - -inline void SeqCst_MemoryFence() { - std::atomic_thread_fence(std::memory_order_seq_cst); -} - -inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); - return old_value; -} - -inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, - Atomic16 old_value, Atomic16 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); - return old_value; -} - -inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); - return old_value; -} - -inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, - Atomic32 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_relaxed); -} - -inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, - Atomic32 increment) { - return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), - increment, - std::memory_order_relaxed); -} - -inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acquire, std::memory_order_acquire); - return old_value; -} - -inline Atomic8 Release_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, - Atomic8 new_value) { - bool result = atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, std::memory_order_relaxed); - USE(result); // Make gcc compiler happy. 
- return old_value; -} - -inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, std::memory_order_relaxed); - return old_value; -} - -inline Atomic32 AcquireRelease_CompareAndSwap(volatile Atomic32* ptr, - Atomic32 old_value, - Atomic32 new_value) { - atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acq_rel, std::memory_order_acquire); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Relaxed_Store(volatile Atomic16* ptr, Atomic16 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic16 Relaxed_Load(volatile const Atomic16* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic8 Acquire_Load(volatile const Atomic8* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -#if defined(V8_HOST_ARCH_64_BIT) - -inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_relaxed, std::memory_order_relaxed); - return old_value; -} - -inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, - Atomic64 new_value) { - return std::atomic_exchange_explicit(helper::to_std_atomic(ptr), new_value, - std::memory_order_relaxed); -} - -inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, - Atomic64 increment) { - return increment + std::atomic_fetch_add_explicit(helper::to_std_atomic(ptr), - increment, - std::memory_order_relaxed); -} - -inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acquire, std::memory_order_acquire); - return old_value; -} - -inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_release, 
std::memory_order_relaxed); - return old_value; -} - -inline Atomic64 AcquireRelease_CompareAndSwap(volatile Atomic64* ptr, - Atomic64 old_value, - Atomic64 new_value) { - std::atomic_compare_exchange_strong_explicit( - helper::to_std_atomic(ptr), &old_value, new_value, - std::memory_order_acq_rel, std::memory_order_acquire); - return old_value; -} - -inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_relaxed); -} - -inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { - std::atomic_store_explicit(helper::to_std_atomic(ptr), value, - std::memory_order_release); -} - -inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_relaxed); -} - -inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { - return std::atomic_load_explicit(helper::to_std_atomic_const(ptr), - std::memory_order_acquire); -} - -#endif // defined(V8_HOST_ARCH_64_BIT) -} // namespace base -} // namespace v8 - -#endif // V8_BASE_ATOMICOPS_INTERNALS_STD_H_ diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h index ca5fb459210ec2..7b2796e3df23b6 100644 --- a/deps/v8/src/base/bit-field.h +++ b/deps/v8/src/base/bit-field.h @@ -52,7 +52,7 @@ class BitField final { // Returns a type U with the bit field value encoded. static constexpr U encode(T value) { - CONSTEXPR_DCHECK(is_valid(value)); + DCHECK(is_valid(value)); return static_cast(value) << kShift; } diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index b137f73936a834..f790dfaab47bba 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -144,7 +144,7 @@ inline constexpr typename std::enable_if::value && sizeof(T) <= 8, unsigned>::type CountTrailingZerosNonZero(T value) { - CONSTEXPR_DCHECK(value != 0); + DCHECK_NE(0, value); #if V8_HAS_BUILTIN_CTZ return bits == 64 ? __builtin_ctzll(static_cast(value)) : __builtin_ctz(static_cast(value)); @@ -165,7 +165,7 @@ constexpr inline bool IsPowerOfTwo(T value) { template ::value>::type> inline constexpr int WhichPowerOfTwo(T value) { - CONSTEXPR_DCHECK(IsPowerOfTwo(value)); + DCHECK(IsPowerOfTwo(value)); #if V8_HAS_BUILTIN_CTZ STATIC_ASSERT(sizeof(T) <= 8); return sizeof(T) == 8 ? __builtin_ctzll(static_cast(value)) diff --git a/deps/v8/src/base/bounds.h b/deps/v8/src/base/bounds.h index fb8c968d660eca..0fe141b3097e65 100644 --- a/deps/v8/src/base/bounds.h +++ b/deps/v8/src/base/bounds.h @@ -15,7 +15,7 @@ namespace base { // branch. template inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) { - CONSTEXPR_DCHECK(lower_limit <= higher_limit); + DCHECK_LE(lower_limit, higher_limit); STATIC_ASSERT(sizeof(U) <= sizeof(T)); using unsigned_T = typename std::make_unsigned::type; // Use static_cast to support enum classes. 
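The base-library hunks above replace CONSTEXPR_DCHECK with the plain DCHECK variants inside constexpr functions and drop the CONSTEXPR_DCHECK macro from logging.h, which was only a guard for compilers without C++14 relaxed constexpr; with that baseline assumed, a DCHECK-style statement is legal directly inside a constexpr function. A minimal sketch of the pattern follows; it is not part of the patch, uses assert as a stand-in for DCHECK_LE, and has a simplified body modeled on base::IsInRange.

#include <cassert>
#include <type_traits>

// A range check usable both at run time and in constant evaluation. When the
// condition holds, the assert is a no-op even during constexpr evaluation.
template <typename T, typename U>
constexpr bool IsInRange(T value, U lower_limit, U higher_limit) {
  assert(lower_limit <= higher_limit);  // previously CONSTEXPR_DCHECK(...)
  static_assert(sizeof(U) <= sizeof(T), "limit type must not be wider than T");
  using unsigned_T = typename std::make_unsigned<T>::type;
  // One unsigned comparison covers both bounds, the same trick base::IsInRange uses.
  return static_cast<unsigned_T>(static_cast<unsigned_T>(value) -
                                 static_cast<unsigned_T>(lower_limit)) <=
         static_cast<unsigned_T>(static_cast<unsigned_T>(higher_limit) -
                                 static_cast<unsigned_T>(lower_limit));
}

static_assert(IsInRange(5, 0, 10), "still usable in constant expressions");

int main() { return IsInRange(11, 0, 10) ? 1 : 0; }
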
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index 17ef42a299a864..9ddf8939bef51c 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -50,7 +50,7 @@ #include "src/base/logging.h" #include "src/base/platform/wrappers.h" #if V8_OS_WIN -#include "src/base/win32-headers.h" // NOLINT +#include "src/base/win32-headers.h" #endif namespace v8 { diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h index f623198c2d2fc3..ce49b3996ed52a 100644 --- a/deps/v8/src/base/enum-set.h +++ b/deps/v8/src/base/enum-set.h @@ -79,7 +79,7 @@ class EnumSet { explicit constexpr EnumSet(T bits) : bits_(bits) {} static constexpr T Mask(E element) { - CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast(element)); + DCHECK_GT(sizeof(T) * 8, static_cast(element)); return T{1} << static_cast::type>(element); } diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h index 179da5ecba6965..819d589a81ea08 100644 --- a/deps/v8/src/base/hashmap.h +++ b/deps/v8/src/base/hashmap.h @@ -530,8 +530,8 @@ class TemplateHashMap AllocationPolicy>; public: - STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); // NOLINT - STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); // NOLINT + STATIC_ASSERT(sizeof(Key*) == sizeof(void*)); + STATIC_ASSERT(sizeof(Value*) == sizeof(void*)); struct value_type { Key* first; Value* second; diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h index 2c4c536cf32bc8..08db24a947eca2 100644 --- a/deps/v8/src/base/logging.h +++ b/deps/v8/src/base/logging.h @@ -134,12 +134,6 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, #endif -#if V8_HAS_CXX14_CONSTEXPR -#define CONSTEXPR_DCHECK(cond) DCHECK(cond) -#else -#define CONSTEXPR_DCHECK(cond) -#endif - namespace detail { template std::string PrintToString(Ts&&... ts) { diff --git a/deps/v8/src/base/once.h b/deps/v8/src/base/once.h index dd8b6be6213cb8..c4224e84e35478 100644 --- a/deps/v8/src/base/once.h +++ b/deps/v8/src/base/once.h @@ -53,10 +53,12 @@ #define V8_BASE_ONCE_H_ #include + #include #include #include "src/base/base-export.h" +#include "src/base/template-utils.h" namespace v8 { namespace base { @@ -76,9 +78,9 @@ enum : uint8_t { using PointerArgFunction = void (*)(void* arg); -template -struct OneArgFunction { - using type = void (*)(T); +template +struct FunctionWithArgs { + using type = void (*)(Args...); }; V8_BASE_EXPORT void CallOnceImpl(OnceType* once, @@ -90,11 +92,13 @@ inline void CallOnce(OnceType* once, std::function init_func) { } } -template +template ...>::value>> inline void CallOnce(OnceType* once, - typename OneArgFunction::type init_func, Arg* arg) { + typename FunctionWithArgs::type init_func, + Args... 
args) { if (once->load(std::memory_order_acquire) != ONCE_STATE_DONE) { - CallOnceImpl(once, [=]() { init_func(arg); }); + CallOnceImpl(once, [=]() { init_func(args...); }); } } diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h index 3c13e654c80cf5..77e9bb896e366d 100644 --- a/deps/v8/src/base/optional.h +++ b/deps/v8/src/base/optional.h @@ -558,32 +558,32 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional } constexpr const T* operator->() const { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return &storage_.value_; } constexpr T* operator->() { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return &storage_.value_; } constexpr const T& operator*() const& { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return storage_.value_; } constexpr T& operator*() & { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return storage_.value_; } constexpr const T&& operator*() const&& { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return std::move(storage_.value_); } constexpr T&& operator*() && { - CONSTEXPR_DCHECK(storage_.is_populated_); + DCHECK(storage_.is_populated_); return std::move(storage_.value_); } diff --git a/deps/v8/src/base/platform/mutex.h b/deps/v8/src/base/platform/mutex.h index 5685797f4ee9aa..328c593a30eb60 100644 --- a/deps/v8/src/base/platform/mutex.h +++ b/deps/v8/src/base/platform/mutex.h @@ -14,7 +14,7 @@ #include "src/base/logging.h" #if V8_OS_POSIX -#include // NOLINT +#include #endif #if V8_OS_STARBOARD @@ -164,6 +164,8 @@ class V8_BASE_EXPORT RecursiveMutex final { // successfully locked. bool TryLock() V8_WARN_UNUSED_RESULT; + V8_INLINE void AssertHeld() const { DCHECK_LT(0, level_); } + private: // The implementation-defined native handle type. 
#if V8_OS_POSIX diff --git a/deps/v8/src/base/platform/platform-aix.cc b/deps/v8/src/base/platform/platform-aix.cc index 6b6a870370b3b6..e5a5305d483ee3 100644 --- a/deps/v8/src/base/platform/platform-aix.cc +++ b/deps/v8/src/base/platform/platform-aix.cc @@ -82,7 +82,7 @@ double AIXTimezoneCache::LocalTimeOffset(double time_ms, bool is_utc) { TimezoneCache* OS::CreateTimezoneCache() { return new AIXTimezoneCache(); } static unsigned StringToLong(char* buffer) { - return static_cast(strtol(buffer, nullptr, 16)); // NOLINT + return static_cast(strtol(buffer, nullptr, 16)); } std::vector OS::GetSharedLibraryAddresses() { diff --git a/deps/v8/src/base/platform/platform-freebsd.cc b/deps/v8/src/base/platform/platform-freebsd.cc index edc793c662efc7..ac36b0527e7c3b 100644 --- a/deps/v8/src/base/platform/platform-freebsd.cc +++ b/deps/v8/src/base/platform/platform-freebsd.cc @@ -44,7 +44,7 @@ TimezoneCache* OS::CreateTimezoneCache() { } static unsigned StringToLong(char* buffer) { - return static_cast(strtol(buffer, nullptr, 16)); // NOLINT + return static_cast(strtol(buffer, nullptr, 16)); } std::vector OS::GetSharedLibraryAddresses() { diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc index ee787f7d9ab0bb..9f61a0aeb57d0b 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -23,12 +23,12 @@ #include #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \ defined(__NetBSD__) || defined(__OpenBSD__) -#include // NOLINT, for sysctl +#include // for sysctl #endif #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT) #define LOG_TAG "v8" -#include // NOLINT +#include #endif #include @@ -52,7 +52,7 @@ #endif #if V8_OS_LINUX -#include // NOLINT, for prctl +#include // for prctl #endif #if defined(V8_OS_FUCHSIA) @@ -82,7 +82,7 @@ extern int madvise(caddr_t, size_t, int); #endif #if defined(V8_LIBC_GLIBC) -extern "C" void* __libc_stack_end; // NOLINT +extern "C" void* __libc_stack_end; #endif namespace v8 { @@ -936,8 +936,7 @@ static void InitializeTlsBaseOffset() { buffer[kBufferSize - 1] = '\0'; char* period_pos = strchr(buffer, '.'); *period_pos = '\0'; - int kernel_version_major = - static_cast(strtol(buffer, nullptr, 10)); // NOLINT + int kernel_version_major = static_cast(strtol(buffer, nullptr, 10)); // The constants below are taken from pthreads.s from the XNU kernel // sources archive at www.opensource.apple.com. if (kernel_version_major < 11) { diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc index 50da60c72f547f..9fbb2570760650 100644 --- a/deps/v8/src/base/platform/platform-win32.cc +++ b/deps/v8/src/base/platform/platform-win32.cc @@ -30,7 +30,7 @@ #include #if defined(_MSC_VER) -#include // NOLINT +#include #endif // defined(_MSC_VER) // Extra functions for MinGW. 
Most of these are the _s functions which are in diff --git a/deps/v8/src/base/platform/semaphore.h b/deps/v8/src/base/platform/semaphore.h index 83a7a3392f7098..ec107bd290ebde 100644 --- a/deps/v8/src/base/platform/semaphore.h +++ b/deps/v8/src/base/platform/semaphore.h @@ -12,9 +12,9 @@ #endif #if V8_OS_MACOSX -#include // NOLINT +#include #elif V8_OS_POSIX -#include // NOLINT +#include #endif #if V8_OS_STARBOARD diff --git a/deps/v8/src/base/sanitizer/asan.h b/deps/v8/src/base/sanitizer/asan.h new file mode 100644 index 00000000000000..291006d58c2e72 --- /dev/null +++ b/deps/v8/src/base/sanitizer/asan.h @@ -0,0 +1,37 @@ +// Copyright 2018 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// AddressSanitizer support. + +#ifndef V8_BASE_SANITIZER_ASAN_H_ +#define V8_BASE_SANITIZER_ASAN_H_ + +#include + +#include "src/base/macros.h" + +#ifdef V8_USE_ADDRESS_SANITIZER + +#include + +#if !defined(ASAN_POISON_MEMORY_REGION) || !defined(ASAN_UNPOISON_MEMORY_REGION) +#error \ + "ASAN_POISON_MEMORY_REGION and ASAN_UNPOISON_MEMORY_REGION must be defined" +#endif + +#else // !V8_USE_ADDRESS_SANITIZER + +#define ASAN_POISON_MEMORY_REGION(start, size) \ + static_assert(std::is_pointer::value, \ + "static type violation"); \ + static_assert(std::is_convertible::value, \ + "static type violation"); \ + USE(start, size) + +#define ASAN_UNPOISON_MEMORY_REGION(start, size) \ + ASAN_POISON_MEMORY_REGION(start, size) + +#endif // !V8_USE_ADDRESS_SANITIZER + +#endif // V8_BASE_SANITIZER_ASAN_H_ diff --git a/deps/v8/src/sanitizer/lsan-page-allocator.cc b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc similarity index 97% rename from deps/v8/src/sanitizer/lsan-page-allocator.cc rename to deps/v8/src/base/sanitizer/lsan-page-allocator.cc index 7794e0b734e6dc..bb52eb368fd61e 100644 --- a/deps/v8/src/sanitizer/lsan-page-allocator.cc +++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/sanitizer/lsan-page-allocator.h" +#include "src/base/sanitizer/lsan-page-allocator.h" #include "include/v8-platform.h" #include "src/base/logging.h" diff --git a/deps/v8/src/sanitizer/lsan-page-allocator.h b/deps/v8/src/base/sanitizer/lsan-page-allocator.h similarity index 86% rename from deps/v8/src/sanitizer/lsan-page-allocator.h rename to deps/v8/src/base/sanitizer/lsan-page-allocator.h index f86ffd98e84caf..4c8a1f04a0dc93 100644 --- a/deps/v8/src/sanitizer/lsan-page-allocator.h +++ b/deps/v8/src/base/sanitizer/lsan-page-allocator.h @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#ifndef V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ -#define V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ +#ifndef V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ +#define V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ #include "include/v8-platform.h" +#include "src/base/base-export.h" #include "src/base/compiler-specific.h" namespace v8 { @@ -14,7 +15,7 @@ namespace base { // This is a v8::PageAllocator implementation that decorates provided page // allocator object with leak sanitizer notifications when LEAK_SANITIZER // is defined. 
-class LsanPageAllocator : public v8::PageAllocator { +class V8_BASE_EXPORT LsanPageAllocator : public v8::PageAllocator { public: explicit LsanPageAllocator(v8::PageAllocator* page_allocator); ~LsanPageAllocator() override = default; @@ -56,4 +57,4 @@ class LsanPageAllocator : public v8::PageAllocator { } // namespace base } // namespace v8 -#endif // V8_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ +#endif // V8_BASE_SANITIZER_LSAN_PAGE_ALLOCATOR_H_ diff --git a/deps/v8/src/base/lsan.h b/deps/v8/src/base/sanitizer/lsan.h similarity index 75% rename from deps/v8/src/base/lsan.h rename to deps/v8/src/base/sanitizer/lsan.h index fd9bbd21c1b818..2d7dcd7f68046f 100644 --- a/deps/v8/src/base/lsan.h +++ b/deps/v8/src/base/sanitizer/lsan.h @@ -4,14 +4,16 @@ // LeakSanitizer support. -#ifndef V8_BASE_LSAN_H_ -#define V8_BASE_LSAN_H_ +#ifndef V8_BASE_SANITIZER_LSAN_H_ +#define V8_BASE_SANITIZER_LSAN_H_ #include -// There is no compile time flag for LSan, to enable this whenever ASan is +#include "src/base/macros.h" + +// There is no compile time flag for LSan, so enable this whenever ASan is // enabled. Note that LSan can be used as part of ASan with 'detect_leaks=1'. -// On windows, LSan is not implemented yet, so disable it there. +// On Windows, LSan is not implemented yet, so disable it there. #if defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN) #include @@ -26,4 +28,4 @@ #endif // defined(V8_USE_ADDRESS_SANITIZER) && !defined(V8_OS_WIN) -#endif // V8_BASE_LSAN_H_ +#endif // V8_BASE_SANITIZER_LSAN_H_ diff --git a/deps/v8/src/base/sanitizer/msan.h b/deps/v8/src/base/sanitizer/msan.h new file mode 100644 index 00000000000000..e15208efaf8266 --- /dev/null +++ b/deps/v8/src/base/sanitizer/msan.h @@ -0,0 +1,40 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// MemorySanitizer support. + +#ifndef V8_BASE_SANITIZER_MSAN_H_ +#define V8_BASE_SANITIZER_MSAN_H_ + +#include "src/base/macros.h" +#include "src/base/memory.h" + +#ifdef V8_USE_MEMORY_SANITIZER + +#include + +// Marks a memory range as uninitialized, as if it was allocated here. +#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \ + __msan_allocated_memory(reinterpret_cast(start), (size)) + +// Marks a memory range as initialized. +#define MSAN_MEMORY_IS_INITIALIZED(start, size) \ + __msan_unpoison(reinterpret_cast(start), (size)) + +#else // !V8_USE_MEMORY_SANITIZER + +#define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) \ + static_assert((std::is_pointer::value || \ + std::is_same::value), \ + "static type violation"); \ + static_assert(std::is_convertible::value, \ + "static type violation"); \ + USE(start, size) + +#define MSAN_MEMORY_IS_INITIALIZED(start, size) \ + MSAN_ALLOCATED_UNINITIALIZED_MEMORY(start, size) + +#endif // V8_USE_MEMORY_SANITIZER + +#endif // V8_BASE_SANITIZER_MSAN_H_ diff --git a/deps/v8/src/base/small-vector.h b/deps/v8/src/base/small-vector.h index c337b9052d488a..b087d44be4e8b2 100644 --- a/deps/v8/src/base/small-vector.h +++ b/deps/v8/src/base/small-vector.h @@ -154,6 +154,13 @@ class SmallVector { base::bits::RoundUpToPowerOfTwo(std::max(min_capacity, 2 * capacity())); T* new_storage = reinterpret_cast(base::Malloc(sizeof(T) * new_capacity)); + if (new_storage == nullptr) { + // Should be: V8::FatalProcessOutOfMemory, but we don't include V8 from + // base. 
The message is intentionally the same as FatalProcessOutOfMemory + // since that will help fuzzers and chromecrash to categorize such + // crashes appropriately. + FATAL("Fatal process out of memory: base::SmallVector::Grow"); + } base::Memcpy(new_storage, begin_, sizeof(T) * in_use); if (is_big()) base::Free(begin_); begin_ = new_storage; diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h index 4f082845d95fe6..f222593e2d2657 100644 --- a/deps/v8/src/base/template-utils.h +++ b/deps/v8/src/base/template-utils.h @@ -98,6 +98,15 @@ struct make_void { template using void_t = typename make_void::type; +// Corresponds to C++17's std::conjunction +template +struct conjunction : std::true_type {}; +template +struct conjunction : B {}; +template +struct conjunction + : std::conditional_t, B> {}; + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/v8-fallthrough.h b/deps/v8/src/base/v8-fallthrough.h index f61238de0600ab..a6dc6972d6d21d 100644 --- a/deps/v8/src/base/v8-fallthrough.h +++ b/deps/v8/src/base/v8-fallthrough.h @@ -13,7 +13,7 @@ // So do not include this header in any of v8's public headers -- only // use it in src/, not in include/. #if defined(__clang__) -#define V8_FALLTHROUGH [[clang::fallthrough]] // NOLINT(whitespace/braces) +#define V8_FALLTHROUGH [[clang::fallthrough]] #else #define V8_FALLTHROUGH #endif diff --git a/deps/v8/src/base/vlq.h b/deps/v8/src/base/vlq.h index baeb5b9430d5cc..96ee42cf6e808d 100644 --- a/deps/v8/src/base/vlq.h +++ b/deps/v8/src/base/vlq.h @@ -14,60 +14,95 @@ namespace v8 { namespace base { static constexpr uint32_t kContinueShift = 7; -static constexpr uint32_t kContinueMask = 1 << kContinueShift; -static constexpr uint32_t kDataMask = kContinueMask - 1; +static constexpr uint32_t kContinueBit = 1 << kContinueShift; +static constexpr uint32_t kDataMask = kContinueBit - 1; // Encodes an unsigned value using variable-length encoding and stores it using -// the passed process_byte function. -inline void VLQEncodeUnsigned(const std::function& process_byte, - uint32_t value) { - bool has_next; +// the passed process_byte function. The function should return a pointer to +// the byte that was written, so that VLQEncodeUnsigned can mutate it after +// writing it. +template +inline typename std::enable_if< + std::is_same()(0)), byte*>::value, + void>::type +VLQEncodeUnsigned(Function&& process_byte, uint32_t value) { + byte* written_byte = process_byte(value); + if (value <= kDataMask) { + // Value fits in first byte, early return. + return; + } do { - byte cur_byte = value & kDataMask; + // Turn on continuation bit in the byte we just wrote. + *written_byte |= kContinueBit; value >>= kContinueShift; - has_next = value != 0; - // The most significant bit is set when we are not done with the value yet. - cur_byte |= static_cast(has_next) << kContinueShift; - process_byte(cur_byte); - } while (has_next); + written_byte = process_byte(value); + } while (value > kDataMask); } // Encodes value using variable-length encoding and stores it using the passed // process_byte function. -inline void VLQEncode(const std::function& process_byte, - int32_t value) { +template +inline typename std::enable_if< + std::is_same()(0)), byte*>::value, + void>::type +VLQEncode(Function&& process_byte, int32_t value) { // This wouldn't handle kMinInt correctly if it ever encountered it. DCHECK_NE(value, std::numeric_limits::min()); bool is_negative = value < 0; // Encode sign in least significant bit. 
uint32_t bits = static_cast((is_negative ? -value : value) << 1) | static_cast(is_negative); - VLQEncodeUnsigned(process_byte, bits); + VLQEncodeUnsigned(std::forward(process_byte), bits); } // Wrapper of VLQEncode for std::vector backed storage containers. template inline void VLQEncode(std::vector* data, int32_t value) { - VLQEncode([data](byte value) { data->push_back(value); }, value); + VLQEncode( + [data](byte value) { + data->push_back(value); + return &data->back(); + }, + value); } // Wrapper of VLQEncodeUnsigned for std::vector backed storage containers. template inline void VLQEncodeUnsigned(std::vector* data, uint32_t value) { - VLQEncodeUnsigned([data](byte value) { data->push_back(value); }, value); + VLQEncodeUnsigned( + [data](byte value) { + data->push_back(value); + return &data->back(); + }, + value); +} + +// Decodes a variable-length encoded unsigned value from bytes returned by +// successive calls to the given function. +template +inline typename std::enable_if< + std::is_same()()), byte>::value, + uint32_t>::type +VLQDecodeUnsigned(GetNextFunction&& get_next) { + byte cur_byte = get_next(); + // Single byte fast path; no need to mask. + if (cur_byte <= kDataMask) { + return cur_byte; + } + uint32_t bits = cur_byte & kDataMask; + for (int shift = kContinueShift; shift <= 32; shift += kContinueShift) { + byte cur_byte = get_next(); + bits |= (cur_byte & kDataMask) << shift; + if (cur_byte <= kDataMask) break; + } + return bits; } // Decodes a variable-length encoded unsigned value stored in contiguous memory // starting at data_start + index, updating index to where the next encoded // value starts. inline uint32_t VLQDecodeUnsigned(byte* data_start, int* index) { - uint32_t bits = 0; - for (int shift = 0; true; shift += kContinueShift) { - byte cur_byte = data_start[(*index)++]; - bits += (cur_byte & kDataMask) << shift; - if ((cur_byte & kContinueMask) == 0) break; - } - return bits; + return VLQDecodeUnsigned([&] { return data_start[(*index)++]; }); } // Decodes a variable-length encoded value stored in contiguous memory starting diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h index eca2b47cc0e3f8..bfccef90f8f1f1 100644 --- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h +++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h @@ -124,7 +124,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) { Register temp = temps.AcquireScratch(); __ LoadEntryFromBuiltinIndex(builtin, temp); __ Call(temp); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) { @@ -133,7 +133,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) { Register temp = temps.AcquireScratch(); __ LoadEntryFromBuiltinIndex(builtin, temp); __ Jump(temp); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } void BaselineAssembler::Test(Register value, int mask) { @@ -151,7 +151,7 @@ void BaselineAssembler::CmpInstanceType(Register map, InstanceType instance_type) { ScratchRegisterScope temps(this); Register type = temps.AcquireScratch(); - if (emit_debug_code()) { + if (FLAG_debug_code) { __ AssertNotSmi(map); __ CompareObjectType(map, type, type, MAP_TYPE); __ Assert(eq, AbortReason::kUnexpectedValue); @@ -198,10 +198,10 @@ void BaselineAssembler::Move(MemOperand output, Register source) { __ str(source, output); } void BaselineAssembler::Move(Register output, ExternalReference 
reference) { - __ mov(output, Operand(reference)); + __ Move32BitImmediate(output, Operand(reference)); } void BaselineAssembler::Move(Register output, Handle value) { - __ mov(output, Operand(value)); + __ Move32BitImmediate(output, Operand(value)); } void BaselineAssembler::Move(Register output, int32_t value) { __ mov(output, Operand(value)); @@ -351,7 +351,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, Register value) { __ str(value, FieldMemOperand(target, offset)); __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved, - kDontSaveFPRegs); + SaveFPRegsMode::kIgnore); } void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, int offset, diff --git a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h index ff2b6d1a831294..d7f0a606d3bfcf 100644 --- a/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h +++ b/deps/v8/src/baseline/arm/baseline-compiler-arm-inl.h @@ -19,9 +19,9 @@ void BaselineCompiler::Prologue() { __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); int max_frame_size = bytecode_->frame_size() + max_call_args_; - CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister, - kJSFunctionRegister, kJavaScriptCallArgCountRegister, - max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); PrologueFillFrame(); } diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h index 27b7c2b2d8db93..63e90df4d620fe 100644 --- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h @@ -167,7 +167,7 @@ void BaselineAssembler::CmpInstanceType(Register map, InstanceType instance_type) { ScratchRegisterScope temps(this); Register type = temps.AcquireScratch(); - if (emit_debug_code()) { + if (FLAG_debug_code) { __ AssertNotSmi(map); __ CompareObjectType(map, type, type, MAP_TYPE); __ Assert(eq, AbortReason::kUnexpectedValue); @@ -422,7 +422,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, Register value) { __ StoreTaggedField(value, FieldMemOperand(target, offset)); __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved, - kDontSaveFPRegs); + SaveFPRegsMode::kIgnore); } void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, int offset, diff --git a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h index e567be41d245fd..0807c5434acb1f 100644 --- a/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h +++ b/deps/v8/src/baseline/arm64/baseline-compiler-arm64-inl.h @@ -18,9 +18,9 @@ void BaselineCompiler::Prologue() { __ masm()->EnterFrame(StackFrame::BASELINE); DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); int max_frame_size = bytecode_->frame_size() + max_call_args_; - CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister, - kJSFunctionRegister, kJavaScriptCallArgCountRegister, - max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); __ masm()->AssertSpAligned(); PrologueFillFrame(); diff --git 
a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h index 8fd54d63a2fbe4..401062517f6747 100644 --- a/deps/v8/src/baseline/baseline-assembler-inl.h +++ b/deps/v8/src/baseline/baseline-assembler-inl.h @@ -8,12 +8,13 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 #include #include #include "src/baseline/baseline-assembler.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/interpreter/bytecode-register.h" #include "src/objects/feedback-cell.h" #include "src/objects/js-function.h" @@ -27,6 +28,8 @@ #include "src/baseline/ia32/baseline-assembler-ia32-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/baseline/arm/baseline-assembler-arm-inl.h" +#elif V8_TARGET_ARCH_RISCV64 +#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h" #else #error Unsupported target architecture. #endif @@ -41,10 +44,10 @@ void BaselineAssembler::GetCode(Isolate* isolate, CodeDesc* desc) { __ GetCode(isolate, desc); } int BaselineAssembler::pc_offset() const { return __ pc_offset(); } -bool BaselineAssembler::emit_debug_code() const { return __ emit_debug_code(); } void BaselineAssembler::CodeEntry() const { __ CodeEntry(); } void BaselineAssembler::ExceptionHandler() const { __ ExceptionHandler(); } void BaselineAssembler::RecordComment(const char* string) { + if (!FLAG_code_comments) return; __ RecordComment(string); } void BaselineAssembler::Trap() { __ Trap(); } diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h index 38874d556f0f73..7c46cd5e2c447e 100644 --- a/deps/v8/src/baseline/baseline-assembler.h +++ b/deps/v8/src/baseline/baseline-assembler.h @@ -8,7 +8,7 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 #include "src/codegen/macro-assembler.h" #include "src/objects/tagged-index.h" @@ -32,10 +32,9 @@ class BaselineAssembler { inline void GetCode(Isolate* isolate, CodeDesc* desc); inline int pc_offset() const; - inline bool emit_debug_code() const; inline void CodeEntry() const; inline void ExceptionHandler() const; - inline void RecordComment(const char* string); + V8_INLINE void RecordComment(const char* string); inline void Trap(); inline void DebugBreak(); diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc index 3d599c11fd50de..9c6e3f10e6de76 100644 --- a/deps/v8/src/baseline/baseline-compiler.cc +++ b/deps/v8/src/baseline/baseline-compiler.cc @@ -4,8 +4,9 @@ // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. 
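// The FLAG_code_comments check now lives inside BaselineAssembler::RecordComment
// (see the baseline-assembler-inl.h hunk above), so per-call-site guards are dropped.
// Illustrative before/after of one call site from the arm hunks:
//   Before:  if (FLAG_code_comments) __ RecordComment("]");
//   After:   __ RecordComment("]");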
+#include "src/base/bits.h" #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 #include "src/baseline/baseline-compiler.h" @@ -19,7 +20,7 @@ #include "src/builtins/builtins.h" #include "src/codegen/assembler.h" #include "src/codegen/compiler.h" -#include "src/codegen/interface-descriptors.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/machine-type.h" #include "src/codegen/macro-assembler-inl.h" #include "src/common/globals.h" @@ -40,6 +41,8 @@ #include "src/baseline/ia32/baseline-compiler-ia32-inl.h" #elif V8_TARGET_ARCH_ARM #include "src/baseline/arm/baseline-compiler-arm-inl.h" +#elif V8_TARGET_ARCH_RISCV64 +#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h" #else #error Unsupported target architecture. #endif @@ -48,9 +51,9 @@ namespace v8 { namespace internal { namespace baseline { -template +template Handle BytecodeOffsetTableBuilder::ToBytecodeOffsetTable( - LocalIsolate* isolate) { + IsolateT* isolate) { if (bytes_.empty()) return isolate->factory()->empty_byte_array(); Handle table = isolate->factory()->NewByteArray( static_cast(bytes_.size()), AllocationType::kOld); @@ -68,6 +71,7 @@ bool Clobbers(Register target, TaggedIndex index) { return false; } bool Clobbers(Register target, int32_t imm) { return false; } bool Clobbers(Register target, RootIndex index) { return false; } bool Clobbers(Register target, interpreter::Register reg) { return false; } +bool Clobbers(Register target, interpreter::RegisterList list) { return false; } // We don't know what's inside machine registers or operands, so assume they // match. @@ -97,134 +101,151 @@ bool MachineTypeMatches(MachineType type, interpreter::Register reg) { return type.IsTagged(); } -template +template struct CheckArgsHelper; -template <> -struct CheckArgsHelper<> { - static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i) { - if (descriptor.AllowVarArgs()) { - CHECK_GE(i, descriptor.GetParameterCount()); +template +struct CheckArgsHelper { + static void Check(BaselineAssembler* masm, int i) { + if (Descriptor::AllowVarArgs()) { + CHECK_GE(i, Descriptor::GetParameterCount()); } else { - CHECK_EQ(i, descriptor.GetParameterCount()); + CHECK_EQ(i, Descriptor::GetParameterCount()); } } }; -template -struct CheckArgsHelper { - static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i, Arg arg, Args... args) { - if (i >= descriptor.GetParameterCount()) { - CHECK(descriptor.AllowVarArgs()); +template +struct CheckArgsHelper { + static void Check(BaselineAssembler* masm, int i, Arg arg, Args... args) { + if (i >= Descriptor::GetParameterCount()) { + CHECK(Descriptor::AllowVarArgs()); return; } - CHECK(MachineTypeMatches(descriptor.GetParameterType(i), arg)); - CheckArgsHelper::Check(masm, descriptor, i + 1, args...); + CHECK(MachineTypeMatches(Descriptor().GetParameterType(i), arg)); + CheckArgsHelper::Check(masm, i + 1, args...); } }; -template -struct CheckArgsHelper { - static void Check(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i, interpreter::RegisterList list, Args... args) { +template +struct CheckArgsHelper { + static void Check(BaselineAssembler* masm, int i, + interpreter::RegisterList list, Args... 
args) { for (int reg_index = 0; reg_index < list.register_count(); ++reg_index, ++i) { - if (i >= descriptor.GetParameterCount()) { - CHECK(descriptor.AllowVarArgs()); + if (i >= Descriptor::GetParameterCount()) { + CHECK(Descriptor::AllowVarArgs()); return; } - CHECK( - MachineTypeMatches(descriptor.GetParameterType(i), list[reg_index])); + CHECK(MachineTypeMatches(Descriptor().GetParameterType(i), + list[reg_index])); } - CheckArgsHelper::Check(masm, descriptor, i, args...); + CheckArgsHelper::Check(masm, i, args...); } }; -template -void CheckArgs(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - Args... args) { - CheckArgsHelper::Check(masm, descriptor, 0, args...); +template +void CheckArgs(BaselineAssembler* masm, Args... args) { + CheckArgsHelper::Check(masm, 0, args...); +} + +void CheckSettingDoesntClobber(Register target) {} +template +void CheckSettingDoesntClobber(Register target, Arg arg, Args... args) { + DCHECK(!Clobbers(target, arg)); + CheckSettingDoesntClobber(target, args...); } #else // DEBUG -template +template void CheckArgs(Args... args) {} +template +void CheckSettingDoesntClobber(Register target, Args... args) {} + #endif // DEBUG -template +template struct ArgumentSettingHelper; -template <> -struct ArgumentSettingHelper<> { - static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i) {} - static void CheckSettingDoesntClobber(Register target, int arg_index) {} +template +struct ArgumentSettingHelper { + static void Set(BaselineAssembler* masm) { + // Should only ever be called for the end of register arguments. + STATIC_ASSERT(ArgIndex == Descriptor::GetRegisterParameterCount()); + } }; -template -struct ArgumentSettingHelper { - static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i, Arg arg, Args... args) { - if (i < descriptor.GetRegisterParameterCount()) { - Register target = descriptor.GetRegisterParameter(i); - ArgumentSettingHelper::CheckSettingDoesntClobber(target, i + 1, - args...); - masm->Move(target, arg); - ArgumentSettingHelper::Set(masm, descriptor, i + 1, args...); - } else if (descriptor.GetStackArgumentOrder() == - StackArgumentOrder::kDefault) { - masm->Push(arg, args...); - } else { - masm->PushReverse(arg, args...); - } +template +struct ArgumentSettingHelper { + static void Set(BaselineAssembler* masm, Arg arg, Args... args) { + STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount()); + Register target = Descriptor::GetRegisterParameter(ArgIndex); + CheckSettingDoesntClobber(target, args...); + masm->Move(target, arg); + ArgumentSettingHelper::Set(masm, args...); } - static void CheckSettingDoesntClobber(Register target, int arg_index, Arg arg, - Args... args) { - DCHECK(!Clobbers(target, arg)); - ArgumentSettingHelper::CheckSettingDoesntClobber( - target, arg_index + 1, args...); +}; + +template +struct ArgumentSettingHelper { + static void Set(BaselineAssembler* masm, interpreter::RegisterList list) { + STATIC_ASSERT(ArgIndex < Descriptor::GetRegisterParameterCount()); + DCHECK_EQ(ArgIndex + list.register_count(), + Descriptor::GetRegisterParameterCount()); + for (int i = 0; ArgIndex + i < Descriptor::GetRegisterParameterCount(); + ++i) { + Register target = Descriptor::GetRegisterParameter(ArgIndex + i); + masm->Move(target, masm->RegisterFrameOperand(list[i])); + } } }; -// Specialization for interpreter::RegisterList which iterates it. -// RegisterLists are only allowed to be the last argument. 
-template <> -struct ArgumentSettingHelper { - static void Set(BaselineAssembler* masm, CallInterfaceDescriptor descriptor, - int i, interpreter::RegisterList list) { - // Either all the values are in machine registers, or they're all on the - // stack. - if (i < descriptor.GetRegisterParameterCount()) { - for (int reg_index = 0; reg_index < list.register_count(); - ++reg_index, ++i) { - Register target = descriptor.GetRegisterParameter(i); - masm->Move(target, masm->RegisterFrameOperand(list[reg_index])); - } - } else if (descriptor.GetStackArgumentOrder() == - StackArgumentOrder::kDefault) { - masm->Push(list); +template +struct ArgumentSettingHelper { + static void Set(BaselineAssembler* masm, Arg arg, Args... args) { + if (Descriptor::kStackArgumentOrder == StackArgumentOrder::kDefault) { + masm->Push(arg, args...); } else { - masm->PushReverse(list); + masm->PushReverse(arg, args...); } } - static void CheckSettingDoesntClobber(Register target, int arg_index, - interpreter::RegisterList arg) {} }; -template -void MoveArgumentsForDescriptor(BaselineAssembler* masm, - CallInterfaceDescriptor descriptor, - Args... args) { - CheckArgs(masm, descriptor, args...); - ArgumentSettingHelper::Set(masm, descriptor, 0, args...); +template +void MoveArgumentsForBuiltin(BaselineAssembler* masm, Args... args) { + using Descriptor = typename CallInterfaceDescriptorFor::type; + CheckArgs(masm, args...); + ArgumentSettingHelper::Set(masm, args...); + if (Descriptor::HasContextParameter()) { + masm->LoadContext(Descriptor::ContextRegister()); + } } } // namespace detail +namespace { +// Rough upper-bound estimate. Copying the data is most likely more expensive +// than pre-allocating a large enough buffer. +#ifdef V8_TARGET_ARCH_IA32 +const int kAverageBytecodeToInstructionRatio = 5; +#else +const int kAverageBytecodeToInstructionRatio = 7; +#endif +std::unique_ptr AllocateBuffer( + Handle bytecodes) { + int estimated_size = bytecodes->length() * kAverageBytecodeToInstructionRatio; + return NewAssemblerBuffer(RoundUp(estimated_size, 4 * KB)); +} +} // namespace + BaselineCompiler::BaselineCompiler( Isolate* isolate, Handle shared_function_info, Handle bytecode) @@ -232,20 +253,26 @@ BaselineCompiler::BaselineCompiler( stats_(isolate->counters()->runtime_call_stats()), shared_function_info_(shared_function_info), bytecode_(bytecode), - masm_(isolate, CodeObjectRequired::kNo), + masm_(isolate, CodeObjectRequired::kNo, AllocateBuffer(bytecode)), basm_(&masm_), iterator_(bytecode_), zone_(isolate->allocator(), ZONE_NAME), labels_(zone_.NewArray(bytecode_->length())) { MemsetPointer(labels_, nullptr, bytecode_->length()); + + // Empirically determined expected size of the offset table at the 95th %ile, + // based on the size of the bytecode, to be: + // + // 16 + (bytecode size) / 4 + bytecode_offset_table_builder_.Reserve( + base::bits::RoundUpToPowerOfTwo(16 + bytecode_->Size() / 4)); } #define __ basm_. 
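// Worked example of the two size estimates above, taking both the bytecode length and
// bytecode_->Size() as roughly 1000 bytes purely for illustration, on a non-IA32 target:
//   assembler buffer:     RoundUp(1000 * 7, 4 * KB)           == 8192 bytes
//   offset table reserve: RoundUpToPowerOfTwo(16 + 1000 / 4)  == 512 bytes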
void BaselineCompiler::GenerateCode() { { - RuntimeCallTimerScope runtimeTimer( - stats_, RuntimeCallCounterId::kCompileBaselinePreVisit); + RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselinePreVisit); for (; !iterator_.done(); iterator_.Advance()) { PreVisitSingleBytecode(); } @@ -257,8 +284,7 @@ void BaselineCompiler::GenerateCode() { __ CodeEntry(); { - RuntimeCallTimerScope runtimeTimer( - stats_, RuntimeCallCounterId::kCompileBaselineVisit); + RCS_SCOPE(stats_, RuntimeCallCounterId::kCompileBaselineVisit); Prologue(); AddPosition(); for (; !iterator_.done(); iterator_.Advance()) { @@ -453,7 +479,7 @@ void BaselineCompiler::VisitSingleBytecode() { } void BaselineCompiler::VerifyFrame() { - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ RecordComment("[ Verify frame"); __ RecordComment(" -- Verify frame size"); VerifyFrameSize(); @@ -552,28 +578,18 @@ Label* BaselineCompiler::BuildForwardJumpLabel() { return &threaded_label->label; } -template -void BaselineCompiler::CallBuiltin(Builtins::Name builtin, Args... args) { +template +void BaselineCompiler::CallBuiltin(Args... args) { __ RecordComment("[ CallBuiltin"); - CallInterfaceDescriptor descriptor = - Builtins::CallInterfaceDescriptorFor(builtin); - detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...); - if (descriptor.HasContextParameter()) { - __ LoadContext(descriptor.ContextRegister()); - } - __ CallBuiltin(builtin); + detail::MoveArgumentsForBuiltin(&basm_, args...); + __ CallBuiltin(kBuiltin); __ RecordComment("]"); } -template -void BaselineCompiler::TailCallBuiltin(Builtins::Name builtin, Args... args) { - CallInterfaceDescriptor descriptor = - Builtins::CallInterfaceDescriptorFor(builtin); - detail::MoveArgumentsForDescriptor(&basm_, descriptor, args...); - if (descriptor.HasContextParameter()) { - __ LoadContext(descriptor.ContextRegister()); - } - __ TailCallBuiltin(builtin); +template +void BaselineCompiler::TailCallBuiltin(Args... args) { + detail::MoveArgumentsForBuiltin(&basm_, args...); + __ TailCallBuiltin(kBuiltin); } template @@ -584,27 +600,17 @@ void BaselineCompiler::CallRuntime(Runtime::FunctionId function, Args... args) { } // Returns into kInterpreterAccumulatorRegister -void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Register reg, - Label* label, Label::Distance distance) { - Label end; - Label::Distance end_distance = Label::kNear; - - Label* true_label = do_jump_if_true ? label : &end; - Label::Distance true_distance = do_jump_if_true ? distance : end_distance; - Label* false_label = do_jump_if_true ? &end : label; - Label::Distance false_distance = do_jump_if_true ? end_distance : distance; - - BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); - Register to_boolean = scratch_scope.AcquireScratch(); - { - SaveAccumulatorScope accumulator_scope(&basm_); - CallBuiltin(Builtins::kToBoolean, reg); - __ Move(to_boolean, kInterpreterAccumulatorRegister); - } - __ JumpIfRoot(to_boolean, RootIndex::kTrueValue, true_label, true_distance); - if (false_label != &end) __ Jump(false_label, false_distance); - - __ Bind(&end); +void BaselineCompiler::JumpIfToBoolean(bool do_jump_if_true, Label* label, + Label::Distance distance) { + CallBuiltin( + kInterpreterAccumulatorRegister); + // ToBooleanForBaselineJump returns the ToBoolean value into return reg 1, and + // the original value into kInterpreterAccumulatorRegister, so we don't have + // to worry about it getting clobbered. 
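  // (Concretely, the code below relies on the builtin leaving a non-zero Smi in
  // kReturnRegister1 exactly when the value is truthy: the Cmp against
  // Smi::FromInt(0) then turns jump-if-true into a kNotEqual jump and
  // jump-if-false into a kEqual jump.)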
+ STATIC_ASSERT(kReturnRegister0 == kInterpreterAccumulatorRegister); + __ Cmp(kReturnRegister1, Smi::FromInt(0)); + __ JumpIf(do_jump_if_true ? Condition::kNotEqual : Condition::kEqual, label, + distance); } void BaselineCompiler::VisitLdaZero() { @@ -641,22 +647,21 @@ void BaselineCompiler::VisitLdaConstant() { } void BaselineCompiler::VisitLdaGlobal() { - CallBuiltin(Builtins::kLoadGlobalICBaseline, - Constant(0), // name - IndexAsTagged(1)); // slot + CallBuiltin(Constant(0), // name + IndexAsTagged(1)); // slot } void BaselineCompiler::VisitLdaGlobalInsideTypeof() { - CallBuiltin(Builtins::kLoadGlobalICInsideTypeofBaseline, - Constant(0), // name - IndexAsTagged(1)); // slot + CallBuiltin( + Constant(0), // name + IndexAsTagged(1)); // slot } void BaselineCompiler::VisitStaGlobal() { - CallBuiltin(Builtins::kStoreGlobalICBaseline, - Constant(0), // name - kInterpreterAccumulatorRegister, // value - IndexAsTagged(1)); // slot + CallBuiltin( + Constant(0), // name + kInterpreterAccumulatorRegister, // value + IndexAsTagged(1)); // slot } void BaselineCompiler::VisitPushContext() { @@ -730,13 +735,13 @@ void BaselineCompiler::VisitLdaLookupSlot() { } void BaselineCompiler::VisitLdaLookupContextSlot() { - CallBuiltin(Builtins::kLookupContextBaseline, Constant(0), - UintAsTagged(2), IndexAsTagged(1)); + CallBuiltin( + Constant(0), UintAsTagged(2), IndexAsTagged(1)); } void BaselineCompiler::VisitLdaLookupGlobalSlot() { - CallBuiltin(Builtins::kLookupGlobalICBaseline, Constant(0), - UintAsTagged(2), IndexAsTagged(1)); + CallBuiltin( + Constant(0), UintAsTagged(2), IndexAsTagged(1)); } void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() { @@ -744,13 +749,13 @@ void BaselineCompiler::VisitLdaLookupSlotInsideTypeof() { } void BaselineCompiler::VisitLdaLookupContextSlotInsideTypeof() { - CallBuiltin(Builtins::kLookupContextInsideTypeofBaseline, Constant(0), - UintAsTagged(2), IndexAsTagged(1)); + CallBuiltin( + Constant(0), UintAsTagged(2), IndexAsTagged(1)); } void BaselineCompiler::VisitLdaLookupGlobalSlotInsideTypeof() { - CallBuiltin(Builtins::kLookupGlobalICInsideTypeofBaseline, Constant(0), - UintAsTagged(2), IndexAsTagged(1)); + CallBuiltin( + Constant(0), UintAsTagged(2), IndexAsTagged(1)); } void BaselineCompiler::VisitStaLookupSlot() { @@ -793,14 +798,13 @@ void BaselineCompiler::VisitMov() { } void BaselineCompiler::VisitLdaNamedProperty() { - CallBuiltin(Builtins::kLoadICBaseline, - RegisterOperand(0), // object - Constant(1), // name - IndexAsTagged(2)); // slot + CallBuiltin(RegisterOperand(0), // object + Constant(1), // name + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitLdaNamedPropertyNoFeedback() { - CallBuiltin(Builtins::kGetProperty, RegisterOperand(0), Constant(1)); + CallBuiltin(RegisterOperand(0), Constant(1)); } void BaselineCompiler::VisitLdaNamedPropertyFromSuper() { @@ -808,19 +812,19 @@ void BaselineCompiler::VisitLdaNamedPropertyFromSuper() { LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister(), kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kLoadSuperICBaseline, - RegisterOperand(0), // object - LoadWithReceiverAndVectorDescriptor:: - LookupStartObjectRegister(), // lookup start - Constant(1), // name - IndexAsTagged(2)); // slot + CallBuiltin( + RegisterOperand(0), // object + LoadWithReceiverAndVectorDescriptor:: + LookupStartObjectRegister(), // lookup start + Constant(1), // name + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitLdaKeyedProperty() { - CallBuiltin(Builtins::kKeyedLoadICBaseline, - 
RegisterOperand(0), // object - kInterpreterAccumulatorRegister, // key - IndexAsTagged(1)); // slot + CallBuiltin( + RegisterOperand(0), // object + kInterpreterAccumulatorRegister, // key + IndexAsTagged(1)); // slot } void BaselineCompiler::VisitLdaModuleVariable() { @@ -878,11 +882,11 @@ void BaselineCompiler::VisitStaModuleVariable() { } void BaselineCompiler::VisitStaNamedProperty() { - CallBuiltin(Builtins::kStoreICBaseline, - RegisterOperand(0), // object - Constant(1), // name - kInterpreterAccumulatorRegister, // value - IndexAsTagged(2)); // slot + CallBuiltin( + RegisterOperand(0), // object + Constant(1), // name + kInterpreterAccumulatorRegister, // value + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitStaNamedPropertyNoFeedback() { @@ -900,19 +904,19 @@ void BaselineCompiler::VisitStaNamedOwnProperty() { } void BaselineCompiler::VisitStaKeyedProperty() { - CallBuiltin(Builtins::kKeyedStoreICBaseline, - RegisterOperand(0), // object - RegisterOperand(1), // key - kInterpreterAccumulatorRegister, // value - IndexAsTagged(2)); // slot + CallBuiltin( + RegisterOperand(0), // object + RegisterOperand(1), // key + kInterpreterAccumulatorRegister, // value + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitStaInArrayLiteral() { - CallBuiltin(Builtins::kStoreInArrayLiteralICBaseline, - RegisterOperand(0), // object - RegisterOperand(1), // name - kInterpreterAccumulatorRegister, // value - IndexAsTagged(2)); // slot + CallBuiltin( + RegisterOperand(0), // object + RegisterOperand(1), // name + kInterpreterAccumulatorRegister, // value + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitStaDataPropertyInLiteral() { @@ -934,140 +938,149 @@ void BaselineCompiler::VisitCollectTypeProfile() { } void BaselineCompiler::VisitAdd() { - CallBuiltin(Builtins::kAdd_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitSub() { - CallBuiltin(Builtins::kSubtract_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitMul() { - CallBuiltin(Builtins::kMultiply_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitDiv() { - CallBuiltin(Builtins::kDivide_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitMod() { - CallBuiltin(Builtins::kModulus_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitExp() { - CallBuiltin(Builtins::kExponentiate_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitBitwiseOr() { - CallBuiltin(Builtins::kBitwiseOr_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitBitwiseXor() { - CallBuiltin(Builtins::kBitwiseXor_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), 
kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitBitwiseAnd() { - CallBuiltin(Builtins::kBitwiseAnd_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitShiftLeft() { - CallBuiltin(Builtins::kShiftLeft_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitShiftRight() { - CallBuiltin(Builtins::kShiftRight_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitShiftRightLogical() { - CallBuiltin(Builtins::kShiftRightLogical_Baseline, RegisterOperand(0), - kInterpreterAccumulatorRegister, Index(1)); -} - -void BaselineCompiler::BuildBinopWithConstant(Builtins::Name builtin_name) { - CallBuiltin(builtin_name, kInterpreterAccumulatorRegister, IntAsSmi(0), - Index(1)); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitAddSmi() { - BuildBinopWithConstant(Builtins::kAdd_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitSubSmi() { - BuildBinopWithConstant(Builtins::kSubtract_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitMulSmi() { - BuildBinopWithConstant(Builtins::kMultiply_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitDivSmi() { - BuildBinopWithConstant(Builtins::kDivide_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitModSmi() { - BuildBinopWithConstant(Builtins::kModulus_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitExpSmi() { - BuildBinopWithConstant(Builtins::kExponentiate_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseOrSmi() { - BuildBinopWithConstant(Builtins::kBitwiseOr_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseXorSmi() { - BuildBinopWithConstant(Builtins::kBitwiseXor_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitBitwiseAndSmi() { - BuildBinopWithConstant(Builtins::kBitwiseAnd_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitShiftLeftSmi() { - BuildBinopWithConstant(Builtins::kShiftLeft_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitShiftRightSmi() { - BuildBinopWithConstant(Builtins::kShiftRight_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + IntAsSmi(0), Index(1)); } void BaselineCompiler::VisitShiftRightLogicalSmi() { - BuildBinopWithConstant(Builtins::kShiftRightLogical_Baseline); + CallBuiltin( + kInterpreterAccumulatorRegister, IntAsSmi(0), Index(1)); } -void BaselineCompiler::BuildUnop(Builtins::Name builtin_name) { - CallBuiltin(builtin_name, - kInterpreterAccumulatorRegister, // value - Index(0)); // slot +void BaselineCompiler::VisitInc() { + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); } -void 
BaselineCompiler::VisitInc() { BuildUnop(Builtins::kIncrement_Baseline); } - -void BaselineCompiler::VisitDec() { BuildUnop(Builtins::kDecrement_Baseline); } +void BaselineCompiler::VisitDec() { + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); +} -void BaselineCompiler::VisitNegate() { BuildUnop(Builtins::kNegate_Baseline); } +void BaselineCompiler::VisitNegate() { + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); +} void BaselineCompiler::VisitBitwiseNot() { - BuildUnop(Builtins::kBitwiseNot_Baseline); + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); } void BaselineCompiler::VisitToBooleanLogicalNot() { SelectBooleanConstant(kInterpreterAccumulatorRegister, [&](Label* if_true, Label::Distance distance) { - JumpIfToBoolean(false, - kInterpreterAccumulatorRegister, - if_true, distance); + JumpIfToBoolean(false, if_true, distance); }); } @@ -1081,23 +1094,23 @@ void BaselineCompiler::VisitLogicalNot() { } void BaselineCompiler::VisitTypeOf() { - CallBuiltin(Builtins::kTypeof, kInterpreterAccumulatorRegister); + CallBuiltin(kInterpreterAccumulatorRegister); } void BaselineCompiler::VisitDeletePropertyStrict() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register scratch = scratch_scope.AcquireScratch(); __ Move(scratch, kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch, - Smi::FromEnum(LanguageMode::kStrict)); + CallBuiltin(RegisterOperand(0), scratch, + Smi::FromEnum(LanguageMode::kStrict)); } void BaselineCompiler::VisitDeletePropertySloppy() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); Register scratch = scratch_scope.AcquireScratch(); __ Move(scratch, kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kDeleteProperty, RegisterOperand(0), scratch, - Smi::FromEnum(LanguageMode::kSloppy)); + CallBuiltin(RegisterOperand(0), scratch, + Smi::FromEnum(LanguageMode::kSloppy)); } void BaselineCompiler::VisitGetSuperConstructor() { @@ -1106,87 +1119,115 @@ void BaselineCompiler::VisitGetSuperConstructor() { __ LoadPrototype(prototype, kInterpreterAccumulatorRegister); StoreRegister(0, prototype); } -template -void BaselineCompiler::BuildCall(ConvertReceiverMode mode, uint32_t slot, - uint32_t arg_count, Args... 
args) { - Builtins::Name builtin; + +namespace { +constexpr Builtins::Name ConvertReceiverModeToCompactBuiltin( + ConvertReceiverMode mode) { switch (mode) { case ConvertReceiverMode::kAny: - builtin = Builtins::kCall_ReceiverIsAny_Baseline; + return Builtins::kCall_ReceiverIsAny_Baseline_Compact; break; case ConvertReceiverMode::kNullOrUndefined: - builtin = Builtins::kCall_ReceiverIsNullOrUndefined_Baseline; + return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline_Compact; break; case ConvertReceiverMode::kNotNullOrUndefined: - builtin = Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline; + return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline_Compact; break; - default: - UNREACHABLE(); } - CallBuiltin(builtin, - RegisterOperand(0), // kFunction - arg_count, // kActualArgumentsCount - slot, // kSlot - args...); // Arguments +} +constexpr Builtins::Name ConvertReceiverModeToBuiltin( + ConvertReceiverMode mode) { + switch (mode) { + case ConvertReceiverMode::kAny: + return Builtins::kCall_ReceiverIsAny_Baseline; + break; + case ConvertReceiverMode::kNullOrUndefined: + return Builtins::kCall_ReceiverIsNullOrUndefined_Baseline; + break; + case ConvertReceiverMode::kNotNullOrUndefined: + return Builtins::kCall_ReceiverIsNotNullOrUndefined_Baseline; + break; + } +} +} // namespace + +template +void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count, + Args... args) { + uint32_t bitfield; + if (CallTrampoline_Baseline_CompactDescriptor::EncodeBitField(arg_count, slot, + &bitfield)) { + CallBuiltin( + RegisterOperand(0), // kFunction + bitfield, // kActualArgumentsCount | kSlot + args...); // Arguments + } else { + CallBuiltin( + RegisterOperand(0), // kFunction + arg_count, // kActualArgumentsCount + slot, // kSlot + args...); // Arguments + } } void BaselineCompiler::VisitCallAnyReceiver() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count() - 1; // Remove receiver. - BuildCall(ConvertReceiverMode::kAny, Index(3), arg_count, args); + BuildCall(Index(3), arg_count, args); } void BaselineCompiler::VisitCallProperty() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count() - 1; // Remove receiver. 
- BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), arg_count, - args); + BuildCall(Index(3), arg_count, + args); } void BaselineCompiler::VisitCallProperty0() { - BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(2), 0, - RegisterOperand(1)); + BuildCall(Index(2), 0, + RegisterOperand(1)); } void BaselineCompiler::VisitCallProperty1() { - BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(3), 1, - RegisterOperand(1), RegisterOperand(2)); + BuildCall( + Index(3), 1, RegisterOperand(1), RegisterOperand(2)); } void BaselineCompiler::VisitCallProperty2() { - BuildCall(ConvertReceiverMode::kNotNullOrUndefined, Index(4), 2, - RegisterOperand(1), RegisterOperand(2), RegisterOperand(3)); + BuildCall( + Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3)); } void BaselineCompiler::VisitCallUndefinedReceiver() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count(); - BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), arg_count, - RootIndex::kUndefinedValue, args); + BuildCall( + Index(3), arg_count, RootIndex::kUndefinedValue, args); } void BaselineCompiler::VisitCallUndefinedReceiver0() { - BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(1), 0, - RootIndex::kUndefinedValue); + BuildCall(Index(1), 0, + RootIndex::kUndefinedValue); } void BaselineCompiler::VisitCallUndefinedReceiver1() { - BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(2), 1, - RootIndex::kUndefinedValue, RegisterOperand(1)); + BuildCall( + Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1)); } void BaselineCompiler::VisitCallUndefinedReceiver2() { - BuildCall(ConvertReceiverMode::kNullOrUndefined, Index(3), 2, - RootIndex::kUndefinedValue, RegisterOperand(1), RegisterOperand(2)); + BuildCall( + Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1), + RegisterOperand(2)); } void BaselineCompiler::VisitCallNoFeedback() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count(); - CallBuiltin(Builtins::kCall_ReceiverIsAny, - RegisterOperand(0), // kFunction - arg_count - 1, // kActualArgumentsCount - args); + CallBuiltin( + RegisterOperand(0), // kFunction + arg_count - 1, // kActualArgumentsCount + args); } void BaselineCompiler::VisitCallWithSpread() { @@ -1198,12 +1239,12 @@ void BaselineCompiler::VisitCallWithSpread() { uint32_t arg_count = args.register_count() - 1; // Remove receiver. 
- CallBuiltin(Builtins::kCallWithSpread_Baseline, - RegisterOperand(0), // kFunction - arg_count, // kActualArgumentsCount - spread_register, // kSpread - Index(3), // kSlot - args); + CallBuiltin( + RegisterOperand(0), // kFunction + arg_count, // kActualArgumentsCount + spread_register, // kSpread + Index(3), // kSlot + args); } void BaselineCompiler::VisitCallRuntime() { @@ -1226,11 +1267,11 @@ void BaselineCompiler::VisitCallJSRuntime() { __ LoadContext(kContextRegister); __ LoadNativeContextSlot(kJavaScriptCallTargetRegister, iterator().GetNativeContextIndexOperand(0)); - CallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, - kJavaScriptCallTargetRegister, // kFunction - arg_count, // kActualArgumentsCount - RootIndex::kUndefinedValue, // kReceiver - args); + CallBuiltin( + kJavaScriptCallTargetRegister, // kFunction + arg_count, // kActualArgumentsCount + RootIndex::kUndefinedValue, // kReceiver + args); } void BaselineCompiler::VisitInvokeIntrinsic() { @@ -1301,29 +1342,25 @@ void BaselineCompiler::VisitIntrinsicIsSmi(interpreter::RegisterList args) { void BaselineCompiler::VisitIntrinsicCopyDataProperties( interpreter::RegisterList args) { - CallBuiltin(Builtins::kCopyDataProperties, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicCreateIterResultObject( interpreter::RegisterList args) { - CallBuiltin(Builtins::kCreateIterResultObject, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicHasProperty( interpreter::RegisterList args) { - CallBuiltin(Builtins::kHasProperty, args); -} - -void BaselineCompiler::VisitIntrinsicToString(interpreter::RegisterList args) { - CallBuiltin(Builtins::kToString, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicToLength(interpreter::RegisterList args) { - CallBuiltin(Builtins::kToLength, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicToObject(interpreter::RegisterList args) { - CallBuiltin(Builtins::kToObject, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) { @@ -1335,20 +1372,20 @@ void BaselineCompiler::VisitIntrinsicCall(interpreter::RegisterList args) { args = args.PopLeft(); uint32_t arg_count = args.register_count(); - CallBuiltin(Builtins::kCall_ReceiverIsAny, - kJavaScriptCallTargetRegister, // kFunction - arg_count - 1, // kActualArgumentsCount - args); + CallBuiltin( + kJavaScriptCallTargetRegister, // kFunction + arg_count - 1, // kActualArgumentsCount + args); } void BaselineCompiler::VisitIntrinsicCreateAsyncFromSyncIterator( interpreter::RegisterList args) { - CallBuiltin(Builtins::kCreateAsyncFromSyncIteratorBaseline, args[0]); + CallBuiltin(args[0]); } void BaselineCompiler::VisitIntrinsicCreateJSGeneratorObject( interpreter::RegisterList args) { - CallBuiltin(Builtins::kCreateGeneratorObject, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicGeneratorGetResumeMode( @@ -1370,69 +1407,69 @@ void BaselineCompiler::VisitIntrinsicGeneratorClose( void BaselineCompiler::VisitIntrinsicGetImportMetaObject( interpreter::RegisterList args) { - CallBuiltin(Builtins::kGetImportMetaObjectBaseline); + CallBuiltin(); } void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitCaught( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncFunctionAwaitCaught, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncFunctionAwaitUncaught( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncFunctionAwaitUncaught, args); + CallBuiltin(args); } void 
BaselineCompiler::VisitIntrinsicAsyncFunctionEnter( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncFunctionEnter, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncFunctionReject( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncFunctionReject, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncFunctionResolve( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncFunctionResolve, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitCaught( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncGeneratorAwaitCaught, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncGeneratorAwaitUncaught( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncGeneratorAwaitUncaught, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncGeneratorReject( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncGeneratorReject, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncGeneratorResolve( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncGeneratorResolve, args); + CallBuiltin(args); } void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield( interpreter::RegisterList args) { - CallBuiltin(Builtins::kAsyncGeneratorYield, args); + CallBuiltin(args); } void BaselineCompiler::VisitConstruct() { interpreter::RegisterList args = iterator().GetRegisterListOperand(1); uint32_t arg_count = args.register_count(); - CallBuiltin(Builtins::kConstruct_Baseline, - RegisterOperand(0), // kFunction - kInterpreterAccumulatorRegister, // kNewTarget - arg_count, // kActualArgumentsCount - Index(3), // kSlot - RootIndex::kUndefinedValue, // kReceiver - args); + CallBuiltin( + RegisterOperand(0), // kFunction + kInterpreterAccumulatorRegister, // kNewTarget + arg_count, // kActualArgumentsCount + Index(3), // kSlot + RootIndex::kUndefinedValue, // kReceiver + args); } void BaselineCompiler::VisitConstructWithSpread() { @@ -1444,51 +1481,50 @@ void BaselineCompiler::VisitConstructWithSpread() { uint32_t arg_count = args.register_count(); + using Descriptor = + CallInterfaceDescriptorFor::type; Register new_target = - Builtins::CallInterfaceDescriptorFor( - Builtins::kConstructWithSpread_Baseline) - .GetRegisterParameter( - ConstructWithSpread_BaselineDescriptor::kNewTarget); + Descriptor::GetRegisterParameter(Descriptor::kNewTarget); __ Move(new_target, kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kConstructWithSpread_Baseline, - RegisterOperand(0), // kFunction - new_target, // kNewTarget - arg_count, // kActualArgumentsCount - Index(3), // kSlot - spread_register, // kSpread - RootIndex::kUndefinedValue, // kReceiver - args); -} - -void BaselineCompiler::BuildCompare(Builtins::Name builtin_name) { - CallBuiltin(builtin_name, RegisterOperand(0), // lhs - kInterpreterAccumulatorRegister, // rhs - Index(1)); // slot + CallBuiltin( + RegisterOperand(0), // kFunction + new_target, // kNewTarget + arg_count, // kActualArgumentsCount + Index(3), // kSlot + spread_register, // kSpread + RootIndex::kUndefinedValue, // kReceiver + args); } void BaselineCompiler::VisitTestEqual() { - BuildCompare(Builtins::kEqual_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitTestEqualStrict() { - BuildCompare(Builtins::kStrictEqual_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void 
BaselineCompiler::VisitTestLessThan() { - BuildCompare(Builtins::kLessThan_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitTestGreaterThan() { - BuildCompare(Builtins::kGreaterThan_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitTestLessThanOrEqual() { - BuildCompare(Builtins::kLessThanOrEqual_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitTestGreaterThanOrEqual() { - BuildCompare(Builtins::kGreaterThanOrEqual_Baseline); + CallBuiltin( + RegisterOperand(0), kInterpreterAccumulatorRegister, Index(1)); } void BaselineCompiler::VisitTestReferenceEqual() { @@ -1502,21 +1538,21 @@ void BaselineCompiler::VisitTestReferenceEqual() { } void BaselineCompiler::VisitTestInstanceOf() { - Register callable = - Builtins::CallInterfaceDescriptorFor(Builtins::kInstanceOf_Baseline) - .GetRegisterParameter(Compare_BaselineDescriptor::kRight); + using Descriptor = + CallInterfaceDescriptorFor::type; + Register callable = Descriptor::GetRegisterParameter(Descriptor::kRight); __ Move(callable, kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kInstanceOf_Baseline, - RegisterOperand(0), // object - callable, // callable - Index(1)); // slot + + CallBuiltin(RegisterOperand(0), // object + callable, // callable + Index(1)); // slot } void BaselineCompiler::VisitTestIn() { - CallBuiltin(Builtins::kKeyedHasICBaseline, - kInterpreterAccumulatorRegister, // object - RegisterOperand(0), // name - IndexAsTagged(1)); // slot + CallBuiltin( + kInterpreterAccumulatorRegister, // object + RegisterOperand(0), // name + IndexAsTagged(1)); // slot } void BaselineCompiler::VisitTestUndetectable() { @@ -1727,36 +1763,36 @@ void BaselineCompiler::VisitTestTypeOf() { void BaselineCompiler::VisitToName() { SaveAccumulatorScope save_accumulator(&basm_); - CallBuiltin(Builtins::kToName, kInterpreterAccumulatorRegister); + CallBuiltin(kInterpreterAccumulatorRegister); StoreRegister(0, kInterpreterAccumulatorRegister); } void BaselineCompiler::VisitToNumber() { - CallBuiltin(Builtins::kToNumber_Baseline, kInterpreterAccumulatorRegister, - Index(0)); + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); } void BaselineCompiler::VisitToNumeric() { - CallBuiltin(Builtins::kToNumeric_Baseline, kInterpreterAccumulatorRegister, - Index(0)); + CallBuiltin(kInterpreterAccumulatorRegister, + Index(0)); } void BaselineCompiler::VisitToObject() { SaveAccumulatorScope save_accumulator(&basm_); - CallBuiltin(Builtins::kToObject, kInterpreterAccumulatorRegister); + CallBuiltin(kInterpreterAccumulatorRegister); StoreRegister(0, kInterpreterAccumulatorRegister); } void BaselineCompiler::VisitToString() { - CallBuiltin(Builtins::kToString, kInterpreterAccumulatorRegister); + CallBuiltin(kInterpreterAccumulatorRegister); } void BaselineCompiler::VisitCreateRegExpLiteral() { - CallBuiltin(Builtins::kCreateRegExpLiteral, - FeedbackVector(), // feedback vector - IndexAsTagged(1), // slot - Constant(0), // pattern - FlagAsSmi(2)); // flags + CallBuiltin( + FeedbackVector(), // feedback vector + IndexAsTagged(1), // slot + Constant(0), // pattern + FlagAsSmi(2)); // flags } void BaselineCompiler::VisitCreateArrayLiteral() { @@ -1765,11 +1801,11 @@ void BaselineCompiler::VisitCreateArrayLiteral() { interpreter::CreateArrayLiteralFlags::FlagsBits::decode(flags)); if (flags & 
interpreter::CreateArrayLiteralFlags::FastCloneSupportedBit::kMask) { - CallBuiltin(Builtins::kCreateShallowArrayLiteral, - FeedbackVector(), // feedback vector - IndexAsTagged(1), // slot - Constant(0), // constant elements - Smi::FromInt(flags_raw)); // flags + CallBuiltin( + FeedbackVector(), // feedback vector + IndexAsTagged(1), // slot + Constant(0), // constant elements + Smi::FromInt(flags_raw)); // flags } else { CallRuntime(Runtime::kCreateArrayLiteral, FeedbackVector(), // feedback vector @@ -1780,13 +1816,13 @@ void BaselineCompiler::VisitCreateArrayLiteral() { } void BaselineCompiler::VisitCreateArrayFromIterable() { - CallBuiltin(Builtins::kIterableToListWithSymbolLookup, - kInterpreterAccumulatorRegister); // iterable + CallBuiltin( + kInterpreterAccumulatorRegister); // iterable } void BaselineCompiler::VisitCreateEmptyArrayLiteral() { - CallBuiltin(Builtins::kCreateEmptyArrayLiteral, FeedbackVector(), - IndexAsTagged(0)); + CallBuiltin(FeedbackVector(), + IndexAsTagged(0)); } void BaselineCompiler::VisitCreateObjectLiteral() { @@ -1795,11 +1831,11 @@ void BaselineCompiler::VisitCreateObjectLiteral() { interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags)); if (flags & interpreter::CreateObjectLiteralFlags::FastCloneSupportedBit::kMask) { - CallBuiltin(Builtins::kCreateShallowObjectLiteral, - FeedbackVector(), // feedback vector - IndexAsTagged(1), // slot - Constant(0), // boilerplate - Smi::FromInt(flags_raw)); // flags + CallBuiltin( + FeedbackVector(), // feedback vector + IndexAsTagged(1), // slot + Constant(0), // boilerplate + Smi::FromInt(flags_raw)); // flags } else { CallRuntime(Runtime::kCreateObjectLiteral, FeedbackVector(), // feedback vector @@ -1810,39 +1846,39 @@ void BaselineCompiler::VisitCreateObjectLiteral() { } void BaselineCompiler::VisitCreateEmptyObjectLiteral() { - CallBuiltin(Builtins::kCreateEmptyLiteralObject); + CallBuiltin(); } void BaselineCompiler::VisitCloneObject() { uint32_t flags = Flag(1); int32_t raw_flags = interpreter::CreateObjectLiteralFlags::FlagsBits::decode(flags); - CallBuiltin(Builtins::kCloneObjectICBaseline, - RegisterOperand(0), // source - Smi::FromInt(raw_flags), // flags - IndexAsTagged(2)); // slot + CallBuiltin( + RegisterOperand(0), // source + Smi::FromInt(raw_flags), // flags + IndexAsTagged(2)); // slot } void BaselineCompiler::VisitGetTemplateObject() { BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_); - CallBuiltin(Builtins::kGetTemplateObject, - shared_function_info_, // shared function info - Constant(0), // description - Index(1), // slot - FeedbackVector()); // feedback_vector + CallBuiltin( + shared_function_info_, // shared function info + Constant(0), // description + Index(1), // slot + FeedbackVector()); // feedback_vector } void BaselineCompiler::VisitCreateClosure() { Register feedback_cell = - Builtins::CallInterfaceDescriptorFor(Builtins::kFastNewClosure) - .GetRegisterParameter(FastNewClosureDescriptor::kFeedbackCell); + FastNewClosureBaselineDescriptor::GetRegisterParameter( + FastNewClosureBaselineDescriptor::kFeedbackCell); LoadClosureFeedbackArray(feedback_cell); __ LoadFixedArrayElement(feedback_cell, feedback_cell, Index(1)); uint32_t flags = Flag(2); if (interpreter::CreateClosureFlags::FastNewClosureBit::decode(flags)) { - CallBuiltin(Builtins::kFastNewClosure, Constant(0), - feedback_cell); + CallBuiltin( + Constant(0), feedback_cell); } else { Runtime::FunctionId function_id = interpreter::CreateClosureFlags::PretenuredBit::decode(flags) @@ -1868,7 +1904,7 @@ void 
BaselineCompiler::VisitCreateFunctionContext() { if (slot_count < static_cast( ConstructorBuiltins::MaximumFunctionContextSlots())) { DCHECK_EQ(info->scope_type(), ScopeType::FUNCTION_SCOPE); - CallBuiltin(Builtins::kFastNewFunctionContextFunction, info, slot_count); + CallBuiltin(info, slot_count); } else { CallRuntime(Runtime::kNewFunctionContext, Constant(0)); } @@ -1880,7 +1916,7 @@ void BaselineCompiler::VisitCreateEvalContext() { if (slot_count < static_cast( ConstructorBuiltins::MaximumFunctionContextSlots())) { DCHECK_EQ(info->scope_type(), ScopeType::EVAL_SCOPE); - CallBuiltin(Builtins::kFastNewFunctionContextEval, info, slot_count); + CallBuiltin(info, slot_count); } else { CallRuntime(Runtime::kNewFunctionContext, Constant(0)); } @@ -1896,16 +1932,16 @@ void BaselineCompiler::VisitCreateMappedArguments() { if (shared_function_info_->has_duplicate_parameters()) { CallRuntime(Runtime::kNewSloppyArguments, __ FunctionOperand()); } else { - CallBuiltin(Builtins::kFastNewSloppyArguments, __ FunctionOperand()); + CallBuiltin(__ FunctionOperand()); } } void BaselineCompiler::VisitCreateUnmappedArguments() { - CallBuiltin(Builtins::kFastNewStrictArguments, __ FunctionOperand()); + CallBuiltin(__ FunctionOperand()); } void BaselineCompiler::VisitCreateRestParameter() { - CallBuiltin(Builtins::kFastNewRestArguments, __ FunctionOperand()); + CallBuiltin(__ FunctionOperand()); } void BaselineCompiler::VisitJumpLoop() { @@ -1919,7 +1955,7 @@ void BaselineCompiler::VisitJumpLoop() { int loop_depth = iterator().GetImmediateOperand(1); __ CompareByte(osr_level, loop_depth); __ JumpIf(Condition::kUnsignedLessThanEqual, &osr_not_armed); - CallBuiltin(Builtins::kBaselineOnStackReplacement); + CallBuiltin(); __ RecordComment("]"); __ Bind(&osr_not_armed); @@ -1972,16 +2008,14 @@ void BaselineCompiler::VisitJumpIfToBooleanFalseConstant() { void BaselineCompiler::VisitJumpIfToBooleanTrue() { Label dont_jump; - JumpIfToBoolean(false, kInterpreterAccumulatorRegister, &dont_jump, - Label::kNear); + JumpIfToBoolean(false, &dont_jump, Label::kNear); UpdateInterruptBudgetAndDoInterpreterJump(); __ Bind(&dont_jump); } void BaselineCompiler::VisitJumpIfToBooleanFalse() { Label dont_jump; - JumpIfToBoolean(true, kInterpreterAccumulatorRegister, &dont_jump, - Label::kNear); + JumpIfToBoolean(true, &dont_jump, Label::kNear); UpdateInterruptBudgetAndDoInterpreterJump(); __ Bind(&dont_jump); } @@ -2057,13 +2091,13 @@ void BaselineCompiler::VisitSwitchOnSmiNoFeedback() { } void BaselineCompiler::VisitForInEnumerate() { - CallBuiltin(Builtins::kForInEnumerate, RegisterOperand(0)); + CallBuiltin(RegisterOperand(0)); } void BaselineCompiler::VisitForInPrepare() { StoreRegister(0, kInterpreterAccumulatorRegister); - CallBuiltin(Builtins::kForInPrepare, kInterpreterAccumulatorRegister, - IndexAsTagged(1), FeedbackVector()); + CallBuiltin(kInterpreterAccumulatorRegister, + IndexAsTagged(1), FeedbackVector()); interpreter::Register first = iterator().GetRegisterOperand(0); interpreter::Register second(first.index() + 1); interpreter::Register third(first.index() + 2); @@ -2085,13 +2119,12 @@ void BaselineCompiler::VisitForInContinue() { void BaselineCompiler::VisitForInNext() { interpreter::Register cache_type, cache_array; std::tie(cache_type, cache_array) = iterator().GetRegisterPairOperand(2); - CallBuiltin(Builtins::kForInNext, - Index(3), // vector slot - RegisterOperand(0), // object - cache_array, // cache array - cache_type, // cache type - RegisterOperand(1), // index - FeedbackVector()); // feedback vector + 
+  CallBuiltin<Builtins::kForInNext>(Index(3),            // vector slot
+                                    RegisterOperand(0),  // object
+                                    cache_array,         // cache array
+                                    cache_type,          // cache type
+                                    RegisterOperand(1),  // index
+                                    FeedbackVector());   // feedback vector
 }
 void BaselineCompiler::VisitForInStep() {
@@ -2131,8 +2164,8 @@ void BaselineCompiler::VisitReturn() {
   int parameter_count_without_receiver =
       parameter_count - 1;  // Exclude the receiver to simplify the
                             // computation. We'll account for it at the end.
-  TailCallBuiltin(Builtins::kBaselineLeaveFrame,
-                  parameter_count_without_receiver, -profiling_weight);
+  TailCallBuiltin<Builtins::kBaselineLeaveFrame>(
+      parameter_count_without_receiver, -profiling_weight);
   __ RecordComment("]");
 }
@@ -2235,10 +2268,11 @@ void BaselineCompiler::VisitSuspendGenerator() {
     int bytecode_offset =
         BytecodeArray::kHeaderSize + iterator().current_offset();
-    CallBuiltin(Builtins::kSuspendGeneratorBaseline, generator_object,
-                static_cast<int>(Uint(3)),  // suspend_id
-                bytecode_offset,
-                static_cast<int>(RegisterCount(2)));  // register_count
+    CallBuiltin<Builtins::kSuspendGeneratorBaseline>(
+        generator_object,
+        static_cast<int>(Uint(3)),  // suspend_id
+        bytecode_offset,
+        static_cast<int>(RegisterCount(2)));  // register_count
   }
   VisitReturn();
 }
@@ -2248,26 +2282,27 @@ void BaselineCompiler::VisitResumeGenerator() {
   BaselineAssembler::ScratchRegisterScope scratch_scope(&basm_);
   Register generator_object = scratch_scope.AcquireScratch();
   LoadRegister(generator_object, 0);
-  CallBuiltin(Builtins::kResumeGeneratorBaseline, generator_object,
-              static_cast<int>(RegisterCount(2)));  // register_count
+  CallBuiltin<Builtins::kResumeGeneratorBaseline>(
+      generator_object,
+      static_cast<int>(RegisterCount(2)));  // register_count
 }
 void BaselineCompiler::VisitGetIterator() {
-  CallBuiltin(Builtins::kGetIteratorBaseline,
-              RegisterOperand(0),  // receiver
-              IndexAsTagged(1),    // load_slot
-              IndexAsTagged(2));   // call_slot
+  CallBuiltin<Builtins::kGetIteratorBaseline>(RegisterOperand(0),  // receiver
+                                              IndexAsTagged(1),    // load_slot
+                                              IndexAsTagged(2));   // call_slot
 }
 void BaselineCompiler::VisitDebugger() {
   SaveAccumulatorScope accumulator_scope(&basm_);
-  CallBuiltin(Builtins::kHandleDebuggerStatement);
+  CallRuntime(Runtime::kHandleDebuggerStatement);
 }
 void BaselineCompiler::VisitIncBlockCounter() {
   SaveAccumulatorScope accumulator_scope(&basm_);
-  CallBuiltin(Builtins::kIncBlockCounter, __ FunctionOperand(),
-              IndexAsSmi(0));  // coverage array slot
+  CallBuiltin<Builtins::kIncBlockCounter>(
+      __ FunctionOperand(),
+      IndexAsSmi(0));  // coverage array slot
 }
 void BaselineCompiler::VisitAbort() {
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index dbb2f64f6c5dbc..c86d9417e8a469 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -8,7 +8,7 @@
 // TODO(v8:11421): Remove #if once baseline compiler is ported to other
 // architectures.
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
 #include "src/base/logging.h"
 #include "src/base/threaded-list.h"
@@ -39,8 +39,10 @@ class BytecodeOffsetTableBuilder {
     previous_pc_ = pc_offset;
   }
-  template <typename LocalIsolate>
-  Handle<ByteArray> ToBytecodeOffsetTable(LocalIsolate* isolate);
+  template <typename IsolateT>
+  Handle<ByteArray> ToBytecodeOffsetTable(IsolateT* isolate);
+
+  void Reserve(size_t size) { bytes_.reserve(size); }
 private:
   size_t previous_pc_ = 0;
@@ -121,31 +123,21 @@ class BaselineCompiler {
   void SelectBooleanConstant(
       Register output, std::function<void(Label*, Label::Distance)> jump_func);
-  // Returns ToBoolean result into kInterpreterAccumulatorRegister.
- void JumpIfToBoolean(bool do_jump_if_true, Register reg, Label* label, + // Jumps based on calling ToBoolean on kInterpreterAccumulatorRegister. + void JumpIfToBoolean(bool do_jump_if_true, Label* label, Label::Distance distance = Label::kFar); // Call helpers. - template - void CallBuiltin(Builtins::Name builtin, Args... args); + template + void CallBuiltin(Args... args); template void CallRuntime(Runtime::FunctionId function, Args... args); - template - void TailCallBuiltin(Builtins::Name builtin, Args... args); + template + void TailCallBuiltin(Args... args); - void BuildBinop( - Builtins::Name builtin_name, bool fast_path = false, - bool check_overflow = false, - std::function instruction = [](Register, - Register) {}); - void BuildUnop(Builtins::Name builtin_name); - void BuildCompare(Builtins::Name builtin_name); - void BuildBinopWithConstant(Builtins::Name builtin_name); - - template - void BuildCall(ConvertReceiverMode mode, uint32_t slot, uint32_t arg_count, - Args... args); + template + void BuildCall(uint32_t slot, uint32_t arg_count, Args... args); #ifdef V8_TRACE_UNOPTIMIZED void TraceBytecode(Runtime::FunctionId function_id); diff --git a/deps/v8/src/baseline/baseline-osr-inl.h b/deps/v8/src/baseline/baseline-osr-inl.h new file mode 100644 index 00000000000000..d37007f9cf626c --- /dev/null +++ b/deps/v8/src/baseline/baseline-osr-inl.h @@ -0,0 +1,38 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +#ifndef V8_BASELINE_BASELINE_OSR_INL_H_ +#define V8_BASELINE_BASELINE_OSR_INL_H_ + +#include "src/execution/frames.h" +#include "src/execution/isolate-inl.h" + +namespace v8 { +namespace internal { + +inline void OSRInterpreterFrameToBaseline(Isolate* isolate, + Handle function, + UnoptimizedFrame* frame) { + IsCompiledScope is_compiled_scope( + function->shared().is_compiled_scope(isolate)); + if (Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION, + &is_compiled_scope)) { + if (V8_LIKELY(FLAG_use_osr)) { + DCHECK_NOT_NULL(frame); + if (FLAG_trace_osr) { + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintF(scope.file(), + "[OSR - Entry at OSR bytecode offset %d into baseline code]\n", + frame->GetBytecodeOffset()); + } + frame->GetBytecodeArray().set_osr_loop_nesting_level( + AbstractCode::kMaxLoopNestingMarker); + } + } +} + +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_BASELINE_OSR_INL_H_ diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc index b5355660f94e1d..c7cc130c5edf5b 100644 --- a/deps/v8/src/baseline/baseline.cc +++ b/deps/v8/src/baseline/baseline.cc @@ -5,14 +5,16 @@ #include "src/baseline/baseline.h" #include "src/handles/maybe-handles.h" +#include "src/objects/shared-function-info.h" // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. 
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 #include "src/baseline/baseline-assembler-inl.h" #include "src/baseline/baseline-compiler.h" +#include "src/debug/debug.h" #include "src/heap/factory-inl.h" #include "src/logging/counters.h" #include "src/objects/script-inl.h" @@ -21,10 +23,36 @@ namespace v8 { namespace internal { +bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) { + DisallowGarbageCollection no_gc; + + if (!FLAG_sparkplug) return false; + + // Check that short builtin calls are enabled if needed. + if (FLAG_sparkplug_needs_short_builtins && + !isolate->is_short_builtin_calls_enabled()) { + return false; + } + + // Check if we actually have bytecode. + if (!shared.HasBytecodeArray()) return false; + + // Do not optimize when debugger needs to hook into every call. + if (isolate->debug()->needs_check_on_function_call()) return false; + + // Functions with breakpoints have to stay interpreted. + if (shared.HasBreakInfo()) return false; + + // Do not baseline compile if sparkplug is disabled or function doesn't pass + // sparkplug_filter. + if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false; + + return true; +} + MaybeHandle GenerateBaselineCode(Isolate* isolate, Handle shared) { - RuntimeCallTimerScope runtimeTimer(isolate, - RuntimeCallCounterId::kCompileBaseline); + RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileBaseline); baseline::BaselineCompiler compiler( isolate, shared, handle(shared->GetBytecodeArray(isolate), isolate)); @@ -48,6 +76,10 @@ void EmitReturnBaseline(MacroAssembler* masm) { namespace v8 { namespace internal { +bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) { + return false; +} + MaybeHandle GenerateBaselineCode(Isolate* isolate, Handle shared) { UNREACHABLE(); diff --git a/deps/v8/src/baseline/baseline.h b/deps/v8/src/baseline/baseline.h index 2dba2d9674b194..10a6e25e4fbb94 100644 --- a/deps/v8/src/baseline/baseline.h +++ b/deps/v8/src/baseline/baseline.h @@ -14,6 +14,8 @@ class Code; class SharedFunctionInfo; class MacroAssembler; +bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared); + MaybeHandle GenerateBaselineCode(Isolate* isolate, Handle shared); diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h index 2cd34aef710db5..8babb4a5b7bda3 100644 --- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h +++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h @@ -122,13 +122,13 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target, void BaselineAssembler::CallBuiltin(Builtins::Name builtin) { __ RecordCommentForOffHeapTrampoline(builtin); __ Call(__ EntryFromBuiltinIndexAsOperand(builtin)); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) { __ RecordCommentForOffHeapTrampoline(builtin); __ jmp(__ EntryFromBuiltinIndexAsOperand(builtin)); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } void BaselineAssembler::Test(Register value, int mask) { @@ -147,7 +147,7 @@ void BaselineAssembler::CmpObjectType(Register object, } void BaselineAssembler::CmpInstanceType(Register map, InstanceType instance_type) { - if (emit_debug_code()) { + if (FLAG_debug_code) { __ movd(xmm0, eax); __ AssertNotSmi(map); __ CmpObjectType(map, MAP_TYPE, eax); @@ -320,7 +320,7 @@ void 
BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, Register scratch = scratch_scope.AcquireScratch(); DCHECK(!AreAliased(scratch, target, value)); __ mov(FieldOperand(target, offset), value); - __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs); + __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore); } void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, int offset, diff --git a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h index 733c05fe1854e2..4d09f536653afe 100644 --- a/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h +++ b/deps/v8/src/baseline/ia32/baseline-compiler-ia32-inl.h @@ -18,9 +18,9 @@ namespace baseline { void BaselineCompiler::Prologue() { DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); int max_frame_size = bytecode_->frame_size() + max_call_args_; - CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister, - kJSFunctionRegister, kJavaScriptCallArgCountRegister, - max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); PrologueFillFrame(); } diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h new file mode 100644 index 00000000000000..e0667d3472b17a --- /dev/null +++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h @@ -0,0 +1,615 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ +#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ + +#include "src/baseline/baseline-assembler.h" +#include "src/codegen/assembler-inl.h" +#include "src/codegen/interface-descriptors.h" +namespace v8 { +namespace internal { +namespace baseline { + +constexpr Register kTestReg = t0; +class BaselineAssembler::ScratchRegisterScope { + public: + explicit ScratchRegisterScope(BaselineAssembler* assembler) + : assembler_(assembler), + prev_scope_(assembler->scratch_register_scope_), + wrapped_scope_(assembler->masm()) { + if (!assembler_->scratch_register_scope_) { + // If we haven't opened a scratch scope yet, for the first one add a + // couple of extra registers. 
+ wrapped_scope_.Include(t2, t4); + } + assembler_->scratch_register_scope_ = this; + } + ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; } + + Register AcquireScratch() { return wrapped_scope_.Acquire(); } + + private: + BaselineAssembler* assembler_; + ScratchRegisterScope* prev_scope_; + UseScratchRegisterScope wrapped_scope_; +}; + +enum class Condition : uint32_t { + kEqual = eq, + kNotEqual = ne, + + kLessThan = lt, + kGreaterThan = gt, + kLessThanEqual = le, + kGreaterThanEqual = ge, + + kUnsignedLessThan = Uless, + kUnsignedGreaterThan = Ugreater, + kUnsignedLessThanEqual = Uless_equal, + kUnsignedGreaterThanEqual = Ugreater_equal, + + kOverflow = overflow, + kNoOverflow = no_overflow, + + kZero = eq, + kNotZero = ne, +}; + +inline internal::Condition AsMasmCondition(Condition cond) { + return static_cast(cond); +} + +namespace detail { + +#ifdef DEBUG +inline bool Clobbers(Register target, MemOperand op) { + return op.is_reg() && op.rm() == target; +} +#endif + +} // namespace detail + +#define __ masm_-> + +MemOperand BaselineAssembler::RegisterFrameOperand( + interpreter::Register interpreter_register) { + return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize); +} +MemOperand BaselineAssembler::FeedbackVectorOperand() { + return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp); +} + +void BaselineAssembler::Bind(Label* label) { __ bind(label); } + +void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); } + +void BaselineAssembler::JumpTarget() { + // Nop +} + +void BaselineAssembler::Jump(Label* target, Label::Distance distance) { + __ jmp(target); +} +void BaselineAssembler::JumpIf(Condition cc, Label* target, Label::Distance) { + __ Branch(target, AsMasmCondition(cc), kTestReg, Operand((int64_t)0)); +} +void BaselineAssembler::JumpIfRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfRoot(value, index, target); +} +void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index, + Label* target, Label::Distance) { + __ JumpIfNotRoot(value, index, target); +} +void BaselineAssembler::JumpIfSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfSmi(value, target); +} +void BaselineAssembler::JumpIfNotSmi(Register value, Label* target, + Label::Distance) { + __ JumpIfSmi(value, target); +} + +void BaselineAssembler::CallBuiltin(Builtins::Name builtin) { + if (masm()->options().short_builtin_calls) { + __ CallBuiltin(builtin); + } else { + __ RecordCommentForOffHeapTrampoline(builtin); + Register temp = t6; + __ LoadEntryFromBuiltinIndex(builtin, temp); + __ Call(temp); + __ RecordComment("]"); + } +} + +void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) { + if (masm()->options().short_builtin_calls) { + // Generate pc-relative jump. 
+ __ TailCallBuiltin(builtin); + } else { + __ RecordCommentForOffHeapTrampoline(builtin); + // t6 be used for function call in RISCV64 + // For example 'jalr t6' or 'jal t6' + Register temp = t6; + __ LoadEntryFromBuiltinIndex(builtin, temp); + __ Jump(temp); + __ RecordComment("]"); + } +} + +void BaselineAssembler::Test(Register value, int mask) { + __ And(kTestReg, value, Operand(mask)); +} + +void BaselineAssembler::CmpObjectType(Register object, + InstanceType instance_type, + Register map) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + __ GetObjectType(object, map, type); + __ Sub64(kTestReg, type, Operand(instance_type)); +} +void BaselineAssembler::CmpInstanceType(Register value, + InstanceType instance_type) { + ScratchRegisterScope temps(this); + Register type = temps.AcquireScratch(); + __ Ld(type, FieldMemOperand(value, Map::kInstanceTypeOffset)); + __ Sub64(kTestReg, type, Operand(instance_type)); +} + +void BaselineAssembler::Cmp(Register value, Smi smi) { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ li(temp, Operand(smi)); + __ SmiUntag(temp); + __ Sub64(kTestReg, value, temp); +} +void BaselineAssembler::ComparePointer(Register value, MemOperand operand) { + ScratchRegisterScope temps(this); + Register temp = temps.AcquireScratch(); + __ Ld(temp, operand); + __ Sub64(kTestReg, value, temp); +} + +void BaselineAssembler::SmiCompare(Register lhs, Register rhs) { + __ AssertSmi(lhs); + __ AssertSmi(rhs); + if (COMPRESS_POINTERS_BOOL) { + __ Sub32(kTestReg, lhs, rhs); + } else { + __ Sub64(kTestReg, lhs, rhs); + } +} +void BaselineAssembler::CompareTagged(Register value, MemOperand operand) { + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ Ld(tmp, operand); + if (COMPRESS_POINTERS_BOOL) { + __ Sub32(kTestReg, value, tmp); + } else { + __ Sub64(kTestReg, value, tmp); + } +} +void BaselineAssembler::CompareTagged(MemOperand operand, Register value) { + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ Ld(tmp, operand); + if (COMPRESS_POINTERS_BOOL) { + __ Sub32(kTestReg, tmp, value); + } else { + __ Sub64(kTestReg, tmp, value); + } +} + +void BaselineAssembler::CompareByte(Register value, int32_t byte) { + __ Sub64(kTestReg, value, Operand(byte)); +} + +void BaselineAssembler::Move(interpreter::Register output, Register source) { + Move(RegisterFrameOperand(output), source); +} +void BaselineAssembler::Move(Register output, TaggedIndex value) { + __ li(output, Operand(value.ptr())); +} +void BaselineAssembler::Move(MemOperand output, Register source) { + __ Sd(source, output); +} +void BaselineAssembler::Move(Register output, ExternalReference reference) { + __ li(output, Operand(reference)); +} +void BaselineAssembler::Move(Register output, Handle value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::Move(Register output, int32_t value) { + __ li(output, Operand(value)); +} +void BaselineAssembler::MoveMaybeSmi(Register output, Register source) { + __ Move(output, source); +} +void BaselineAssembler::MoveSmi(Register output, Register source) { + __ Move(output, source); +} + +namespace detail { + +template +inline Register ToRegister(BaselineAssembler* basm, + BaselineAssembler::ScratchRegisterScope* scope, + Arg arg) { + Register reg = scope->AcquireScratch(); + basm->Move(reg, arg); + return reg; +} +inline Register ToRegister(BaselineAssembler* basm, + BaselineAssembler::ScratchRegisterScope* scope, + Register reg) { + 
return reg; +} + +template +struct CountPushHelper; +template <> +struct CountPushHelper<> { + static int Count() { return 0; } +}; +template +struct CountPushHelper { + static int Count(Arg arg, Args... args) { + return 1 + CountPushHelper::Count(args...); + } +}; +template +struct CountPushHelper { + static int Count(interpreter::RegisterList list, Args... args) { + return list.register_count() + CountPushHelper::Count(args...); + } +}; + +template +struct PushAllHelper; +template +void PushAll(BaselineAssembler* basm, Args... args) { + PushAllHelper::Push(basm, args...); +} +template +void PushAllReverse(BaselineAssembler* basm, Args... args) { + PushAllHelper::PushReverse(basm, args...); +} + +template <> +struct PushAllHelper<> { + static void Push(BaselineAssembler* basm) {} + static void PushReverse(BaselineAssembler* basm) {} +}; + +inline void PushSingle(MacroAssembler* masm, RootIndex source) { + masm->PushRoot(source); +} +inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); } + +inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); } +inline void PushSingle(MacroAssembler* masm, Handle object) { + masm->Push(object); +} +inline void PushSingle(MacroAssembler* masm, int32_t immediate) { + masm->li(kScratchReg, (int64_t)(immediate)); + PushSingle(masm, kScratchReg); +} + +inline void PushSingle(MacroAssembler* masm, TaggedIndex value) { + masm->li(kScratchReg, static_cast(value.ptr())); + PushSingle(masm, kScratchReg); +} +inline void PushSingle(MacroAssembler* masm, MemOperand operand) { + masm->Ld(kScratchReg, operand); + PushSingle(masm, kScratchReg); +} +inline void PushSingle(MacroAssembler* masm, interpreter::Register source) { + return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source)); +} + +template +struct PushAllHelper { + static void Push(BaselineAssembler* basm, Arg arg) { + PushSingle(basm->masm(), arg); + } + static void PushReverse(BaselineAssembler* basm, Arg arg) { + // Push the padding register to round up the amount of values pushed. + return Push(basm, arg); + } +}; +template +struct PushAllHelper { + static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2, + Args... args) { + { + BaselineAssembler::ScratchRegisterScope scope(basm); + basm->masm()->Push(ToRegister(basm, &scope, arg1), + ToRegister(basm, &scope, arg2)); + } + PushAll(basm, args...); + } + static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2, + Args... args) { + PushAllReverse(basm, args...); + { + BaselineAssembler::ScratchRegisterScope scope(basm); + basm->masm()->Push(ToRegister(basm, &scope, arg2), + ToRegister(basm, &scope, arg1)); + } + } +}; +// Currently RegisterLists are always be the last argument, so we don't +// specialize for the case where they're not. We do still specialise for the +// aligned and unaligned cases. 
+template +struct PushAllHelper { + static void Push(BaselineAssembler* basm, Arg arg, + interpreter::RegisterList list) { + DCHECK_EQ(list.register_count() % 2, 1); + PushAll(basm, arg, list[0], list.PopLeft()); + } + static void PushReverse(BaselineAssembler* basm, Arg arg, + interpreter::RegisterList list) { + if (list.register_count() == 0) { + PushAllReverse(basm, arg); + } else { + PushAllReverse(basm, arg, list[0], list.PopLeft()); + } + } +}; +template <> +struct PushAllHelper { + static void Push(BaselineAssembler* basm, interpreter::RegisterList list) { + DCHECK_EQ(list.register_count() % 2, 0); + for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) { + PushAll(basm, list[reg_index], list[reg_index + 1]); + } + } + static void PushReverse(BaselineAssembler* basm, + interpreter::RegisterList list) { + int reg_index = list.register_count() - 1; + if (reg_index % 2 == 0) { + // Push the padding register to round up the amount of values pushed. + PushAllReverse(basm, list[reg_index]); + reg_index--; + } + for (; reg_index >= 1; reg_index -= 2) { + PushAllReverse(basm, list[reg_index - 1], list[reg_index]); + } + } +}; + +template +struct PopAllHelper; +template <> +struct PopAllHelper<> { + static void Pop(BaselineAssembler* basm) {} +}; +template <> +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg) { + basm->masm()->Pop(reg); + } +}; +template +struct PopAllHelper { + static void Pop(BaselineAssembler* basm, Register reg1, Register reg2, + T... tail) { + basm->masm()->Pop(reg1, reg2); + PopAllHelper::Pop(basm, tail...); + } +}; + +} // namespace detail + +template +int BaselineAssembler::Push(T... vals) { + // We have to count the pushes first, to decide whether to add padding before + // the first push. + int push_count = detail::CountPushHelper::Count(vals...); + if (push_count % 2 == 0) { + detail::PushAll(this, vals...); + } else { + detail::PushAll(this, vals...); + } + return push_count; +} + +template +void BaselineAssembler::PushReverse(T... vals) { + detail::PushAllReverse(this, vals...); +} + +template +void BaselineAssembler::Pop(T... 
registers) { + detail::PopAllHelper::Pop(this, registers...); +} + +void BaselineAssembler::LoadTaggedPointerField(Register output, Register source, + int offset) { + // FIXME(riscv64): riscv64 don't implement pointer compressed + // __ LoadTaggedPointerField(output, FieldMemOperand(source, offset)); + __ Ld(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedSignedField(Register output, Register source, + int offset) { + // FIXME(riscv64): riscv64 don't implement pointer compressed + __ Ld(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadTaggedAnyField(Register output, Register source, + int offset) { + // FIXME(riscv64): riscv64 don't implement pointer compressed + __ Ld(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::LoadByteField(Register output, Register source, + int offset) { + __ Ld(output, FieldMemOperand(source, offset)); +} +void BaselineAssembler::StoreTaggedSignedField(Register target, int offset, + Smi value) { + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ li(tmp, Operand(value)); + // FIXME(riscv64): riscv64 don't implement pointer compressed + __ Sd(tmp, FieldMemOperand(target, offset)); +} +void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, + int offset, + Register value) { + // FIXME(riscv64): riscv64 don't implement pointer compressed + __ Sd(value, FieldMemOperand(target, offset)); + ScratchRegisterScope temps(this); + Register tmp = temps.AcquireScratch(); + __ RecordWriteField(target, offset, value, tmp, kRAHasNotBeenSaved, + SaveFPRegsMode::kIgnore); +} +void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, + int offset, + Register value) { + // FIXME(riscv64): riscv64 don't implement pointer compressed + __ Sd(value, FieldMemOperand(target, offset)); +} + +void BaselineAssembler::AddToInterruptBudget(int32_t weight) { + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Ld(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! + __ Add64(interrupt_budget, interrupt_budget, weight); + __ Sd(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); +} + +void BaselineAssembler::AddToInterruptBudget(Register weight) { + ScratchRegisterScope scratch_scope(this); + Register feedback_cell = scratch_scope.AcquireScratch(); + LoadFunction(feedback_cell); + LoadTaggedPointerField(feedback_cell, feedback_cell, + JSFunction::kFeedbackCellOffset); + + Register interrupt_budget = scratch_scope.AcquireScratch(); + __ Ld(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); + // Remember to set flags as part of the add! 
+ __ Add64(interrupt_budget, interrupt_budget, weight); + __ Sd(interrupt_budget, + FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset)); +} + +void BaselineAssembler::AddSmi(Register lhs, Smi rhs) { + if (SmiValuesAre31Bits()) { + __ Add32(lhs, lhs, Operand(rhs)); + } else { + __ Add64(lhs, lhs, Operand(rhs)); + } +} + +void BaselineAssembler::Switch(Register reg, int case_value_base, + Label** labels, int num_labels) { + Label fallthrough; + if (case_value_base > 0) { + __ Sub64(reg, reg, Operand(case_value_base)); + } + + // Mostly copied from code-generator-riscv64.cc + ScratchRegisterScope scope(this); + Register temp = scope.AcquireScratch(); + Label table; + __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual), + reg, Operand(int64_t(num_labels))); + int64_t imm64; + imm64 = __ branch_long_offset(&table); + DCHECK(is_int32(imm64)); + int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12); + int32_t Lo12 = (int32_t)imm64 << 20 >> 20; + __ auipc(temp, Hi20); // Read PC + Hi20 into t6 + __ lui(temp, Lo12); // jump PC + Hi20 + Lo12 + + int entry_size_log2 = 2; + Register temp2 = scope.AcquireScratch(); + __ CalcScaledAddress(temp2, temp, reg, entry_size_log2); + __ Jump(temp); + { + TurboAssembler::BlockTrampolinePoolScope(masm()); + __ BlockTrampolinePoolFor(num_labels * kInstrSize); + __ bind(&table); + for (int i = 0; i < num_labels; ++i) { + __ Branch(labels[i]); + } + DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table)); + __ bind(&fallthrough); + } +} + +#undef __ + +#define __ basm. + +void BaselineAssembler::EmitReturn(MacroAssembler* masm) { + BaselineAssembler basm(masm); + + Register weight = BaselineLeaveFrameDescriptor::WeightRegister(); + Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister(); + + __ RecordComment("[ Update Interrupt Budget"); + __ AddToInterruptBudget(weight); + + // Use compare flags set by add + Label skip_interrupt_label; + __ JumpIf(Condition::kGreaterThanEqual, &skip_interrupt_label); + { + __ masm()->SmiTag(params_size); + __ masm()->Push(params_size, kInterpreterAccumulatorRegister); + + __ LoadContext(kContextRegister); + __ LoadFunction(kJSFunctionRegister); + __ masm()->Push(kJSFunctionRegister); + __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1); + + __ masm()->Pop(kInterpreterAccumulatorRegister, params_size); + __ masm()->SmiUntag(params_size); + } + __ RecordComment("]"); + + __ Bind(&skip_interrupt_label); + + BaselineAssembler::ScratchRegisterScope temps(&basm); + Register actual_params_size = temps.AcquireScratch(); + // Compute the size of the actual parameters + receiver (in bytes). + __ Move(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); + + // If actual is bigger than formal, then we should use it to free up the stack + // arguments. + Label corrected_args_count; + __ masm()->Branch(&corrected_args_count, ge, params_size, + Operand(actual_params_size)); + __ masm()->Move(params_size, actual_params_size); + __ Bind(&corrected_args_count); + + // Leave the frame (also dropping the register file). + __ masm()->LeaveFrame(StackFrame::BASELINE); + + // Drop receiver + arguments. + __ masm()->Add64(params_size, params_size, 1); // Include the receiver. 
+ __ masm()->slli(params_size, params_size, kPointerSizeLog2); + __ masm()->Add64(sp, sp, params_size); + __ masm()->Ret(); +} + +#undef __ + +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_ diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h new file mode 100644 index 00000000000000..98ca62e30341b8 --- /dev/null +++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h @@ -0,0 +1,112 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ +#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ + +#include "src/baseline/baseline-compiler.h" + +namespace v8 { +namespace internal { +namespace baseline { + +#define __ basm_. + +void BaselineCompiler::Prologue() { + // Enter the frame here, since CallBuiltin will override lr. + __ masm()->EnterFrame(StackFrame::BASELINE); + DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); + int max_frame_size = bytecode_->frame_size() + max_call_args_; + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + PrologueFillFrame(); +} + +void BaselineCompiler::PrologueFillFrame() { + __ RecordComment("[ Fill frame"); + // Inlined register frame fill + interpreter::Register new_target_or_generator_register = + bytecode_->incoming_new_target_or_generator_register(); + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + int register_count = bytecode_->register_count(); + // Magic value + const int kLoopUnrollSize = 8; + const int new_target_index = new_target_or_generator_register.index(); + const bool has_new_target = new_target_index != kMaxInt; + // BaselineOutOfLinePrologue already pushed one undefined. + register_count -= 1; + if (has_new_target) { + if (new_target_index == 0) { + // Oops, need to fix up that undefined that BaselineOutOfLinePrologue + // pushed. + __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp)); + } else { + DCHECK_LE(new_target_index, register_count); + int index = 1; + for (; index + 2 <= new_target_index; index += 2) { + __ masm()->Push(kInterpreterAccumulatorRegister, + kInterpreterAccumulatorRegister); + } + if (index == new_target_index) { + __ masm()->Push(kJavaScriptCallNewTargetRegister, + kInterpreterAccumulatorRegister); + } else { + DCHECK_EQ(index, new_target_index - 1); + __ masm()->Push(kInterpreterAccumulatorRegister, + kJavaScriptCallNewTargetRegister); + } + // We pushed "index" registers, minus the one the prologue pushed, plus + // the two registers that included new_target. + register_count -= (index - 1 + 2); + } + } + if (register_count < 2 * kLoopUnrollSize) { + // If the frame is small enough, just unroll the frame fill completely. + for (int i = 0; i < register_count; i += 2) { + __ masm()->Push(kInterpreterAccumulatorRegister, + kInterpreterAccumulatorRegister); + } + } else { + BaselineAssembler::ScratchRegisterScope temps(&basm_); + Register scratch = temps.AcquireScratch(); + + // Extract the first few registers to round to the unroll size. 
+ int first_registers = register_count % kLoopUnrollSize; + for (int i = 0; i < first_registers; i += 2) { + __ masm()->Push(kInterpreterAccumulatorRegister, + kInterpreterAccumulatorRegister); + } + __ Move(scratch, register_count / kLoopUnrollSize); + // We enter the loop unconditionally, so make sure we need to loop at least + // once. + DCHECK_GT(register_count / kLoopUnrollSize, 0); + Label loop; + __ Bind(&loop); + for (int i = 0; i < kLoopUnrollSize; i += 2) { + __ masm()->Push(kInterpreterAccumulatorRegister, + kInterpreterAccumulatorRegister); + } + __ masm()->Sub64(scratch, scratch, 1); + __ JumpIf(Condition::kGreaterThan, &loop); + } + __ RecordComment("]"); +} + +void BaselineCompiler::VerifyFrameSize() { + __ masm()->Add64(kScratchReg, sp, + RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp + + bytecode_->frame_size(), + 2 * kSystemPointerSize)); + __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg, + Operand(fp)); +} + +#undef __ + +} // namespace baseline +} // namespace internal +} // namespace v8 + +#endif // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_ diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h index 202f83c7615c76..98ed29a9cae404 100644 --- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h +++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h @@ -7,7 +7,6 @@ #include "src/base/macros.h" #include "src/baseline/baseline-assembler.h" -#include "src/codegen/interface-descriptors.h" #include "src/codegen/x64/register-x64.h" namespace v8 { @@ -129,7 +128,7 @@ void BaselineAssembler::CallBuiltin(Builtins::Name builtin) { } else { __ RecordCommentForOffHeapTrampoline(builtin); __ Call(__ EntryFromBuiltinIndexAsOperand(builtin)); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } } @@ -140,7 +139,7 @@ void BaselineAssembler::TailCallBuiltin(Builtins::Name builtin) { } else { __ RecordCommentForOffHeapTrampoline(builtin); __ Jump(__ EntryFromBuiltinIndexAsOperand(builtin)); - if (FLAG_code_comments) __ RecordComment("]"); + __ RecordComment("]"); } } @@ -160,7 +159,7 @@ void BaselineAssembler::CmpObjectType(Register object, } void BaselineAssembler::CmpInstanceType(Register map, InstanceType instance_type) { - if (emit_debug_code()) { + if (FLAG_debug_code) { __ AssertNotSmi(map); __ CmpObjectType(map, MAP_TYPE, kScratchRegister); __ Assert(equal, AbortReason::kUnexpectedValue); @@ -201,7 +200,7 @@ void BaselineAssembler::Move(Register output, Handle value) { __ Move(output, value); } void BaselineAssembler::Move(Register output, int32_t value) { - __ Move(output, Immediate(value)); + __ Move(output, value); } void BaselineAssembler::MoveMaybeSmi(Register output, Register source) { __ mov_tagged(output, source); @@ -326,7 +325,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target, DCHECK_NE(target, scratch); DCHECK_NE(value, scratch); __ StoreTaggedField(FieldOperand(target, offset), value); - __ RecordWriteField(target, offset, value, scratch, kDontSaveFPRegs); + __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore); } void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target, int offset, diff --git a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h index 73b43770e567bf..a4d547b06712bf 100644 --- a/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h +++ b/deps/v8/src/baseline/x64/baseline-compiler-x64-inl.h @@ 
-18,9 +18,9 @@ namespace baseline { void BaselineCompiler::Prologue() { DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister); int max_frame_size = bytecode_->frame_size() + max_call_args_; - CallBuiltin(Builtins::kBaselineOutOfLinePrologue, kContextRegister, - kJSFunctionRegister, kJavaScriptCallArgCountRegister, - max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); + CallBuiltin( + kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister, + max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_); PrologueFillFrame(); } diff --git a/deps/v8/src/bigint/bigint-internal.cc b/deps/v8/src/bigint/bigint-internal.cc new file mode 100644 index 00000000000000..6630c6c4c9d5fd --- /dev/null +++ b/deps/v8/src/bigint/bigint-internal.cc @@ -0,0 +1,43 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/bigint/bigint-internal.h" + +namespace v8 { +namespace bigint { + +ProcessorImpl::ProcessorImpl(Platform* platform) : platform_(platform) {} + +ProcessorImpl::~ProcessorImpl() { delete platform_; } + +Status ProcessorImpl::get_and_clear_status() { + Status result = status_; + status_ = Status::kOk; + return result; +} + +Processor* Processor::New(Platform* platform) { + ProcessorImpl* impl = new ProcessorImpl(platform); + return static_cast(impl); +} + +void Processor::Destroy() { delete static_cast(this); } + +void ProcessorImpl::Multiply(RWDigits Z, Digits X, Digits Y) { + X.Normalize(); + Y.Normalize(); + if (X.len() == 0 || Y.len() == 0) return Z.Clear(); + if (X.len() < Y.len()) std::swap(X, Y); + if (Y.len() == 1) return MultiplySingle(Z, X, Y[0]); + return MultiplySchoolbook(Z, X, Y); +} + +Status Processor::Multiply(RWDigits Z, Digits X, Digits Y) { + ProcessorImpl* impl = static_cast(this); + impl->Multiply(Z, X, Y); + return impl->get_and_clear_status(); +} + +} // namespace bigint +} // namespace v8 diff --git a/deps/v8/src/bigint/bigint-internal.h b/deps/v8/src/bigint/bigint-internal.h new file mode 100644 index 00000000000000..efe63a06a51f48 --- /dev/null +++ b/deps/v8/src/bigint/bigint-internal.h @@ -0,0 +1,65 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BIGINT_BIGINT_INTERNAL_H_ +#define V8_BIGINT_BIGINT_INTERNAL_H_ + +#include "src/bigint/bigint.h" + +namespace v8 { +namespace bigint { + +class ProcessorImpl : public Processor { + public: + explicit ProcessorImpl(Platform* platform); + ~ProcessorImpl(); + + Status get_and_clear_status(); + + void Multiply(RWDigits Z, Digits X, Digits Y); + void MultiplySingle(RWDigits Z, Digits X, digit_t y); + void MultiplySchoolbook(RWDigits Z, Digits X, Digits Y); + + private: + // Each unit is supposed to represent approximately one CPU {mul} instruction. + // Doesn't need to be accurate; we just want to make sure to check for + // interrupt requests every now and then (roughly every 10-100 ms; often + // enough not to appear stuck, rarely enough not to cause noticeable + // overhead). 
+ static const uintptr_t kWorkEstimateThreshold = 5000000; + + void AddWorkEstimate(uintptr_t estimate) { + work_estimate_ += estimate; + if (work_estimate_ >= kWorkEstimateThreshold) { + work_estimate_ = 0; + if (platform_->InterruptRequested()) { + status_ = Status::kInterrupted; + } + } + } + + bool should_terminate() { return status_ == Status::kInterrupted; } + + uintptr_t work_estimate_{0}; + Status status_{Status::kOk}; + Platform* platform_; +}; + +#define CHECK(cond) \ + if (!(cond)) { \ + std::cerr << __FILE__ << ":" << __LINE__ << ": "; \ + std::cerr << "Assertion failed: " #cond "\n"; \ + abort(); \ + } + +#ifdef DEBUG +#define DCHECK(cond) CHECK(cond) +#else +#define DCHECK(cond) (void(0)) +#endif + +} // namespace bigint +} // namespace v8 + +#endif // V8_BIGINT_BIGINT_INTERNAL_H_ diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h index a87622b167a700..a365359c530731 100644 --- a/deps/v8/src/bigint/bigint.h +++ b/deps/v8/src/bigint/bigint.h @@ -120,9 +120,117 @@ class Digits { } }; +// Writable version of a Digits array. +// Does not own the memory it points at. +class RWDigits : public Digits { + public: + RWDigits(digit_t* mem, int len) : Digits(mem, len) {} + RWDigits(RWDigits src, int offset, int len) : Digits(src, offset, len) {} + RWDigits operator+(int i) { + BIGINT_H_DCHECK(i >= 0 && i <= len_); + return RWDigits(digits_ + i, len_ - i); + } + +#if UINTPTR_MAX == 0xFFFFFFFF + digit_t& operator[](int i) { + BIGINT_H_DCHECK(i >= 0 && i < len_); + return digits_[i]; + } +#else + // 64-bit platform. We only require digits arrays to be 4-byte aligned, + // so we use a wrapper class to allow regular array syntax while + // performing unaligned memory accesses under the hood. + class WritableDigitReference { + public: + // Support "X[i] = x" notation. + void operator=(digit_t digit) { memcpy(ptr_, &digit, sizeof(digit)); } + // Support "X[i] = Y[j]" notation. + WritableDigitReference& operator=(const WritableDigitReference& src) { + memcpy(ptr_, src.ptr_, sizeof(digit_t)); + return *this; + } + // Support "x = X[i]" notation. + operator digit_t() { + digit_t result; + memcpy(&result, ptr_, sizeof(result)); + return result; + } + + private: + // This class is not for public consumption. + friend class RWDigits; + // Primary constructor. + explicit WritableDigitReference(digit_t* ptr) + : ptr_(reinterpret_cast(ptr)) {} + // Required for returning WDR instances from "operator[]" below. + WritableDigitReference(const WritableDigitReference& src) = default; + + uint32_t* ptr_; + }; + + WritableDigitReference operator[](int i) { + BIGINT_H_DCHECK(i >= 0 && i < len_); + return WritableDigitReference(digits_ + i); + } +#endif + + digit_t* digits() { return digits_; } + void set_len(int len) { len_ = len; } + + void Clear() { memset(digits_, 0, len_ * sizeof(digit_t)); } +}; + +class Platform { + public: + virtual ~Platform() = default; + + // If you want the ability to interrupt long-running operations, implement + // a Platform subclass that overrides this method. It will be queried + // every now and then by long-running operations. + virtual bool InterruptRequested() { return false; } +}; + +// These are the operations that this library supports. +// The signatures follow the convention: +// +// void Operation(RWDigits results, Digits inputs); +// +// You must preallocate the result; use the respective {OperationResultLength} +// function to determine its minimum required length. 
The actual result may +// be smaller, so you should call result.Normalize() on the result. +// +// The operations are divided into two groups: "fast" (O(n) with small +// coefficient) operations are exposed directly as free functions, "slow" +// operations are methods on a {BigIntProcessor} object, which provides +// support for interrupting execution via the {Platform}'s {InterruptRequested} +// mechanism when it takes too long. These functions return a {Status} value. + // Returns r such that r < 0 if A < B; r > 0 if A > B; r == 0 if A == B. int Compare(Digits A, Digits B); +enum class Status { kOk, kInterrupted }; + +class Processor { + public: + // Takes ownership of {platform}. + static Processor* New(Platform* platform); + + // Use this for any std::unique_ptr holding an instance of BigIntProcessor. + class Destroyer { + public: + void operator()(Processor* proc) { proc->Destroy(); } + }; + // When not using std::unique_ptr, call this to delete the instance. + void Destroy(); + + // Z := X * Y + Status Multiply(RWDigits Z, Digits X, Digits Y); +}; + +inline int MultiplyResultLength(Digits X, Digits Y) { + return X.len() + Y.len(); +} + } // namespace bigint } // namespace v8 diff --git a/deps/v8/src/bigint/digit-arithmetic.h b/deps/v8/src/bigint/digit-arithmetic.h new file mode 100644 index 00000000000000..1c5c93c0353dfa --- /dev/null +++ b/deps/v8/src/bigint/digit-arithmetic.h @@ -0,0 +1,87 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Helper functions that operate on individual digits. + +#ifndef V8_BIGINT_DIGIT_ARITHMETIC_H_ +#define V8_BIGINT_DIGIT_ARITHMETIC_H_ + +#include "src/bigint/bigint.h" + +namespace v8 { +namespace bigint { + +static constexpr int kHalfDigitBits = kDigitBits / 2; +static constexpr digit_t kHalfDigitBase = digit_t{1} << kHalfDigitBits; +static constexpr digit_t kHalfDigitMask = kHalfDigitBase - 1; + +// {carry} will be set to 0 or 1. +inline digit_t digit_add2(digit_t a, digit_t b, digit_t* carry) { +#if HAVE_TWODIGIT_T + twodigit_t result = twodigit_t{a} + b; + *carry = result >> kDigitBits; + return static_cast(result); +#else + digit_t result = a + b; + *carry = (result < a) ? 1 : 0; + return result; +#endif +} + +// This compiles to slightly better machine code than repeated invocations +// of {digit_add2}. +inline digit_t digit_add3(digit_t a, digit_t b, digit_t c, digit_t* carry) { +#if HAVE_TWODIGIT_T + twodigit_t result = twodigit_t{a} + b + c; + *carry = result >> kDigitBits; + return static_cast(result); +#else + digit_t result = a + b; + *carry = (result < a) ? 1 : 0; + result += c; + if (result < c) *carry += 1; + return result; +#endif +} + +// Returns the low half of the result. High half is in {high}. +inline digit_t digit_mul(digit_t a, digit_t b, digit_t* high) { +#if HAVE_TWODIGIT_T + twodigit_t result = twodigit_t{a} * b; + *high = result >> kDigitBits; + return static_cast(result); +#else + // Multiply in half-pointer-sized chunks. + // For inputs [AH AL]*[BH BL], the result is: + // + // [AL*BL] // r_low + // + [AL*BH] // r_mid1 + // + [AH*BL] // r_mid2 + // + [AH*BH] // r_high + // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1] + // + // Where of course we must be careful with carries between the columns. 
+ digit_t a_low = a & kHalfDigitMask; + digit_t a_high = a >> kHalfDigitBits; + digit_t b_low = b & kHalfDigitMask; + digit_t b_high = b >> kHalfDigitBits; + + digit_t r_low = a_low * b_low; + digit_t r_mid1 = a_low * b_high; + digit_t r_mid2 = a_high * b_low; + digit_t r_high = a_high * b_high; + + digit_t carry = 0; + digit_t low = digit_add3(r_low, r_mid1 << kHalfDigitBits, + r_mid2 << kHalfDigitBits, &carry); + *high = + (r_mid1 >> kHalfDigitBits) + (r_mid2 >> kHalfDigitBits) + r_high + carry; + return low; +#endif +} + +} // namespace bigint +} // namespace v8 + +#endif // V8_BIGINT_DIGIT_ARITHMETIC_H_ diff --git a/deps/v8/src/bigint/mul-schoolbook.cc b/deps/v8/src/bigint/mul-schoolbook.cc new file mode 100644 index 00000000000000..8e10685018c696 --- /dev/null +++ b/deps/v8/src/bigint/mul-schoolbook.cc @@ -0,0 +1,99 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/bigint/bigint-internal.h" +#include "src/bigint/digit-arithmetic.h" +#include "src/bigint/vector-arithmetic.h" + +namespace v8 { +namespace bigint { + +// Z := X * y, where y is a single digit. +void ProcessorImpl::MultiplySingle(RWDigits Z, Digits X, digit_t y) { + DCHECK(y != 0); // NOLINT(readability/check) + digit_t carry = 0; + digit_t high = 0; + for (int i = 0; i < X.len(); i++) { + digit_t new_high; + digit_t low = digit_mul(X[i], y, &new_high); + Z[i] = digit_add3(low, high, carry, &carry); + high = new_high; + } + AddWorkEstimate(X.len()); + Z[X.len()] = carry + high; + for (int i = X.len() + 1; i < Z.len(); i++) Z[i] = 0; +} + +#define BODY(min, max) \ + for (int j = min; j <= max; j++) { \ + digit_t high; \ + digit_t low = digit_mul(X[j], Y[i - j], &high); \ + digit_t carrybit; \ + zi = digit_add2(zi, low, &carrybit); \ + carry += carrybit; \ + next = digit_add2(next, high, &carrybit); \ + next_carry += carrybit; \ + } \ + Z[i] = zi + +// Z := X * Y. +// O(n²) "schoolbook" multiplication algorithm. Optimized to minimize +// bounds and overflow checks: rather than looping over X for every digit +// of Y (or vice versa), we loop over Z. The {BODY} macro above is what +// computes one of Z's digits as a sum of the products of relevant digits +// of X and Y. This yields a nearly 2x improvement compared to more obvious +// implementations. +// This method is *highly* performance sensitive even for the advanced +// algorithms, which use this as the base case of their recursive calls. +void ProcessorImpl::MultiplySchoolbook(RWDigits Z, Digits X, Digits Y) { + DCHECK(IsDigitNormalized(X)); + DCHECK(IsDigitNormalized(Y)); + DCHECK(X.len() >= Y.len()); + DCHECK(Z.len() >= X.len() + Y.len()); + if (X.len() == 0 || Y.len() == 0) return Z.Clear(); + digit_t next, next_carry = 0, carry = 0; + // Unrolled first iteration: it's trivial. + Z[0] = digit_mul(X[0], Y[0], &next); + int i = 1; + // Unrolled second iteration: a little less setup. + if (i < Y.len()) { + digit_t zi = next; + next = 0; + BODY(0, 1); + i++; + } + // Main part: since X.len() >= Y.len() > i, no bounds checks are needed. + for (; i < Y.len(); i++) { + digit_t zi = digit_add2(next, carry, &carry); + next = next_carry + carry; + carry = 0; + next_carry = 0; + BODY(0, i); + AddWorkEstimate(i); + if (should_terminate()) return; + } + // Last part: i exceeds Y now, we have to be careful about bounds. 
+ int loop_end = X.len() + Y.len() - 2; + for (; i <= loop_end; i++) { + int max_x_index = std::min(i, X.len() - 1); + int max_y_index = Y.len() - 1; + int min_x_index = i - max_y_index; + digit_t zi = digit_add2(next, carry, &carry); + next = next_carry + carry; + carry = 0; + next_carry = 0; + BODY(min_x_index, max_x_index); + AddWorkEstimate(max_x_index - min_x_index); + if (should_terminate()) return; + } + // Write the last digit, and zero out any extra space in Z. + Z[i++] = digit_add2(next, carry, &carry); + DCHECK(carry == 0); // NOLINT(readability/check) + for (; i < Z.len(); i++) Z[i] = 0; +} + +#undef BODY + +} // namespace bigint +} // namespace v8 diff --git a/deps/v8/src/bigint/vector-arithmetic.cc b/deps/v8/src/bigint/vector-arithmetic.cc index 9a28b168ba52b9..734b4439110872 100644 --- a/deps/v8/src/bigint/vector-arithmetic.cc +++ b/deps/v8/src/bigint/vector-arithmetic.cc @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/bigint/bigint.h" +#include "src/bigint/vector-arithmetic.h" namespace v8 { namespace bigint { diff --git a/deps/v8/src/bigint/vector-arithmetic.h b/deps/v8/src/bigint/vector-arithmetic.h new file mode 100644 index 00000000000000..617cb20b552a62 --- /dev/null +++ b/deps/v8/src/bigint/vector-arithmetic.h @@ -0,0 +1,20 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Helper functions that operate on {Digits} vectors of digits. + +#ifndef V8_BIGINT_VECTOR_ARITHMETIC_H_ +#define V8_BIGINT_VECTOR_ARITHMETIC_H_ + +#include "src/bigint/bigint.h" + +namespace v8 { +namespace bigint { + +inline bool IsDigitNormalized(Digits X) { return X.len() == 0 || X.msd() != 0; } + +} // namespace bigint +} // namespace v8 + +#endif // V8_BIGINT_VECTOR_ARITHMETIC_H_ diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc index c255184caeb0d7..0285b33e1f63ce 100644 --- a/deps/v8/src/builtins/accessors.cc +++ b/deps/v8/src/builtins/accessors.cc @@ -113,8 +113,7 @@ void Accessors::ReconfigureToDataProperty( v8::Local key, v8::Local val, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope stats_scope( - isolate, RuntimeCallCounterId::kReconfigureToDataProperty); + RCS_SCOPE(isolate, RuntimeCallCounterId::kReconfigureToDataProperty); HandleScope scope(isolate); Handle receiver = Utils::OpenHandle(*info.This()); Handle holder = @@ -155,8 +154,7 @@ Handle Accessors::MakeArgumentsIteratorInfo(Isolate* isolate) { void Accessors::ArrayLengthGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kArrayLengthGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthGetter); DisallowGarbageCollection no_gc; HandleScope scope(isolate); JSArray holder = JSArray::cast(*Utils::OpenHandle(*info.Holder())); @@ -168,8 +166,7 @@ void Accessors::ArrayLengthSetter( v8::Local name, v8::Local val, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kArrayLengthSetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kArrayLengthSetter); HandleScope scope(isolate); DCHECK(Utils::OpenHandle(*name)->SameValue( @@ -206,7 +203,12 @@ void 
Accessors::ArrayLengthSetter( return; } - JSArray::SetLength(array, length); + if (JSArray::SetLength(array, length).IsNothing()) { + // TODO(victorgomes): AccessorNameBooleanSetterCallback does not handle + // exceptions. + FATAL("Fatal JavaScript invalid array length %u", length); + UNREACHABLE(); + } uint32_t actual_new_len = 0; CHECK(array->length().ToArrayLength(&actual_new_len)); @@ -282,8 +284,7 @@ Handle Accessors::MakeModuleNamespaceEntryInfo( void Accessors::StringLengthGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kStringLengthGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kStringLengthGetter); DisallowGarbageCollection no_gc; HandleScope scope(isolate); @@ -330,8 +331,7 @@ static Handle GetFunctionPrototype(Isolate* isolate, void Accessors::FunctionPrototypeGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kFunctionPrototypeGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeGetter); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); @@ -344,8 +344,7 @@ void Accessors::FunctionPrototypeSetter( v8::Local name, v8::Local val, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kFunctionPrototypeSetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionPrototypeSetter); HandleScope scope(isolate); Handle value = Utils::OpenHandle(*val); Handle object = @@ -367,8 +366,7 @@ Handle Accessors::MakeFunctionPrototypeInfo(Isolate* isolate) { void Accessors::FunctionLengthGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kFunctionLengthGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kFunctionLengthGetter); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); @@ -722,8 +720,7 @@ Handle Accessors::MakeFunctionCallerInfo(Isolate* isolate) { void Accessors::BoundFunctionLengthGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kBoundFunctionLengthGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionLengthGetter); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); @@ -749,8 +746,7 @@ Handle Accessors::MakeBoundFunctionLengthInfo(Isolate* isolate) { void Accessors::BoundFunctionNameGetter( v8::Local name, const v8::PropertyCallbackInfo& info) { i::Isolate* isolate = reinterpret_cast(info.GetIsolate()); - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kBoundFunctionNameGetter); + RCS_SCOPE(isolate, RuntimeCallCounterId::kBoundFunctionNameGetter); HandleScope scope(isolate); Handle function = Handle::cast(Utils::OpenHandle(*info.Holder())); diff --git a/deps/v8/src/builtins/aggregate-error.tq b/deps/v8/src/builtins/aggregate-error.tq index 9c70ffcb0061a9..c811403274ddbb 100644 --- a/deps/v8/src/builtins/aggregate-error.tq +++ b/deps/v8/src/builtins/aggregate-error.tq @@ -19,8 +19,9 @@ transitioning javascript builtin AggregateErrorConstructor( // [[Writable]]: *true*, 
[[Enumerable]]: *false*, [[Configurable]]: *true* // c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_). const message: JSAny = arguments[1]; - const obj: JSObject = - ConstructAggregateErrorHelper(context, target, newTarget, message); + const options: JSAny = arguments[2]; + const obj: JSObject = ConstructAggregateErrorHelper( + context, target, newTarget, message, options); // 4. Let errorsList be ? IterableToList(errors). const errors: JSAny = arguments[0]; @@ -38,7 +39,7 @@ transitioning javascript builtin AggregateErrorConstructor( } extern transitioning runtime ConstructAggregateErrorHelper( - Context, JSFunction, JSAny, Object): JSObject; + Context, JSFunction, JSAny, Object, Object): JSObject; extern transitioning runtime ConstructInternalAggregateErrorHelper( Context, Object): JSObject; diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index 817d30fe26a467..83252446af8026 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" @@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // r0: number of arguments (untagged) // r1: constructor function // r3: new target - __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall); // Restore context from the frame. __ ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -236,7 +237,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(r6); // Call the function. - __ InvokeFunctionWithNewTarget(r1, r3, r0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r1, r3, r0, InvokeType::kCall); // ----------- S t a t e ------------- // -- r0: constructor result @@ -337,7 +338,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Store input value into generator object. __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset)); __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, - kLRHasNotBeenSaved, kDontSaveFPRegs); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); @@ -388,16 +389,13 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset)); { Label done_loop, loop; - __ mov(r6, r3); - __ bind(&loop); - __ sub(r6, r6, Operand(1), SetCC); + __ sub(r3, r3, Operand(1), SetCC); __ b(lt, &done_loop); - __ add(scratch, r2, Operand(r6, LSL, kTaggedSizeLog2)); + __ add(scratch, r2, Operand(r3, LSL, kTaggedSizeLog2)); __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ Push(scratch); __ b(&loop); - __ bind(&done_loop); // Push receiver. @@ -799,8 +797,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, // Store code entry in the closure. 
__ str(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, - kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1051,7 +1049,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ ldr(feedback_vector, FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ CompareObjectType(feedback_vector, scratch, scratch, @@ -1124,7 +1122,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Baseline code frames store the feedback vector where interpreter would // store the bytecode offset. - if (__ emit_debug_code()) { + if (FLAG_debug_code) { UseScratchRegisterScope temps(masm); Register scratch = temps.Acquire(); __ CompareObjectType(feedback_vector, scratch, scratch, @@ -1646,7 +1644,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1691,7 +1689,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1841,6 +1839,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ ldr(kContextRegister, + MemOperand(fp, BaselineFrameConstants::kContextOffset)); return OnStackReplacement(masm, false); } @@ -2009,6 +2009,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -2020,7 +2021,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // ----------------------------------- Register scratch = r8; - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow r2 to be a FixedArray, or a FixedDoubleArray if r4 == 0. Label ok, fail; __ AssertNotSmi(r2); @@ -2278,7 +2279,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ ldrh(r2, FieldMemOperand(r2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(r1, no_reg, r2, r0, JUMP_FUNCTION); + __ InvokeFunctionCode(r1, no_reg, r2, r0, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2640,6 +2641,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // TODO(v8:10701): Implement for this platform. 
__ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2652,12 +2658,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // r2: pointer to the first argument __ mov(r5, Operand(r1)); - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ mov(r1, Operand(r2)); } else { @@ -2669,7 +2675,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == kSaveFPRegs, 0, + save_doubles == SaveFPRegsMode::kSave, 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Store a copy of argc in callee-saved registers for later. @@ -2726,12 +2732,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // r0:r1: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // Callee-saved register r4 still holds argc. : r4; - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); __ mov(pc, lr); // Handling of exception. @@ -2841,7 +2847,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { // If we reach this code, 30 <= exponent <= 83. // `TryInlineTruncateDoubleToI` above will have truncated any double with an // exponent lower than 30. - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Scratch is exponent - 1. __ cmp(scratch, Operand(30 - 1)); __ Check(ge, AbortReason::kUnexpectedValue); @@ -2957,7 +2963,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // No more valid handles (the result handle was the last one). Restore // previous handle scope. __ str(r4, MemOperand(r9, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ ldr(r1, MemOperand(r9, kLevelOffset)); __ cmp(r1, r6); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index d095d60b302727..3cf3f0153fc1e4 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" @@ -99,7 +100,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { Label already_aligned; Register argc = x0; - if (__ emit_debug_code()) { + if (FLAG_debug_code) { // Check that FrameScope pushed the context on to the stack already. __ Peek(x2, 0); __ Cmp(x2, cp); @@ -176,7 +177,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // ----------------------------------- // Call the function. 
- __ InvokeFunctionWithNewTarget(x1, x3, argc, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(x1, x3, argc, InvokeType::kCall); // Restore the context from the frame. __ Ldr(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -219,7 +220,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ EnterFrame(StackFrame::CONSTRUCT); Label post_instantiation_deopt_entry, not_create_implicit_receiver; - if (__ emit_debug_code()) { + if (FLAG_debug_code) { // Check that FrameScope pushed the context on to the stack already. __ Peek(x2, 0); __ Cmp(x2, cp); @@ -336,7 +337,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Call the function. __ Mov(x0, x12); - __ InvokeFunctionWithNewTarget(x1, x3, x0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(x1, x3, x0, InvokeType::kCall); // ----------- S t a t e ------------- // -- sp[0*kSystemPointerSize]: implicit receiver @@ -442,7 +443,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreTaggedField( x0, FieldMemOperand(x1, JSGeneratorObject::kInputOrDebugPosOffset)); __ RecordWriteField(x1, JSGeneratorObject::kInputOrDebugPosOffset, x0, - kLRHasNotBeenSaved, kDontSaveFPRegs); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ LoadTaggedPointerField( @@ -639,7 +640,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE // Initialize the pointer cage base register. - __ Mov(kPointerCageBaseRegister, x0); + __ LoadRootRelative(kPtrComprCageBaseRegister, + IsolateData::cage_base_offset()); #endif } @@ -925,7 +927,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Don't initialize the reserved registers. // x26 : root register (kRootRegister). // x27 : context pointer (cp). - // x28 : pointer cage base register (kPointerCageBaseRegister). + // x28 : pointer cage base register (kPtrComprCageBaseRegister). // x29 : frame pointer (fp). Handle builtin = is_construct @@ -966,8 +968,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ StoreTaggedField(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code, - kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -998,7 +1000,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, __ LeaveFrame(StackFrame::INTERPRETED); // Drop receiver + arguments. - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ Tst(params_size, kSystemPointerSize - 1); __ Check(eq, AbortReason::kUnexpectedValue); } @@ -1230,7 +1232,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedPointerField( feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE); __ Assert(eq, AbortReason::kExpectedFeedbackVector); } @@ -1288,7 +1290,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // Baseline code frames store the feedback vector where interpreter would // store the bytecode offset. 
- if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ CompareObjectType(feedback_vector, x4, x4, FEEDBACK_VECTOR_TYPE); __ Assert(eq, AbortReason::kExpectedFeedbackVector); } @@ -1859,7 +1861,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Br(x17); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ ldr(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1903,7 +1905,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -2087,6 +2089,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ ldr(kContextRegister, + MemOperand(fp, BaselineFrameConstants::kContextOffset)); return OnStackReplacement(masm, false); } @@ -2385,6 +2389,7 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc, } // namespace // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -2394,7 +2399,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // -- x4 : len (number of elements to push from args) // -- x3 : new.target (for [[Construct]]) // ----------------------------------- - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow x2 to be a FixedArray, or a FixedDoubleArray if x4 == 0. Label ok, fail; __ AssertNotSmi(x2, AbortReason::kOperandIsNotAFixedArray); @@ -2618,7 +2623,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Ldrh(x2, FieldMemOperand(x2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(x1, no_reg, x2, x0, JUMP_FUNCTION); + __ InvokeFunctionCode(x1, no_reg, x2, x0, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ Bind(&class_constructor); @@ -3036,6 +3041,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // TODO(v8:10701): Implement for this platform. __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3053,7 +3063,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Register parameters: // x0: argc (including receiver, untagged) // x1: target - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // x11: argv (pointer to first argument) // // The stack on entry holds the arguments and the receiver, with the receiver @@ -3085,7 +3095,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // (arg[argc-2]), or just below the receiver in case there are no arguments. // - Adjust for the arg[] array. Register temp_argv = x11; - if (argv_mode == kArgvOnStack) { + if (argv_mode == ArgvMode::kStack) { __ SlotAddress(temp_argv, x0); // - Adjust for the receiver. 
__ Sub(temp_argv, temp_argv, 1 * kSystemPointerSize); @@ -3096,7 +3106,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == kSaveFPRegs, x10, extra_stack_space, + save_doubles == SaveFPRegsMode::kSave, x10, extra_stack_space, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Poke callee-saved registers into reserved space. @@ -3177,8 +3187,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Peek(argc, 2 * kSystemPointerSize); __ Peek(target, 3 * kSystemPointerSize); - __ LeaveExitFrame(save_doubles == kSaveFPRegs, x10, x9); - if (argv_mode == kArgvOnStack) { + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, x10, x9); + if (argv_mode == ArgvMode::kStack) { // Drop the remaining stack slots and return from the stub. __ DropArguments(x11); } @@ -3247,7 +3257,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Compute the handler entry address and jump to it. We use x17 here for the // jump target, as this jump can occasionally end up at the start of - // InterpreterEnterBytecodeDispatch, which when CFI is enabled starts with + // InterpreterEnterAtBytecode, which when CFI is enabled starts with // a "BTI c". UseScratchRegisterScope temps(masm); temps.Exclude(x17); @@ -3296,7 +3306,7 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { // signed overflow in the int64_t target. Since we've already handled // exponents >= 84, we can guarantee that 63 <= exponent < 84. - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { __ Cmp(exponent, HeapNumber::kExponentBias + 63); // Exponents less than this should have been handled by the Fcvt case. __ Check(ge, AbortReason::kUnexpectedValue); @@ -3412,7 +3422,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // No more valid handles (the result handle was the last one). Restore // previous handle scope. __ Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ Ldr(w1, MemOperand(handle_scope_base, kLevelOffset)); __ Cmp(w1, level_reg); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall); diff --git a/deps/v8/src/builtins/arraybuffer.tq b/deps/v8/src/builtins/arraybuffer.tq index 179c4b38fd2cc9..5794414443b455 100644 --- a/deps/v8/src/builtins/arraybuffer.tq +++ b/deps/v8/src/builtins/arraybuffer.tq @@ -9,21 +9,25 @@ transitioning javascript builtin ArrayBufferPrototypeGetByteLength( js-implicit context: NativeContext, receiver: JSAny)(): Number { // 1. Let O be the this value. // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). + const functionName = 'get ArrayBuffer.prototype.byteLength'; const o = Cast(receiver) otherwise ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, - 'get ArrayBuffer.prototype.byteLength', receiver); + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. if (IsSharedArrayBuffer(o)) { ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, - 'get ArrayBuffer.prototype.byteLength', receiver); + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } - // 4. If IsDetachedBuffer(O) is true, throw a TypeError exception. + // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception. 
+ if (IsResizableArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 5. If IsDetachedBuffer(O) is true, throw a TypeError exception. // TODO(v8:4895): We don't actually throw here. - // 5. Let length be O.[[ArrayBufferByteLength]]. + // 6. Let length be O.[[ArrayBufferByteLength]]. const length = o.byte_length; - // 6. Return length. + // 7. Return length. return Convert(length); } @@ -32,15 +36,43 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength( js-implicit context: NativeContext, receiver: JSAny)(): Number { // 1. Let O be the this value. // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). + const functionName = 'get SharedArrayBuffer.prototype.byteLength'; const o = Cast(receiver) otherwise ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, - 'get SharedArrayBuffer.prototype.byteLength', receiver); - // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + // 3. Perform ? RequireInternalSlot(O, [[ArrayBufferData]]). if (!IsSharedArrayBuffer(o)) { ThrowTypeError( - MessageTemplate::kIncompatibleMethodReceiver, - 'get SharedArrayBuffer.prototype.byteLength', receiver); + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 4. If IsResizableArrayBuffer(O) is true, throw a TypeError exception. + if (IsResizableArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 5. Let length be O.[[ArrayBufferByteLength]]. + const length = o.byte_length; + // 6. Return length. + return Convert(length); +} + +// #sec-get-resizablearraybuffer.prototype.bytelength +transitioning javascript builtin ResizableArrayBufferPrototypeGetByteLength( + js-implicit context: NativeContext, receiver: JSAny)(): Number { + // 1. Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). + const functionName = 'get ResizableArrayBuffer.prototype.byteLength'; + const o = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + if (!IsResizableArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. + if (IsSharedArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); } // 4. Let length be O.[[ArrayBufferByteLength]]. const length = o.byte_length; @@ -48,6 +80,55 @@ transitioning javascript builtin SharedArrayBufferPrototypeGetByteLength( return Convert(length); } +// #sec-get-resizablearraybuffer.prototype.maxbytelength +transitioning javascript builtin ResizableArrayBufferPrototypeGetMaxByteLength( + js-implicit context: NativeContext, receiver: JSAny)(): Number { + // 1. Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). + const functionName = 'get ResizableArrayBuffer.prototype.maxByteLength'; + const o = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + if (!IsResizableArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 3. If IsSharedArrayBuffer(O) is true, throw a TypeError exception. 
+ if (IsSharedArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 4. Let length be O.[[ArrayBufferMaxByteLength]]. + const length = o.max_byte_length; + // 5. Return length. + return Convert(length); +} + +// #sec-get-growablesharedarraybuffer.prototype.maxbytelength +transitioning javascript builtin +GrowableSharedArrayBufferPrototypeGetMaxByteLength( + js-implicit context: NativeContext, receiver: JSAny)(): Number { + // 1. Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). + const functionName = 'get GrowableSharedArrayBuffer.prototype.maxByteLength'; + const o = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + if (!IsResizableArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. + if (!IsSharedArrayBuffer(o)) { + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, functionName, receiver); + } + // 4. Let length be O.[[ArrayBufferMaxByteLength]]. + const length = o.max_byte_length; + // 5. Return length. + return Convert(length); +} + // #sec-arraybuffer.isview transitioning javascript builtin ArrayBufferIsView(arg: JSAny): Boolean { // 1. If Type(arg) is not Object, return false. diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 08639c04daf6ee..fc84e1a2ce4ed5 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -141,6 +141,7 @@ intrinsic %MakeLazy( // template, but Torque doesn't understand how to use templates for extern // macros, so just add whatever overload definitions you need here. extern macro RunLazy(Lazy): Smi; +extern macro RunLazy(Lazy): JSAny; // A Smi value containing a bitfield struct as its integer data. 
@useParentTypeChecker type SmiTagged extends Smi; @@ -262,6 +263,8 @@ extern enum UpdateFeedbackMode { kOptionalFeedback, kGuaranteedFeedback } extern operator '==' macro UpdateFeedbackModeEqual( constexpr UpdateFeedbackMode, constexpr UpdateFeedbackMode): constexpr bool; +extern enum CallFeedbackContent extends int32 { kTarget, kReceiver } + extern enum UnicodeEncoding { UTF16, UTF32 } // Promise constants @@ -961,6 +964,8 @@ extern operator '|' macro ConstexprWord32Or( constexpr int32, constexpr int32): constexpr int32; extern operator '^' macro Word32Xor(int32, int32): int32; extern operator '^' macro Word32Xor(uint32, uint32): uint32; +extern operator '<<' macro ConstexprWord32Shl( + constexpr uint32, constexpr int32): uint32; extern operator '==' macro Word64Equal(int64, int64): bool; extern operator '==' macro Word64Equal(uint64, uint64): bool; @@ -1296,6 +1301,9 @@ macro GetFastAliasedArgumentsMap(implicit context: Context)(): Map { macro GetWeakCellMap(implicit context: Context)(): Map { return %GetClassMapConstant(); } +macro GetPrototypeApplyFunction(implicit context: Context)(): JSFunction { + return *NativeContextSlot(ContextSlot::FUNCTION_PROTOTYPE_APPLY_INDEX); +} // Call(Context, Target, Receiver, ...Args) // TODO(joshualitt): Assuming the context parameter is for throwing when Target @@ -1689,7 +1697,7 @@ extern transitioning runtime SetOwnPropertyIgnoreAttributes( namespace runtime { extern runtime -GetDerivedMap(Context, JSFunction, JSReceiver): Map; +GetDerivedMap(Context, JSFunction, JSReceiver, JSAny): Map; } extern macro IsDeprecatedMap(Map): bool; diff --git a/deps/v8/src/builtins/builtins-api.cc b/deps/v8/src/builtins/builtins-api.cc index 35e6cc393cb2de..b39bfc84a55187 100644 --- a/deps/v8/src/builtins/builtins-api.cc +++ b/deps/v8/src/builtins/builtins-api.cc @@ -23,8 +23,7 @@ namespace { // TODO(dcarney): CallOptimization duplicates this logic, merge. JSReceiver GetCompatibleReceiver(Isolate* isolate, FunctionTemplateInfo info, JSReceiver receiver) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kGetCompatibleReceiver); + RCS_SCOPE(isolate, RuntimeCallCounterId::kGetCompatibleReceiver); Object recv_type = info.signature(); // No signature, return holder. 
if (!recv_type.IsFunctionTemplateInfo()) return receiver; @@ -171,8 +170,7 @@ MaybeHandle Builtins::InvokeApiFunction(Isolate* isolate, Handle receiver, int argc, Handle args[], Handle new_target) { - RuntimeCallTimerScope timer(isolate, - RuntimeCallCounterId::kInvokeApiFunction); + RCS_SCOPE(isolate, RuntimeCallCounterId::kInvokeApiFunction); DCHECK(function->IsFunctionTemplateInfo() || (function->IsJSFunction() && JSFunction::cast(*function).shared().IsApiFunction())); diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 6b522fda6c03dc..833627c7b41c17 100644 --- a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -10,6 +10,7 @@ #include "src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-stub-assembler.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/execution/frame-constants.h" #include "src/heap/factory-inl.h" #include "src/objects/allocation-site-inl.h" diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index d3bbd980a55be0..6fe1bfc712f7a6 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -173,7 +173,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle SetLengthProperty( Handle array = Handle::cast(receiver); if (!JSArray::HasReadOnlyLength(array)) { DCHECK_LE(length, kMaxUInt32); - JSArray::SetLength(array, static_cast(length)); + MAYBE_RETURN_NULL( + JSArray::SetLength(array, static_cast(length))); return receiver; } } @@ -207,16 +208,16 @@ V8_WARN_UNUSED_RESULT Object GenericArrayFill(Isolate* isolate, return *receiver; } -V8_WARN_UNUSED_RESULT bool TryFastArrayFill( +V8_WARN_UNUSED_RESULT Maybe TryFastArrayFill( Isolate* isolate, BuiltinArguments* args, Handle receiver, Handle value, double start_index, double end_index) { // If indices are too large, use generic path since they are stored as // properties, not in the element backing store. 
- if (end_index > kMaxUInt32) return false; - if (!receiver->IsJSObject()) return false; + if (end_index > kMaxUInt32) return Just(false); + if (!receiver->IsJSObject()) return Just(false); if (!EnsureJSArrayWithWritableFastElements(isolate, receiver, args, 1, 1)) { - return false; + return Just(false); } Handle array = Handle::cast(receiver); @@ -240,14 +241,14 @@ V8_WARN_UNUSED_RESULT bool TryFastArrayFill( CHECK(DoubleToUint32IfEqualToSelf(end_index, &end)); ElementsAccessor* accessor = array->GetElementsAccessor(); - accessor->Fill(array, value, start, end); - return true; + RETURN_ON_EXCEPTION_VALUE(isolate, accessor->Fill(array, value, start, end), + Nothing()); + return Just(true); } } // namespace BUILTIN(ArrayPrototypeFill) { HandleScope scope(isolate); - if (isolate->debug_execution_mode() == DebugInfo::kSideEffects) { if (!isolate->debug()->PerformSideEffectCheckForObject(args.receiver())) { return ReadOnlyRoots(isolate).exception(); @@ -292,10 +293,12 @@ BUILTIN(ArrayPrototypeFill) { Handle value = args.atOrUndefined(isolate, 1); - if (TryFastArrayFill(isolate, &args, receiver, value, start_index, - end_index)) { - return *receiver; - } + bool success; + MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, success, + TryFastArrayFill(isolate, &args, receiver, value, start_index, + end_index)); + if (success) return *receiver; return GenericArrayFill(isolate, receiver, value, start_index, end_index); } @@ -385,7 +388,9 @@ BUILTIN(ArrayPush) { } ElementsAccessor* accessor = array->GetElementsAccessor(); - uint32_t new_length = accessor->Push(array, &args, to_add); + uint32_t new_length; + MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, new_length, accessor->Push(array, &args, to_add)); return *isolate->factory()->NewNumberFromUint((new_length)); } @@ -468,7 +473,8 @@ BUILTIN(ArrayPop) { Handle result; if (IsJSArrayFastElementMovingAllowed(isolate, JSArray::cast(*receiver))) { // Fast Elements Path - result = array->GetElementsAccessor()->Pop(array); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, array->GetElementsAccessor()->Pop(array)); } else { // Use Slow Lookup otherwise uint32_t new_length = len - 1; @@ -483,7 +489,9 @@ BUILTIN(ArrayPop) { isolate->factory()->length_string(), Object::TypeOf(isolate, array), array)); } - JSArray::SetLength(array, new_length); + bool set_len_ok; + MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, set_len_ok, JSArray::SetLength(array, new_length)); } return *result; @@ -595,7 +603,8 @@ BUILTIN(ArrayShift) { if (CanUseFastArrayShift(isolate, receiver)) { Handle array = Handle::cast(receiver); - return *array->GetElementsAccessor()->Shift(array); + RETURN_RESULT_OR_FAILURE(isolate, + array->GetElementsAccessor()->Shift(array)); } return GenericArrayShift(isolate, receiver, length); @@ -623,7 +632,9 @@ BUILTIN(ArrayUnshift) { DCHECK(!JSArray::HasReadOnlyLength(array)); ElementsAccessor* accessor = array->GetElementsAccessor(); - int new_length = accessor->Unshift(array, &args, to_add); + uint32_t new_length; + MAYBE_ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, new_length, accessor->Unshift(array, &args, to_add)); return Smi::FromInt(new_length); } @@ -742,7 +753,7 @@ class ArrayConcatVisitor { array, fast_elements() ? 
HOLEY_ELEMENTS : DICTIONARY_ELEMENTS); array->set_length(*length); array->set_elements(*storage_fixed_array()); - array->synchronized_set_map(*map); + array->set_map(*map, kReleaseStore); return array; } @@ -880,9 +891,11 @@ uint32_t EstimateElementCount(Isolate* isolate, Handle array) { #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: TYPED_ARRAYS(TYPED_ARRAY_CASE) -#undef TYPED_ARRAY_CASE + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) // External arrays are always dense. return length; + +#undef TYPED_ARRAY_CASE case NO_ELEMENTS: return 0; case FAST_SLOPPY_ARGUMENTS_ELEMENTS: @@ -956,9 +969,7 @@ void CollectElementIndices(Isolate* isolate, Handle object, } #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: - TYPED_ARRAYS(TYPED_ARRAY_CASE) -#undef TYPED_ARRAY_CASE - { + TYPED_ARRAYS(TYPED_ARRAY_CASE) { size_t length = Handle::cast(object)->length(); if (range <= length) { length = range; @@ -974,6 +985,11 @@ void CollectElementIndices(Isolate* isolate, Handle object, if (length == range) return; // All indices accounted for already. break; } + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) + // TODO(v8:11111): Support RAB / GSAB. + UNREACHABLE(); + +#undef TYPED_ARRAY_CASE case FAST_SLOPPY_ARGUMENTS_ELEMENTS: case SLOW_SLOPPY_ARGUMENTS_ELEMENTS: { DisallowGarbageCollection no_gc; @@ -1199,8 +1215,11 @@ bool IterateElements(Isolate* isolate, Handle receiver, break; #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) case TYPE##_ELEMENTS: TYPED_ARRAYS(TYPED_ARRAY_CASE) -#undef TYPED_ARRAY_CASE return IterateElementsSlow(isolate, receiver, length, visitor); + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) + // TODO(v8:11111): Support RAB / GSAB. + UNREACHABLE(); +#undef TYPED_ARRAY_CASE case FAST_STRING_WRAPPER_ELEMENTS: case SLOW_STRING_WRAPPER_ELEMENTS: // |array| is guaranteed to be an array or typed array. diff --git a/deps/v8/src/builtins/builtins-arraybuffer.cc b/deps/v8/src/builtins/builtins-arraybuffer.cc index 0f5f90518615c0..2d07847d5708f0 100644 --- a/deps/v8/src/builtins/builtins-arraybuffer.cc +++ b/deps/v8/src/builtins/builtins-arraybuffer.cc @@ -23,17 +23,43 @@ namespace internal { name)); \ } +#define CHECK_RESIZABLE(expected, name, method) \ + if (name->is_resizable() != expected) { \ + THROW_NEW_ERROR_RETURN_FAILURE( \ + isolate, \ + NewTypeError(MessageTemplate::kIncompatibleMethodReceiver, \ + isolate->factory()->NewStringFromAsciiChecked(method), \ + name)); \ + } + // ----------------------------------------------------------------------------- // ES#sec-arraybuffer-objects namespace { +bool RoundUpToPageSize(size_t byte_length, size_t page_size, + size_t max_allowed_byte_length, size_t* pages) { + size_t bytes_wanted = RoundUp(byte_length, page_size); + if (bytes_wanted > max_allowed_byte_length) { + return false; + } + *pages = bytes_wanted / page_size; + return true; +} + Object ConstructBuffer(Isolate* isolate, Handle target, Handle new_target, Handle length, - InitializedFlag initialized) { - SharedFlag shared = (*target != target->native_context().array_buffer_fun()) - ? SharedFlag::kShared - : SharedFlag::kNotShared; + Handle max_length, InitializedFlag initialized) { + SharedFlag shared = + (*target != target->native_context().array_buffer_fun() && + *target != target->native_context().resizable_array_buffer_fun()) + ? SharedFlag::kShared + : SharedFlag::kNotShared; + ResizableFlag resizable = + (*target == target->native_context().resizable_array_buffer_fun() || + *target == target->native_context().growable_shared_array_buffer_fun()) + ? 
ResizableFlag::kResizable + : ResizableFlag::kNotResizable; Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, @@ -42,9 +68,10 @@ Object ConstructBuffer(Isolate* isolate, Handle target, // Ensure that all fields are initialized because BackingStore::Allocate is // allowed to GC. Note that we cannot move the allocation of the ArrayBuffer // after BackingStore::Allocate because of the spec. - array_buffer->Setup(shared, nullptr); + array_buffer->Setup(shared, resizable, nullptr); size_t byte_length; + size_t max_byte_length = 0; if (!TryNumberToSize(*length, &byte_length) || byte_length > JSArrayBuffer::kMaxByteLength) { // ToNumber failed. @@ -52,8 +79,46 @@ Object ConstructBuffer(Isolate* isolate, Handle target, isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - auto backing_store = - BackingStore::Allocate(isolate, byte_length, shared, initialized); + std::unique_ptr backing_store; + if (resizable == ResizableFlag::kNotResizable) { + backing_store = + BackingStore::Allocate(isolate, byte_length, shared, initialized); + } else { + Handle number_max_length; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_max_length, + Object::ToInteger(isolate, max_length)); + + if (!TryNumberToSize(*number_max_length, &max_byte_length)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength)); + } + if (byte_length > max_byte_length) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength)); + } + + size_t page_size = AllocatePageSize(); + size_t initial_pages; + if (!RoundUpToPageSize(byte_length, page_size, + JSArrayBuffer::kMaxByteLength, &initial_pages)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); + } + + size_t max_pages; + if (!RoundUpToPageSize(max_byte_length, page_size, + JSArrayBuffer::kMaxByteLength, &max_pages)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewRangeError(MessageTemplate::kInvalidArrayBufferMaxLength)); + } + constexpr bool kIsWasmMemory = false; + backing_store = BackingStore::TryAllocateAndPartiallyCommitMemory( + isolate, byte_length, page_size, initial_pages, max_pages, + kIsWasmMemory, shared); + } if (!backing_store) { // Allocation of backing store failed. 
THROW_NEW_ERROR_RETURN_FAILURE( @@ -61,6 +126,7 @@ Object ConstructBuffer(Isolate* isolate, Handle target, } array_buffer->Attach(std::move(backing_store)); + array_buffer->set_max_byte_length(max_byte_length); return *array_buffer; } @@ -71,7 +137,10 @@ BUILTIN(ArrayBufferConstructor) { HandleScope scope(isolate); Handle target = args.target(); DCHECK(*target == target->native_context().array_buffer_fun() || - *target == target->native_context().shared_array_buffer_fun()); + *target == target->native_context().shared_array_buffer_fun() || + *target == target->native_context().resizable_array_buffer_fun() || + *target == + target->native_context().growable_shared_array_buffer_fun()); if (args.new_target()->IsUndefined(isolate)) { // [[Call]] THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, @@ -87,10 +156,11 @@ BUILTIN(ArrayBufferConstructor) { if (number_length->Number() < 0.0) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); - } + } - return ConstructBuffer(isolate, target, new_target, number_length, - InitializedFlag::kZeroInitialized); + Handle max_length = args.atOrUndefined(isolate, 2); + return ConstructBuffer(isolate, target, new_target, number_length, max_length, + InitializedFlag::kZeroInitialized); } // This is a helper to construct an ArrayBuffer with uinitialized memory. @@ -101,7 +171,7 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) { Handle target(isolate->native_context()->array_buffer_fun(), isolate); Handle length = args.atOrUndefined(isolate, 1); - return ConstructBuffer(isolate, target, target, length, + return ConstructBuffer(isolate, target, target, length, Handle(), InitializedFlag::kUninitialized); } @@ -119,6 +189,8 @@ static Object SliceHelper(BuiltinArguments args, Isolate* isolate, // * [SAB] If IsSharedArrayBuffer(O) is false, throw a TypeError exception. CHECK_SHARED(is_shared, array_buffer, kMethodName); + CHECK_RESIZABLE(false, array_buffer, kMethodName); + // * [AB] If IsDetachedBuffer(buffer) is true, throw a TypeError exception. if (!is_shared && array_buffer->was_detached()) { THROW_NEW_ERROR_RETURN_FAILURE( @@ -280,5 +352,158 @@ BUILTIN(ArrayBufferPrototypeSlice) { return SliceHelper(args, isolate, kMethodName, false); } +static Object ResizeHelper(BuiltinArguments args, Isolate* isolate, + const char* kMethodName, bool is_shared) { + HandleScope scope(isolate); + + // 1 Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxByteLength]]). + CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName); + CHECK_RESIZABLE(true, array_buffer, kMethodName); + + // [RAB] 3. If IsSharedArrayBuffer(O) is true, throw a *TypeError* exception + // [GSAB] 3. If IsSharedArrayBuffer(O) is false, throw a *TypeError* exception + CHECK_SHARED(is_shared, array_buffer, kMethodName); + + // Let newByteLength to ? ToIntegerOrInfinity(newLength). + Handle new_length = args.at(1); + Handle number_new_byte_length; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, number_new_byte_length, + Object::ToInteger(isolate, new_length)); + + // [RAB] If IsDetachedBuffer(O) is true, throw a TypeError exception. + if (!is_shared && array_buffer->was_detached()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kDetachedOperation, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + + // [RAB] If newByteLength < 0 or newByteLength > + // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception. 
+ + // [GSAB] If newByteLength < currentByteLength or newByteLength > + // O.[[ArrayBufferMaxByteLength]], throw a RangeError exception. + size_t new_byte_length; + if (!TryNumberToSize(*number_new_byte_length, &new_byte_length)) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + + if (is_shared && new_byte_length < array_buffer->byte_length()) { + // GrowableSharedArrayBuffer is only allowed to grow. + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + + if (new_byte_length > array_buffer->max_byte_length()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferResizeLength, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + + size_t page_size = AllocatePageSize(); + size_t new_committed_pages; + bool round_return_value = + RoundUpToPageSize(new_byte_length, page_size, + JSArrayBuffer::kMaxByteLength, &new_committed_pages); + CHECK(round_return_value); + + // [RAB] Let hostHandled be ? HostResizeArrayBuffer(O, newByteLength). + // [GSAB] Let hostHandled be ? HostGrowArrayBuffer(O, newByteLength). + // If hostHandled is handled, return undefined. + + // TODO(v8:11111): Wasm integration. + + if (!is_shared) { + // [RAB] Let oldBlock be O.[[ArrayBufferData]]. + // [RAB] Let newBlock be ? CreateByteDataBlock(newByteLength). + // [RAB] Let copyLength be min(newByteLength, O.[[ArrayBufferByteLength]]). + // [RAB] Perform CopyDataBlockBytes(newBlock, 0, oldBlock, 0, copyLength). + // [RAB] NOTE: Neither creation of the new Data Block nor copying from the + // old Data Block are observable. Implementations reserve the right to + // implement this method as in-place growth or shrinkage. + if (array_buffer->GetBackingStore()->ResizeInPlace( + isolate, new_byte_length, new_committed_pages * page_size) != + BackingStore::ResizeOrGrowResult::kSuccess) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kOutOfMemory, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + // [RAB] Set O.[[ArrayBufferByteLength]] to newLength. + array_buffer->set_byte_length(new_byte_length); + } else { + // [GSAB] (Detailed description of the algorithm omitted.) + auto result = array_buffer->GetBackingStore()->GrowInPlace( + isolate, new_byte_length, new_committed_pages * page_size); + if (result == BackingStore::ResizeOrGrowResult::kFailure) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kOutOfMemory, + isolate->factory()->NewStringFromAsciiChecked( + kMethodName))); + } + if (result == BackingStore::ResizeOrGrowResult::kRace) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewRangeError( + MessageTemplate::kInvalidArrayBufferResizeLength, + isolate->factory()->NewStringFromAsciiChecked(kMethodName))); + } + // Invariant: byte_length for a GSAB is 0 (it needs to be read from the + // BackingStore). + CHECK_EQ(0, array_buffer->byte_length()); + } + return ReadOnlyRoots(isolate).undefined_value(); +} + +// ES #sec-get-growablesharedarraybuffer.prototype.bytelength +// get GrowableSharedArrayBuffer.prototype.byteLength +BUILTIN(GrowableSharedArrayBufferPrototypeGetByteLength) { + const char* const kMethodName = + "get GrowableSharedArrayBuffer.prototype.byteLength"; + HandleScope scope(isolate); + + // 1. 
Let O be the this value. + // 2. Perform ? RequireInternalSlot(O, [[ArrayBufferMaxLength]]). + CHECK_RECEIVER(JSArrayBuffer, array_buffer, kMethodName); + CHECK_RESIZABLE(true, array_buffer, kMethodName); + // 3. If IsSharedArrayBuffer(O) is false, throw a TypeError exception. + CHECK_SHARED(true, array_buffer, kMethodName); + + // 4. Let length be ArrayBufferByteLength(O, SeqCst). + + // Invariant: byte_length for GSAB is 0 (it needs to be read from the + // BackingStore). + DCHECK_EQ(0, array_buffer->byte_length()); + + size_t byte_length = + array_buffer->GetBackingStore()->byte_length(std::memory_order_seq_cst); + + // 5. Return length. + return *isolate->factory()->NewNumberFromSize(byte_length); +} + +// ES #sec-resizablearraybuffer.prototype.resize +// ResizableArrayBuffer.prototype.resize(new_size)) +BUILTIN(ResizableArrayBufferPrototypeResize) { + const char* const kMethodName = "ResizableArrayBuffer.prototype.resize"; + constexpr bool kIsShared = false; + return ResizeHelper(args, isolate, kMethodName, kIsShared); +} + +// ES #sec-growablesharedarraybuffer.prototype.grow +// GrowableSharedArrayBuffer.prototype.grow(new_size)) +BUILTIN(GrowableSharedArrayBufferPrototypeGrow) { + const char* const kMethodName = "GrowableSharedArrayBuffer.prototype.grow"; + constexpr bool kIsShared = true; + return ResizeHelper(args, isolate, kMethodName, kIsShared); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index 664f57aadb2eb7..89bf77d0b07518 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -64,38 +64,45 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) { masm->isolate()->builtins()->CallFunction()); } +// TODO(cbruni): Try reusing code between builtin versions to avoid binary +// overhead. 
+TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline_Compact, + CallOrConstructBuiltinsAssembler) { + auto receiver = UndefinedConstant(); + CallReceiver(Builtins::kCall_ReceiverIsNullOrUndefined, receiver); +} + TF_BUILTIN(Call_ReceiverIsNullOrUndefined_Baseline, CallOrConstructBuiltinsAssembler) { - auto target = Parameter(Descriptor::kFunction); auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); - auto context = LoadContextFromBaseline(); - auto feedback_vector = LoadFeedbackVectorFromBaseline(); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); - TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target, - argc); + auto receiver = UndefinedConstant(); + CallReceiver(Builtins::kCall_ReceiverIsNullOrUndefined, argc, + slot, receiver); +} + +TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact, + CallOrConstructBuiltinsAssembler) { + CallReceiver(Builtins::kCall_ReceiverIsNotNullOrUndefined); } TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_Baseline, CallOrConstructBuiltinsAssembler) { - auto target = Parameter(Descriptor::kFunction); auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); - auto context = LoadContextFromBaseline(); - auto feedback_vector = LoadFeedbackVectorFromBaseline(); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); - TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target, - argc); + CallReceiver(Builtins::kCall_ReceiverIsNotNullOrUndefined, argc, + slot); +} + +TF_BUILTIN(Call_ReceiverIsAny_Baseline_Compact, + CallOrConstructBuiltinsAssembler) { + CallReceiver(Builtins::kCall_ReceiverIsAny); } TF_BUILTIN(Call_ReceiverIsAny_Baseline, CallOrConstructBuiltinsAssembler) { - auto target = Parameter(Descriptor::kFunction); auto argc = UncheckedParameter(Descriptor::kActualArgumentsCount); - auto context = LoadContextFromBaseline(); - auto feedback_vector = LoadFeedbackVectorFromBaseline(); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); - TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc); + CallReceiver(Builtins::kCall_ReceiverIsAny, argc, slot); } TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback, @@ -105,7 +112,9 @@ TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback, auto context = Parameter(Descriptor::kContext); auto feedback_vector = Parameter(Descriptor::kFeedbackVector); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + auto receiver = Parameter(Descriptor::kReceiver); + CollectCallFeedback( + target, [=] { return receiver; }, context, feedback_vector, slot); TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target, argc); } @@ -117,7 +126,9 @@ TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback, auto context = Parameter(Descriptor::kContext); auto feedback_vector = Parameter(Descriptor::kFeedbackVector); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + auto receiver = Parameter(Descriptor::kReceiver); + CollectCallFeedback( + target, [=] { return receiver; }, context, feedback_vector, slot); TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target, argc); } @@ -128,7 +139,9 @@ TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, CallOrConstructBuiltinsAssembler) { auto context = 
Parameter(Descriptor::kContext); auto feedback_vector = Parameter(Descriptor::kFeedbackVector); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + auto receiver = Parameter(Descriptor::kReceiver); + CollectCallFeedback( + target, [=] { return receiver; }, context, feedback_vector, slot); TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc); } @@ -449,6 +462,43 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithSpread( } } +template +void CallOrConstructBuiltinsAssembler::CallReceiver( + Builtins::Name id, base::Optional> receiver) { + static_assert(std::is_same::value, + "Incompatible Descriptor"); + auto bitfield = UncheckedParameter(Descriptor::kBitField); + TNode argc = + Signed(DecodeWord32< + CallTrampoline_Baseline_CompactDescriptor::ArgumentCountField>( + bitfield)); + TNode slot = ChangeUint32ToWord( + DecodeWord32( + bitfield)); + CallReceiver(id, argc, slot, receiver); +} + +template +void CallOrConstructBuiltinsAssembler::CallReceiver( + Builtins::Name id, TNode argc, TNode slot, + base::Optional> maybe_receiver) { + auto target = Parameter(Descriptor::kFunction); + auto context = LoadContextFromBaseline(); + auto feedback_vector = LoadFeedbackVectorFromBaseline(); + LazyNode receiver = [=] { + if (maybe_receiver) { + return *maybe_receiver; + } else { + CodeStubArguments args(this, argc); + return args.GetReceiver(); + } + }; + + CollectCallFeedback(target, receiver, context, feedback_vector, slot); + TailCallBuiltin(id, context, target, argc); +} + TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) { auto target = Parameter(Descriptor::kTarget); base::Optional> new_target = base::nullopt; @@ -464,7 +514,9 @@ TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); auto feedback_vector = Parameter(Descriptor::kFeedbackVector); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + auto receiver = Parameter(Descriptor::kReceiver); + CollectCallFeedback( + target, [=] { return receiver; }, context, feedback_vector, slot); CallOrConstructWithArrayLike(target, new_target, arguments_list, context); } @@ -485,7 +537,10 @@ TF_BUILTIN(CallWithSpread_Baseline, CallOrConstructBuiltinsAssembler) { auto context = LoadContextFromBaseline(); auto feedback_vector = LoadFeedbackVectorFromBaseline(); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + CodeStubArguments args(this, args_count); + CollectCallFeedback( + target, [=] { return args.GetReceiver(); }, context, feedback_vector, + slot); CallOrConstructWithSpread(target, new_target, spread, args_count, context); } @@ -497,7 +552,9 @@ TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) { auto context = Parameter(Descriptor::kContext); auto feedback_vector = Parameter(Descriptor::kFeedbackVector); auto slot = UncheckedParameter(Descriptor::kSlot); - CollectCallFeedback(target, context, feedback_vector, slot); + auto receiver = Parameter(Descriptor::kReceiver); + CollectCallFeedback( + target, [=] { return receiver; }, context, feedback_vector, slot); CallOrConstructWithSpread(target, new_target, spread, args_count, context); } diff --git a/deps/v8/src/builtins/builtins-call-gen.h b/deps/v8/src/builtins/builtins-call-gen.h index c938662d5e5b5f..ff4d998ff3a399 100644 --- a/deps/v8/src/builtins/builtins-call-gen.h +++ 
b/deps/v8/src/builtins/builtins-call-gen.h @@ -30,6 +30,13 @@ class CallOrConstructBuiltinsAssembler : public CodeStubAssembler { TNode spread, TNode args_count, TNode context); + template + void CallReceiver(Builtins::Name id, + base::Optional> = base::nullopt); + template + void CallReceiver(Builtins::Name id, TNode argc, TNode slot, + base::Optional> = base::nullopt); + enum class CallFunctionTemplateMode : uint8_t { kCheckAccess, kCheckCompatibleReceiver, diff --git a/deps/v8/src/builtins/builtins-debug-gen.cc b/deps/v8/src/builtins/builtins-debug-gen.cc deleted file mode 100644 index 9d47cf160065ae..00000000000000 --- a/deps/v8/src/builtins/builtins-debug-gen.cc +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2016 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "src/builtins/builtins-utils.h" -#include "src/builtins/builtins.h" -#include "src/debug/debug.h" -#include "src/objects/objects-inl.h" - -namespace v8 { -namespace internal { - -void Builtins::Generate_FrameDropperTrampoline(MacroAssembler* masm) { - DebugCodegen::GenerateFrameDropperTrampoline(masm); -} - -void Builtins::Generate_HandleDebuggerStatement(MacroAssembler* masm) { - DebugCodegen::GenerateHandleDebuggerStatement(masm); -} - -} // namespace internal -} // namespace v8 diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index b0e608418eb5ec..78255a30e93a52 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -50,8 +50,13 @@ namespace internal { ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \ ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \ ASM(Call_ReceiverIsAny, CallTrampoline) \ + TFC(Call_ReceiverIsNullOrUndefined_Baseline_Compact, \ + CallTrampoline_Baseline_Compact) \ TFC(Call_ReceiverIsNullOrUndefined_Baseline, CallTrampoline_Baseline) \ + TFC(Call_ReceiverIsNotNullOrUndefined_Baseline_Compact, \ + CallTrampoline_Baseline_Compact) \ TFC(Call_ReceiverIsNotNullOrUndefined_Baseline, CallTrampoline_Baseline) \ + TFC(Call_ReceiverIsAny_Baseline_Compact, CallTrampoline_Baseline_Compact) \ TFC(Call_ReceiverIsAny_Baseline, CallTrampoline_Baseline) \ TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \ CallTrampoline_WithFeedback) \ @@ -133,13 +138,13 @@ namespace internal { InterpreterPushArgsThenConstruct) \ ASM(InterpreterPushArgsThenConstructWithFinalSpread, \ InterpreterPushArgsThenConstruct) \ - ASM(InterpreterEnterBytecodeAdvance, Dummy) \ - ASM(InterpreterEnterBytecodeDispatch, Dummy) \ + ASM(InterpreterEnterAtBytecode, Dummy) \ + ASM(InterpreterEnterAtNextBytecode, Dummy) \ ASM(InterpreterOnStackReplacement, ContextOnly) \ \ /* Baseline Compiler */ \ ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \ - ASM(BaselineOnStackReplacement, ContextOnly) \ + ASM(BaselineOnStackReplacement, Void) \ ASM(BaselineLeaveFrame, BaselineLeaveFrame) \ ASM(BaselineEnterAtBytecode, Void) \ ASM(BaselineEnterAtNextBytecode, Void) \ @@ -200,8 +205,6 @@ namespace internal { \ /* Debugger */ \ TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \ - ASM(FrameDropperTrampoline, FrameDropperTrampoline) \ - ASM(HandleDebuggerStatement, ContextOnly) \ \ /* Type conversions */ \ TFC(ToNumber, TypeConversion) \ @@ -770,6 +773,11 @@ namespace internal { ASM(RegExpInterpreterTrampoline, CCall) \ ASM(RegExpExperimentalTrampoline, CCall) \ \ + /* ResizableArrayBuffer & GrowableSharedArrayBuffer */ \ + 
CPP(ResizableArrayBufferPrototypeResize) \ + CPP(GrowableSharedArrayBufferPrototypeGrow) \ + CPP(GrowableSharedArrayBufferPrototypeGetByteLength) \ + \ /* Set */ \ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \ TFJ(SetPrototypeHas, 1, kReceiver, kKey) \ @@ -863,6 +871,7 @@ namespace internal { IF_WASM(ASM, GenericJSToWasmWrapper, Dummy) \ IF_WASM(ASM, WasmCompileLazy, Dummy) \ IF_WASM(ASM, WasmDebugBreak, Dummy) \ + IF_WASM(ASM, WasmOnStackReplace, Dummy) \ IF_WASM(TFC, WasmFloat32ToNumber, WasmFloat32ToNumber) \ IF_WASM(TFC, WasmFloat64ToNumber, WasmFloat64ToNumber) \ IF_WASM(TFC, WasmI32AtomicWait32, WasmI32AtomicWait32) \ @@ -983,6 +992,7 @@ namespace internal { CPP(CollatorPrototypeCompare) \ /* ecma402 #sec-intl.collator.supportedlocalesof */ \ CPP(CollatorSupportedLocalesOf) \ + /* ecma402 #sec-intl.collator.prototype.resolvedoptions */ \ CPP(CollatorPrototypeResolvedOptions) \ /* ecma402 #sup-date.prototype.tolocaledatestring */ \ CPP(DatePrototypeToLocaleDateString) \ @@ -1028,21 +1038,46 @@ namespace internal { CPP(ListFormatSupportedLocalesOf) \ /* ecma402 #sec-intl-locale-constructor */ \ CPP(LocaleConstructor) \ + /* ecma402 #sec-Intl.Locale.prototype.baseName */ \ CPP(LocalePrototypeBaseName) \ + /* ecma402 #sec-Intl.Locale.prototype.calendar */ \ CPP(LocalePrototypeCalendar) \ + /* ecma402 #sec-Intl.Locale.prototype.calendars */ \ + CPP(LocalePrototypeCalendars) \ + /* ecma402 #sec-Intl.Locale.prototype.caseFirst */ \ CPP(LocalePrototypeCaseFirst) \ + /* ecma402 #sec-Intl.Locale.prototype.collation */ \ CPP(LocalePrototypeCollation) \ + /* ecma402 #sec-Intl.Locale.prototype.collations */ \ + CPP(LocalePrototypeCollations) \ + /* ecma402 #sec-Intl.Locale.prototype.hourCycle */ \ CPP(LocalePrototypeHourCycle) \ + /* ecma402 #sec-Intl.Locale.prototype.hourCycles */ \ + CPP(LocalePrototypeHourCycles) \ + /* ecma402 #sec-Intl.Locale.prototype.language */ \ CPP(LocalePrototypeLanguage) \ /* ecma402 #sec-Intl.Locale.prototype.maximize */ \ CPP(LocalePrototypeMaximize) \ /* ecma402 #sec-Intl.Locale.prototype.minimize */ \ CPP(LocalePrototypeMinimize) \ + /* ecma402 #sec-Intl.Locale.prototype.numeric */ \ CPP(LocalePrototypeNumeric) \ + /* ecma402 #sec-Intl.Locale.prototype.numberingSystem */ \ CPP(LocalePrototypeNumberingSystem) \ + /* ecma402 #sec-Intl.Locale.prototype.numberingSystems */ \ + CPP(LocalePrototypeNumberingSystems) \ + /* ecma402 #sec-Intl.Locale.prototype.region */ \ CPP(LocalePrototypeRegion) \ + /* ecma402 #sec-Intl.Locale.prototype.script */ \ CPP(LocalePrototypeScript) \ + /* ecma402 #sec-Intl.Locale.prototype.textInfo */ \ + CPP(LocalePrototypeTextInfo) \ + /* ecma402 #sec-Intl.Locale.prototype.timezones */ \ + CPP(LocalePrototypeTimeZones) \ + /* ecma402 #sec-Intl.Locale.prototype.toString */ \ CPP(LocalePrototypeToString) \ + /* ecma402 #sec-Intl.Locale.prototype.weekInfo */ \ + CPP(LocalePrototypeWeekInfo) \ /* ecma402 #sec-intl.numberformat */ \ CPP(NumberFormatConstructor) \ /* ecma402 #sec-number-format-functions */ \ @@ -1057,6 +1092,7 @@ namespace internal { CPP(NumberFormatSupportedLocalesOf) \ /* ecma402 #sec-intl.pluralrules */ \ CPP(PluralRulesConstructor) \ + /* ecma402 #sec-intl.pluralrules.prototype.resolvedoptions */ \ CPP(PluralRulesPrototypeResolvedOptions) \ /* ecma402 #sec-intl.pluralrules.prototype.select */ \ CPP(PluralRulesPrototypeSelect) \ diff --git a/deps/v8/src/builtins/builtins-error.cc b/deps/v8/src/builtins/builtins-error.cc index 840298eacbf532..44dce9224a3473 100644 --- a/deps/v8/src/builtins/builtins-error.cc +++ 
b/deps/v8/src/builtins/builtins-error.cc @@ -18,9 +18,12 @@ namespace internal { // ES6 section 19.5.1.1 Error ( message ) BUILTIN(ErrorConstructor) { HandleScope scope(isolate); + Handle options = FLAG_harmony_error_cause + ? args.atOrUndefined(isolate, 2) + : isolate->factory()->undefined_value(); RETURN_RESULT_OR_FAILURE( isolate, ErrorUtils::Construct(isolate, args.target(), args.new_target(), - args.atOrUndefined(isolate, 1))); + args.atOrUndefined(isolate, 1), options)); } // static diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc index 2e9d7e24e4f252..b2d6e223e16ea0 100644 --- a/deps/v8/src/builtins/builtins-generator-gen.cc +++ b/deps/v8/src/builtins/builtins-generator-gen.cc @@ -205,7 +205,7 @@ TF_BUILTIN(GeneratorPrototypeThrow, GeneratorBuiltinsAssembler) { // TODO(cbruni): Merge with corresponding bytecode handler. TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) { auto generator = Parameter(Descriptor::kGeneratorObject); - auto context = Parameter(Descriptor::kContext); + auto context = LoadContextFromBaseline(); StoreJSGeneratorObjectContext(generator, context); auto suspend_id = SmiTag(UncheckedParameter(Descriptor::kSuspendId)); StoreJSGeneratorObjectContinuation(generator, suspend_id); diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 3cbd626b8e4f53..19a31b81a7ced5 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -183,28 +183,39 @@ TF_BUILTIN(ElementsTransitionAndStore_NoTransitionHandleCOW, // All elements kinds handled by EmitElementStore. Specifically, this includes // fast elements and fixed typed array elements. -#define ELEMENTS_KINDS(V) \ - V(PACKED_SMI_ELEMENTS) \ - V(HOLEY_SMI_ELEMENTS) \ - V(PACKED_ELEMENTS) \ - V(PACKED_NONEXTENSIBLE_ELEMENTS) \ - V(PACKED_SEALED_ELEMENTS) \ - V(HOLEY_ELEMENTS) \ - V(HOLEY_NONEXTENSIBLE_ELEMENTS) \ - V(HOLEY_SEALED_ELEMENTS) \ - V(PACKED_DOUBLE_ELEMENTS) \ - V(HOLEY_DOUBLE_ELEMENTS) \ - V(UINT8_ELEMENTS) \ - V(INT8_ELEMENTS) \ - V(UINT16_ELEMENTS) \ - V(INT16_ELEMENTS) \ - V(UINT32_ELEMENTS) \ - V(INT32_ELEMENTS) \ - V(FLOAT32_ELEMENTS) \ - V(FLOAT64_ELEMENTS) \ - V(UINT8_CLAMPED_ELEMENTS) \ - V(BIGUINT64_ELEMENTS) \ - V(BIGINT64_ELEMENTS) +#define ELEMENTS_KINDS(V) \ + V(PACKED_SMI_ELEMENTS) \ + V(HOLEY_SMI_ELEMENTS) \ + V(PACKED_ELEMENTS) \ + V(PACKED_NONEXTENSIBLE_ELEMENTS) \ + V(PACKED_SEALED_ELEMENTS) \ + V(HOLEY_ELEMENTS) \ + V(HOLEY_NONEXTENSIBLE_ELEMENTS) \ + V(HOLEY_SEALED_ELEMENTS) \ + V(PACKED_DOUBLE_ELEMENTS) \ + V(HOLEY_DOUBLE_ELEMENTS) \ + V(UINT8_ELEMENTS) \ + V(INT8_ELEMENTS) \ + V(UINT16_ELEMENTS) \ + V(INT16_ELEMENTS) \ + V(UINT32_ELEMENTS) \ + V(INT32_ELEMENTS) \ + V(FLOAT32_ELEMENTS) \ + V(FLOAT64_ELEMENTS) \ + V(UINT8_CLAMPED_ELEMENTS) \ + V(BIGUINT64_ELEMENTS) \ + V(BIGINT64_ELEMENTS) \ + V(RAB_GSAB_UINT8_ELEMENTS) \ + V(RAB_GSAB_INT8_ELEMENTS) \ + V(RAB_GSAB_UINT16_ELEMENTS) \ + V(RAB_GSAB_INT16_ELEMENTS) \ + V(RAB_GSAB_UINT32_ELEMENTS) \ + V(RAB_GSAB_INT32_ELEMENTS) \ + V(RAB_GSAB_FLOAT32_ELEMENTS) \ + V(RAB_GSAB_FLOAT64_ELEMENTS) \ + V(RAB_GSAB_UINT8_CLAMPED_ELEMENTS) \ + V(RAB_GSAB_BIGUINT64_ELEMENTS) \ + V(RAB_GSAB_BIGINT64_ELEMENTS) void HandlerBuiltinsAssembler::DispatchByElementsKind( TNode elements_kind, const ElementsKindSwitchCase& case_function, diff --git a/deps/v8/src/builtins/builtins-ic-gen.cc b/deps/v8/src/builtins/builtins-ic-gen.cc index 81bf6379eceef4..e172b5a129b533 100644 --- 
a/deps/v8/src/builtins/builtins-ic-gen.cc +++ b/deps/v8/src/builtins/builtins-ic-gen.cc @@ -10,70 +10,221 @@ namespace v8 { namespace internal { -#define IC_BUILTIN(Name) \ - void Builtins::Generate_##Name(compiler::CodeAssemblerState* state) { \ - AccessorAssembler assembler(state); \ - assembler.Generate##Name(); \ - } +void Builtins::Generate_LoadIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadIC(); +} +void Builtins::Generate_LoadIC_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadIC_Megamorphic(); +} +void Builtins::Generate_LoadIC_Noninlined(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadIC_Noninlined(); +} +void Builtins::Generate_LoadIC_NoFeedback(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadIC_NoFeedback(); +} +void Builtins::Generate_LoadICTrampoline(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadICTrampoline(); +} +void Builtins::Generate_LoadICBaseline(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadICBaseline(); +} +void Builtins::Generate_LoadICTrampoline_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadICTrampoline_Megamorphic(); +} +void Builtins::Generate_LoadSuperIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadSuperIC(); +} +void Builtins::Generate_LoadSuperICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadSuperICBaseline(); +} +void Builtins::Generate_KeyedLoadIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadIC(); +} +void Builtins::Generate_KeyedLoadIC_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadIC_Megamorphic(); +} +void Builtins::Generate_KeyedLoadIC_PolymorphicName( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadIC_PolymorphicName(); +} +void Builtins::Generate_KeyedLoadICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadICTrampoline(); +} +void Builtins::Generate_KeyedLoadICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadICBaseline(); +} +void Builtins::Generate_KeyedLoadICTrampoline_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedLoadICTrampoline_Megamorphic(); +} +void Builtins::Generate_LoadGlobalIC_NoFeedback( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalIC_NoFeedback(); +} +void Builtins::Generate_StoreGlobalIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreGlobalIC(); +} +void Builtins::Generate_StoreGlobalICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreGlobalICTrampoline(); +} +void Builtins::Generate_StoreGlobalICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreGlobalICBaseline(); +} +void 
Builtins::Generate_StoreIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreIC(); +} +void Builtins::Generate_StoreICTrampoline(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreICTrampoline(); +} +void Builtins::Generate_StoreICBaseline(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreICBaseline(); +} +void Builtins::Generate_KeyedStoreIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedStoreIC(); +} +void Builtins::Generate_KeyedStoreICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedStoreICTrampoline(); +} +void Builtins::Generate_KeyedStoreICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedStoreICBaseline(); +} +void Builtins::Generate_StoreInArrayLiteralIC( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreInArrayLiteralIC(); +} +void Builtins::Generate_StoreInArrayLiteralICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateStoreInArrayLiteralICBaseline(); +} +void Builtins::Generate_CloneObjectIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateCloneObjectIC(); +} +void Builtins::Generate_CloneObjectICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateCloneObjectICBaseline(); +} +void Builtins::Generate_CloneObjectIC_Slow( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateCloneObjectIC_Slow(); +} +void Builtins::Generate_KeyedHasIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedHasIC(); +} +void Builtins::Generate_KeyedHasICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedHasICBaseline(); +} +void Builtins::Generate_KeyedHasIC_Megamorphic( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedHasIC_Megamorphic(); +} +void Builtins::Generate_KeyedHasIC_PolymorphicName( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateKeyedHasIC_PolymorphicName(); +} + +void Builtins::Generate_LoadGlobalIC(compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalIC(TypeofMode::kNotInside); +} -#define IC_BUILTIN_PARAM(BuiltinName, GeneratorName, parameter) \ - void Builtins::Generate_##BuiltinName(compiler::CodeAssemblerState* state) { \ - AccessorAssembler assembler(state); \ - assembler.Generate##GeneratorName(parameter); \ - } +void Builtins::Generate_LoadGlobalICInsideTypeof( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalIC(TypeofMode::kInside); +} -IC_BUILTIN(LoadIC) -IC_BUILTIN(LoadIC_Megamorphic) -IC_BUILTIN(LoadIC_Noninlined) -IC_BUILTIN(LoadIC_NoFeedback) -IC_BUILTIN(LoadICTrampoline) -IC_BUILTIN(LoadICBaseline) -IC_BUILTIN(LoadICTrampoline_Megamorphic) -IC_BUILTIN(LoadSuperIC) -IC_BUILTIN(LoadSuperICBaseline) -IC_BUILTIN(KeyedLoadIC) -IC_BUILTIN(KeyedLoadIC_Megamorphic) -IC_BUILTIN(KeyedLoadIC_PolymorphicName) -IC_BUILTIN(KeyedLoadICTrampoline) 
-IC_BUILTIN(KeyedLoadICBaseline) -IC_BUILTIN(KeyedLoadICTrampoline_Megamorphic) -IC_BUILTIN(LoadGlobalIC_NoFeedback) -IC_BUILTIN(StoreGlobalIC) -IC_BUILTIN(StoreGlobalICTrampoline) -IC_BUILTIN(StoreGlobalICBaseline) -IC_BUILTIN(StoreIC) -IC_BUILTIN(StoreICTrampoline) -IC_BUILTIN(StoreICBaseline) -IC_BUILTIN(KeyedStoreIC) -IC_BUILTIN(KeyedStoreICTrampoline) -IC_BUILTIN(KeyedStoreICBaseline) -IC_BUILTIN(StoreInArrayLiteralIC) -IC_BUILTIN(StoreInArrayLiteralICBaseline) -IC_BUILTIN(CloneObjectIC) -IC_BUILTIN(CloneObjectICBaseline) -IC_BUILTIN(CloneObjectIC_Slow) -IC_BUILTIN(KeyedHasIC) -IC_BUILTIN(KeyedHasICBaseline) -IC_BUILTIN(KeyedHasIC_Megamorphic) -IC_BUILTIN(KeyedHasIC_PolymorphicName) +void Builtins::Generate_LoadGlobalICTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kNotInside); +} -IC_BUILTIN_PARAM(LoadGlobalIC, LoadGlobalIC, NOT_INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadGlobalICInsideTypeof, LoadGlobalIC, INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadGlobalICTrampoline, LoadGlobalICTrampoline, - NOT_INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofTrampoline, LoadGlobalICTrampoline, - INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadGlobalICBaseline, LoadGlobalICBaseline, NOT_INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LoadGlobalICInsideTypeofBaseline, LoadGlobalICBaseline, - INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LookupGlobalICBaseline, LookupGlobalICBaseline, - NOT_INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LookupGlobalICInsideTypeofBaseline, LookupGlobalICBaseline, - INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LookupContextBaseline, LookupContextBaseline, - NOT_INSIDE_TYPEOF) -IC_BUILTIN_PARAM(LookupContextInsideTypeofBaseline, LookupContextBaseline, - INSIDE_TYPEOF) +void Builtins::Generate_LoadGlobalICInsideTypeofTrampoline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalICTrampoline(TypeofMode::kInside); +} + +void Builtins::Generate_LoadGlobalICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalICBaseline(TypeofMode::kNotInside); +} + +void Builtins::Generate_LoadGlobalICInsideTypeofBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLoadGlobalICBaseline(TypeofMode::kInside); +} + +void Builtins::Generate_LookupGlobalICBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLookupGlobalICBaseline(TypeofMode::kNotInside); +} + +void Builtins::Generate_LookupGlobalICInsideTypeofBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLookupGlobalICBaseline(TypeofMode::kInside); +} + +void Builtins::Generate_LookupContextBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLookupContextBaseline(TypeofMode::kNotInside); +} + +void Builtins::Generate_LookupContextInsideTypeofBaseline( + compiler::CodeAssemblerState* state) { + AccessorAssembler assembler(state); + assembler.GenerateLookupContextBaseline(TypeofMode::kInside); +} TF_BUILTIN(DynamicCheckMaps, CodeStubAssembler) { auto map = Parameter(Descriptor::kMap); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 0c4131dba96a7f..274709b46a0510 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -7,6 +7,7 @@ #include 
"src/builtins/builtins-utils-gen.h" #include "src/builtins/builtins.h" #include "src/codegen/code-stub-assembler.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/macro-assembler.h" #include "src/execution/frame-constants.h" #include "src/heap/memory-chunk.h" @@ -172,11 +173,11 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { } TNode ShouldSkipFPRegs(TNode mode) { - return TaggedEqual(mode, SmiConstant(kDontSaveFPRegs)); + return TaggedEqual(mode, SmiConstant(SaveFPRegsMode::kIgnore)); } TNode ShouldEmitRememberSet(TNode remembered_set) { - return TaggedEqual(remembered_set, SmiConstant(EMIT_REMEMBERED_SET)); + return TaggedEqual(remembered_set, SmiConstant(RememberedSetAction::kEmit)); } template @@ -188,7 +189,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { BIND(&dont_save_fp); { CallCFunctionWithCallerSavedRegisters( - function, MachineTypeOf::value, kDontSaveFPRegs, + function, MachineTypeOf::value, SaveFPRegsMode::kIgnore, std::make_pair(MachineTypeOf::value, arg0), std::make_pair(MachineTypeOf::value, arg1)); Goto(next); @@ -197,7 +198,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { BIND(&save_fp); { CallCFunctionWithCallerSavedRegisters( - function, MachineTypeOf::value, kSaveFPRegs, + function, MachineTypeOf::value, SaveFPRegsMode::kSave, std::make_pair(MachineTypeOf::value, arg0), std::make_pair(MachineTypeOf::value, arg1)); Goto(next); @@ -213,7 +214,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { BIND(&dont_save_fp); { CallCFunctionWithCallerSavedRegisters( - function, MachineTypeOf::value, kDontSaveFPRegs, + function, MachineTypeOf::value, SaveFPRegsMode::kIgnore, std::make_pair(MachineTypeOf::value, arg0), std::make_pair(MachineTypeOf::value, arg1), std::make_pair(MachineTypeOf::value, arg2)); @@ -223,7 +224,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { BIND(&save_fp); { CallCFunctionWithCallerSavedRegisters( - function, MachineTypeOf::value, kSaveFPRegs, + function, MachineTypeOf::value, SaveFPRegsMode::kSave, std::make_pair(MachineTypeOf::value, arg0), std::make_pair(MachineTypeOf::value, arg1), std::make_pair(MachineTypeOf::value, arg2)); @@ -821,8 +822,9 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) { Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver)); const bool builtin_exit_frame = true; - TNode code = HeapConstant(CodeFactory::CEntry( - isolate(), 1, kDontSaveFPRegs, kArgvOnStack, builtin_exit_frame)); + TNode code = + HeapConstant(CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, + ArgvMode::kStack, builtin_exit_frame)); // Unconditionally push argc, target and new target as extra stack arguments. // They will be used by stack frame iterators when constructing stack trace. 
@@ -891,54 +893,54 @@ TF_BUILTIN(AbortCSAAssert, CodeStubAssembler) { void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, false); + Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false); } void Builtins::Generate_CEntry_Return1_DontSaveFPRegs_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvOnStack, true); + Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true); } void Builtins:: Generate_CEntry_Return1_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, kDontSaveFPRegs, kArgvInRegister, false); + Generate_CEntry(masm, 1, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false); } void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, false); + Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, false); } void Builtins::Generate_CEntry_Return1_SaveFPRegs_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 1, kSaveFPRegs, kArgvOnStack, true); + Generate_CEntry(masm, 1, SaveFPRegsMode::kSave, ArgvMode::kStack, true); } void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, false); + Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, false); } void Builtins::Generate_CEntry_Return2_DontSaveFPRegs_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvOnStack, true); + Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kStack, true); } void Builtins:: Generate_CEntry_Return2_DontSaveFPRegs_ArgvInRegister_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, kDontSaveFPRegs, kArgvInRegister, false); + Generate_CEntry(masm, 2, SaveFPRegsMode::kIgnore, ArgvMode::kRegister, false); } void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_NoBuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, false); + Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, false); } void Builtins::Generate_CEntry_Return2_SaveFPRegs_ArgvOnStack_BuiltinExit( MacroAssembler* masm) { - Generate_CEntry(masm, 2, kSaveFPRegs, kArgvOnStack, true); + Generate_CEntry(masm, 2, SaveFPRegsMode::kSave, ArgvMode::kStack, true); } #if !defined(V8_TARGET_ARCH_ARM) && !defined(V8_TARGET_ARCH_MIPS) @@ -956,7 +958,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) { // TODO(v8:11421): Remove #if once baseline compiler is ported to other // architectures. 
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \ - V8_TARGET_ARCH_ARM + V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) { EmitReturnBaseline(masm); } diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index fe32a484a3e65c..6febc81c3a503f 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -668,6 +668,49 @@ BUILTIN(LocalePrototypeMinimize) { RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Minimize(isolate, locale)); } +BUILTIN(LocalePrototypeCalendars) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.calendars"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Calendars(isolate, locale)); +} + +BUILTIN(LocalePrototypeCollations) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.collations"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::Collations(isolate, locale)); +} + +BUILTIN(LocalePrototypeHourCycles) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.hourCycles"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::HourCycles(isolate, locale)); +} + +BUILTIN(LocalePrototypeNumberingSystems) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.numberingSystems"); + RETURN_RESULT_OR_FAILURE(isolate, + JSLocale::NumberingSystems(isolate, locale)); +} + +BUILTIN(LocalePrototypeTextInfo) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.textInfo"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TextInfo(isolate, locale)); +} + +BUILTIN(LocalePrototypeTimeZones) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.timeZones"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::TimeZones(isolate, locale)); +} + +BUILTIN(LocalePrototypeWeekInfo) { + HandleScope scope(isolate); + CHECK_RECEIVER(JSLocale, locale, "Intl.Locale.prototype.weekInfo"); + RETURN_RESULT_OR_FAILURE(isolate, JSLocale::WeekInfo(isolate, locale)); +} + BUILTIN(RelativeTimeFormatSupportedLocalesOf) { HandleScope scope(isolate); Handle locales = args.atOrUndefined(isolate, 1); diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc index 8af0bef95d2f41..4749ee094bce65 100644 --- a/deps/v8/src/builtins/builtins-lazy-gen.cc +++ b/deps/v8/src/builtins/builtins-lazy-gen.cc @@ -154,20 +154,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode function) { GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE), &baseline); - // Finally, check for presence of an NCI cached Code object - if an entry - // possibly exists, call into runtime to query the cache. 
- TNode flags2 = - LoadObjectField(shared, SharedFunctionInfo::kFlags2Offset); - TNode may_have_cached_code = - IsSetWord32(flags2); - code = Select( - may_have_cached_code, - [=]() { - return CAST(CallRuntime(Runtime::kTryInstallNCICode, - Parameter(Descriptor::kContext), - function)); - }, - [=]() { return sfi_code; }); + code = sfi_code; Goto(&tailcall_code); BIND(&baseline); diff --git a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc index 1ec9e350f672d6..281e9234dc773c 100644 --- a/deps/v8/src/builtins/builtins-microtask-queue-gen.cc +++ b/deps/v8/src/builtins/builtins-microtask-queue-gen.cc @@ -473,8 +473,7 @@ void MicrotaskQueueBuiltinsAssembler::RunAllPromiseHooks( TNode promise_or_capability) { Label hook(this, Label::kDeferred), done_hook(this); TNode promiseHookFlags = PromiseHookFlags(); - Branch(IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate( - promiseHookFlags), &hook, &done_hook); + Branch(NeedsAnyPromiseHooks(promiseHookFlags), &hook, &done_hook); BIND(&hook); { switch (type) { diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index 23648efb98bd57..e59d2a00ac3b75 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -1014,6 +1014,12 @@ TF_BUILTIN(RegExpExecInternal, RegExpBuiltinsAssembler) { TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, TNode regexp, bool is_fastpath) { + TVARIABLE(String, result); + Label runtime(this, Label::kDeferred), done(this, &result); + if (is_fastpath) { + GotoIfForceSlowPath(&runtime); + } + Isolate* isolate = this->isolate(); const TNode int_one = IntPtrConstant(1); @@ -1110,7 +1116,7 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, // corresponding char for each set flag. 
{ - const TNode result = AllocateSeqOneByteString(var_length.value()); + const TNode string = AllocateSeqOneByteString(var_length.value()); TVARIABLE(IntPtrT, var_offset, IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag)); @@ -1120,7 +1126,7 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, Label next(this); \ GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \ const TNode value = Int32Constant(CHAR); \ - StoreNoWriteBarrier(MachineRepresentation::kWord8, result, \ + StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \ var_offset.value(), value); \ var_offset = IntPtrAdd(var_offset.value(), int_one); \ Goto(&next); \ @@ -1137,7 +1143,26 @@ TNode RegExpBuiltinsAssembler::FlagsGetter(TNode context, CASE_FOR_FLAG(JSRegExp::kSticky, 'y'); #undef CASE_FOR_FLAG - return result; + if (is_fastpath) { +#ifdef V8_ENABLE_FORCE_SLOW_PATH + result = string; + Goto(&done); + + BIND(&runtime); + { + result = + CAST(CallRuntime(Runtime::kRegExpStringFromFlags, context, regexp)); + Goto(&done); + } + + BIND(&done); + return result.value(); +#else + return string; +#endif + } else { + return string; + } } } diff --git a/deps/v8/src/builtins/builtins-trace.cc b/deps/v8/src/builtins/builtins-trace.cc index cf85ce9948b547..24baf59522b1d7 100644 --- a/deps/v8/src/builtins/builtins-trace.cc +++ b/deps/v8/src/builtins/builtins-trace.cc @@ -9,6 +9,7 @@ #include "src/json/json-stringifier.h" #include "src/logging/counters.h" #include "src/objects/objects-inl.h" +#include "src/tracing/traced-value.h" #if defined(V8_USE_PERFETTO) #include "protos/perfetto/trace/track_event/debug_annotation.pbzero.h" diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index 65b1ab2f2b93e3..d333a61e3951b1 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -123,13 +123,26 @@ TF_BUILTIN(TypedArrayPrototypeByteLength, TypedArrayBuiltinsAssembler) { // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); - // Default to zero if the {receiver}s buffer was detached. + TNode receiver_array = CAST(receiver); TNode receiver_buffer = - LoadJSArrayBufferViewBuffer(CAST(receiver)); - TNode byte_length = Select( - IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, - [=] { return LoadJSArrayBufferViewByteLength(CAST(receiver)); }); - Return(ChangeUintPtrToTagged(byte_length)); + LoadJSArrayBufferViewBuffer(receiver_array); + + Label variable_length(this), normal(this); + Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal); + BIND(&variable_length); + { + Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayByteLength( + context, receiver_array, receiver_buffer))); + } + + BIND(&normal); + { + // Default to zero if the {receiver}s buffer was detached. + TNode byte_length = Select( + IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, + [=] { return LoadJSArrayBufferViewByteLength(receiver_array); }); + Return(ChangeUintPtrToTagged(byte_length)); + } } // ES6 #sec-get-%typedarray%.prototype.byteoffset @@ -159,13 +172,29 @@ TF_BUILTIN(TypedArrayPrototypeLength, TypedArrayBuiltinsAssembler) { // Check if the {receiver} is actually a JSTypedArray. ThrowIfNotInstanceType(context, receiver, JS_TYPED_ARRAY_TYPE, kMethodName); - // Default to zero if the {receiver}s buffer was detached. 
+ TNode receiver_array = CAST(receiver); TNode receiver_buffer = - LoadJSArrayBufferViewBuffer(CAST(receiver)); - TNode length = Select( - IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, - [=] { return LoadJSTypedArrayLength(CAST(receiver)); }); - Return(ChangeUintPtrToTagged(length)); + LoadJSArrayBufferViewBuffer(receiver_array); + + Label variable_length(this), normal(this); + Branch(IsVariableLengthTypedArray(receiver_array), &variable_length, &normal); + BIND(&variable_length); + { + Label miss(this); + Return(ChangeUintPtrToTagged(LoadVariableLengthJSTypedArrayLength( + receiver_array, receiver_buffer, &miss))); + BIND(&miss); + Return(ChangeUintPtrToTagged(UintPtrConstant(0))); + } + + BIND(&normal); + { + // Default to zero if the {receiver}s buffer was detached. + TNode length = Select( + IsDetachedBuffer(receiver_buffer), [=] { return UintPtrConstant(0); }, + [=] { return LoadJSTypedArrayLength(receiver_array); }); + Return(ChangeUintPtrToTagged(length)); + } } TNode TypedArrayBuiltinsAssembler::IsUint8ElementsKind( @@ -322,17 +351,18 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind( int32_t elements_kinds[] = { #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) TYPE##_ELEMENTS, - TYPED_ARRAYS(TYPED_ARRAY_CASE) + TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE }; #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) Label if_##type##array(this); TYPED_ARRAYS(TYPED_ARRAY_CASE) + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE Label* elements_kind_labels[] = { #define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) &if_##type##array, - TYPED_ARRAYS(TYPED_ARRAY_CASE) + TYPED_ARRAYS(TYPED_ARRAY_CASE) RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE }; STATIC_ASSERT(arraysize(elements_kinds) == arraysize(elements_kind_labels)); @@ -350,6 +380,15 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind( TYPED_ARRAYS(TYPED_ARRAY_CASE) #undef TYPED_ARRAY_CASE +#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype) \ + BIND(&if_##type##array); \ + { \ + case_function(TYPE##_ELEMENTS, sizeof(ctype), 0); \ + Goto(&next); \ + } + RAB_GSAB_TYPED_ARRAYS(TYPED_ARRAY_CASE) +#undef TYPED_ARRAY_CASE + BIND(&if_unknown_type); Unreachable(); @@ -374,7 +413,7 @@ void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr( IntPtrSub(full_base, Signed(ChangeUint32ToWord(compressed_base))); // Add JSTypedArray::ExternalPointerCompensationForOnHeapArray() to offset. DCHECK_EQ( - isolate()->isolate_root(), + isolate()->cage_base(), JSTypedArray::ExternalPointerCompensationForOnHeapArray(isolate())); // See JSTypedArray::SetOnHeapDataPtr() for details. 
offset = Unsigned(IntPtrAdd(offset, ptr_compr_cage_base)); diff --git a/deps/v8/src/builtins/builtins-typed-array.cc b/deps/v8/src/builtins/builtins-typed-array.cc index fdadc7a554cbae..bb936e6e463ef9 100644 --- a/deps/v8/src/builtins/builtins-typed-array.cc +++ b/deps/v8/src/builtins/builtins-typed-array.cc @@ -154,7 +154,8 @@ BUILTIN(TypedArrayPrototypeFill) { DCHECK_LE(end, len); DCHECK_LE(count, len); - return ElementsAccessor::ForKind(kind)->Fill(array, obj_value, start, end); + RETURN_RESULT_OR_FAILURE(isolate, ElementsAccessor::ForKind(kind)->Fill( + array, obj_value, start, end)); } BUILTIN(TypedArrayPrototypeIncludes) { diff --git a/deps/v8/src/builtins/builtins-utils.h b/deps/v8/src/builtins/builtins-utils.h index e5f420a20de9a0..b9146ab6253b79 100644 --- a/deps/v8/src/builtins/builtins-utils.h +++ b/deps/v8/src/builtins/builtins-utils.h @@ -85,8 +85,7 @@ class BuiltinArguments : public JavaScriptArguments { V8_NOINLINE static Address Builtin_Impl_Stats_##name( \ int args_length, Address* args_object, Isolate* isolate) { \ BuiltinArguments args(args_length, args_object); \ - RuntimeCallTimerScope timer(isolate, \ - RuntimeCallCounterId::kBuiltin_##name); \ + RCS_SCOPE(isolate, RuntimeCallCounterId::kBuiltin_##name); \ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.runtime"), \ "V8.Builtin_" #name); \ return CONVERT_OBJECT(Builtin_Impl_##name(args, isolate)); \ diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index 0704d8681bae43..eb9311d0c6236c 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -9,7 +9,6 @@ #include "src/codegen/interface-descriptors.h" #include "src/objects/objects-inl.h" #include "src/wasm/wasm-objects.h" -#include "src/wasm/wasm-opcodes.h" namespace v8 { namespace internal { diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq index 2bec3d86be062d..d7d2eb6aa6d9b0 100644 --- a/deps/v8/src/builtins/cast.tq +++ b/deps/v8/src/builtins/cast.tq @@ -329,6 +329,24 @@ Cast(o: Object): Number|TheHole labels CastError { } } +Cast(o: Object): Context|Zero|Undefined + labels CastError { + typeswitch (o) { + case (o: Context): { + return o; + } + case (o: Zero): { + return o; + } + case (o: Undefined): { + return o; + } + case (Object): { + goto CastError; + } + } +} + macro Cast(o: HeapObject): A labels CastError; diff --git a/deps/v8/src/builtins/constructor.tq b/deps/v8/src/builtins/constructor.tq index add6db03052a76..d929c7f485fce2 100644 --- a/deps/v8/src/builtins/constructor.tq +++ b/deps/v8/src/builtins/constructor.tq @@ -15,6 +15,8 @@ extern runtime CreateObjectLiteral( namespace constructor { +extern builtin FastNewClosure( + Context, SharedFunctionInfo, FeedbackCell): JSFunction; extern builtin FastNewObject(Context, JSFunction, JSReceiver): JSObject; extern enum AllocationSiteMode { @@ -42,6 +44,15 @@ extern macro ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( extern macro ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(Context): JSObject; +extern macro LoadContextFromBaseline(): Context; + +builtin FastNewClosureBaseline( + sharedFunctionInfo: SharedFunctionInfo, + feedbackCell: FeedbackCell): JSFunction { + const context = LoadContextFromBaseline(); + tail FastNewClosure(context, sharedFunctionInfo, feedbackCell); +} + builtin FastNewFunctionContextEval(implicit context: Context)( scopeInfo: ScopeInfo, slots: uint32): Context { return FastNewFunctionContext(scopeInfo, slots, context, kEvalScope); diff --git 
a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq index 5a2dccd068c817..636f49a024d813 100644 --- a/deps/v8/src/builtins/conversion.tq +++ b/deps/v8/src/builtins/conversion.tq @@ -45,11 +45,30 @@ builtin NumberToString(implicit context: Context)(input: Number): String { } // ES6 section 7.1.2 ToBoolean ( argument ) -builtin ToBoolean(implicit context: Context)(input: JSAny): Boolean { +builtin ToBoolean(input: JSAny): Boolean { BranchIfToBooleanIsTrue(input) otherwise return TrueConstant(), return FalseConstant(); } +struct ToBooleanForBaselineJumpResult { + value: JSAny; + is_to_boolean: Smi; +} +// ToBoolean for baseline code jumps, which +// a) returns the original value as the first return value, to avoid needing +// to save it in the caller, and +// b) returns the true/false value as a Smi, to make the baseline-side +// comparison cheaper. +builtin ToBooleanForBaselineJump(input: JSAny): ToBooleanForBaselineJumpResult { + try { + BranchIfToBooleanIsTrue(input) otherwise IsTrue, IsFalse; + } label IsTrue { + return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 1}; + } label IsFalse { + return ToBooleanForBaselineJumpResult{value: input, is_to_boolean: 0}; + } +} + transitioning builtin ToLength(implicit context: Context)(input: JSAny): Number { // We might need to loop once for ToNumber conversion. diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 44b71bed915dd4..4993de4816f5ae 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -8,6 +8,7 @@ #include "src/base/bits-iterator.h" #include "src/base/iterator.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" @@ -116,7 +117,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // edx: new target // Reload context from the frame. __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset)); - __ InvokeFunction(edi, edx, eax, CALL_FUNCTION); + __ InvokeFunction(edi, edx, eax, InvokeType::kCall); // Restore context from the frame. __ mov(esi, Operand(ebp, ConstructFrameConstants::kContextOffset)); @@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Restore and and call the constructor function. __ mov(edi, Operand(ebp, ConstructFrameConstants::kConstructorOffset)); - __ InvokeFunction(edi, edx, eax, CALL_FUNCTION); + __ InvokeFunction(edi, edx, eax, InvokeType::kCall); // ----------- S t a t e ------------- // -- eax: constructor result @@ -597,7 +598,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Store input value into generator object. __ mov(FieldOperand(edx, JSGeneratorObject::kInputOrDebugPosOffset), eax); __ RecordWriteField(edx, JSGeneratorObject::kInputOrDebugPosOffset, eax, ecx, - kDontSaveFPRegs); + SaveFPRegsMode::kIgnore); // Load suspended function and context. 
__ mov(edi, FieldOperand(edx, JSGeneratorObject::kFunctionOffset)); @@ -645,15 +646,12 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset)); { Label done_loop, loop; - __ mov(edi, ecx); - __ bind(&loop); - __ dec(edi); + __ dec(ecx); __ j(less, &done_loop); __ Push( - FieldOperand(ebx, edi, times_tagged_size, FixedArray::kHeaderSize)); + FieldOperand(ebx, ecx, times_tagged_size, FixedArray::kHeaderSize)); __ jmp(&loop); - __ bind(&done_loop); } @@ -740,7 +738,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ mov(FieldOperand(closure, JSFunction::kCodeOffset), optimized_code); __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit, + SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1458,7 +1457,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl( // and edi are used as scratch registers. Generate_InterpreterPushZeroAndArgsAndReturnAddress( masm, eax, ecx, edx, edi, - InterpreterPushArgsThenConstructDescriptor::kStackArgumentsCount, + InterpreterPushArgsThenConstructDescriptor::GetStackParameterCount(), &stack_overflow); // Call the appropriate constructor. eax and ecx already contain intended @@ -1591,7 +1590,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ jmp(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ mov(kInterpreterBytecodeArrayRegister, Operand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1636,7 +1635,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } // static @@ -1666,7 +1665,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ mov(feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch); __ Assert(equal, AbortReason::kExpectedFeedbackVector); } @@ -1939,7 +1938,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // arguments to the receiver. 
__ bind(&no_arguments); { - __ Set(eax, 0); + __ Move(eax, 0); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } } @@ -2108,6 +2107,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -2133,7 +2133,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, __ pop(kArgumentsList); __ PushReturnAddressFrom(edx); - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow kArgumentsList to be a FixedArray, or a FixedDoubleArray if // kArgumentsLength == 0. Label ok, fail; @@ -2294,7 +2294,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ AllocateStackSpace(scratch); // Include return address and receiver. __ add(eax, Immediate(2)); - __ Set(current, 0); + __ Move(current, 0); __ jmp(&check); // Loop. __ bind(©); @@ -2443,7 +2443,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movzx_w( ecx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(edi, no_reg, ecx, eax, JUMP_FUNCTION); + __ InvokeFunctionCode(edi, no_reg, ecx, eax, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); { @@ -2788,6 +2788,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ mov(kContextRegister, + MemOperand(ebp, BaselineFrameConstants::kContextOffset)); return OnStackReplacement(masm, false); } @@ -2896,6 +2898,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // TODO(v8:10701): Implement for this platform. __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2908,7 +2915,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // esi: current context (C callee-saved) // edi: JS function of the caller (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // ecx: pointer to the first argument STATIC_ASSERT(eax == kRuntimeCallArgCountRegister); @@ -2928,8 +2935,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, int arg_stack_space = 3; // Enter the exit frame that transitions from JavaScript to C++. - if (argv_mode == kArgvInRegister) { - DCHECK(save_doubles == kDontSaveFPRegs); + if (argv_mode == ArgvMode::kRegister) { + DCHECK(save_doubles == SaveFPRegsMode::kIgnore); DCHECK(!builtin_exit_frame); __ EnterApiExitFrame(arg_stack_space, edi); @@ -2938,7 +2945,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ mov(edi, eax); } else { __ EnterExitFrame( - arg_stack_space, save_doubles == kSaveFPRegs, + arg_stack_space, save_doubles == SaveFPRegsMode::kSave, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); } @@ -2985,7 +2992,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, + argv_mode == ArgvMode::kStack); __ ret(0); // Handling of exception. 
@@ -3148,7 +3156,7 @@ Operand ApiParameterOperand(int index) { // stores the pointer to the reserved slot into esi. void PrepareCallApiFunction(MacroAssembler* masm, int argc, Register scratch) { __ EnterApiExitFrame(argc, scratch); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ mov(esi, Immediate(bit_cast(kZapValue))); } } @@ -3961,9 +3969,16 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ movsd(Operand(esi, dst_offset), xmm0); } + if (FLAG_debug_code) { + const int kTopMask = 0x3800; + __ push(eax); + __ fwait(); + __ fnstsw_ax(); + __ test(eax, Immediate(kTopMask)); + __ Assert(zero, AbortReason::kFpuTopIsNotZeroInDeoptimizer); + __ pop(eax); + } // Clear FPU all exceptions. - // TODO(ulan): Find out why the TOP register is not zero here in some cases, - // and check that the generated code never deoptimizes with unbalanced stack. __ fnclex(); // Mark the stack as not iterable for the CPU profiler which won't be able to diff --git a/deps/v8/src/builtins/ic-callable.tq b/deps/v8/src/builtins/ic-callable.tq index 85525c4c683a1e..dd29e8bf5e2705 100644 --- a/deps/v8/src/builtins/ic-callable.tq +++ b/deps/v8/src/builtins/ic-callable.tq @@ -6,6 +6,10 @@ namespace ic { namespace callable { extern macro IncrementCallCount(FeedbackVector, uintptr): void; +const kCallFeedbackContentFieldMask: constexpr int32 + generates 'FeedbackNexus::CallFeedbackContentField::kMask'; +const kCallFeedbackContentFieldShift: constexpr uint32 + generates 'FeedbackNexus::CallFeedbackContentField::kShift'; macro IsMonomorphic(feedback: MaybeObject, target: JSAny): bool { return IsWeakReferenceToObject(feedback, target); @@ -50,8 +54,42 @@ macro TransitionToMegamorphic(implicit context: Context)( ReportFeedbackUpdate(feedbackVector, slotId, 'Call:TransitionMegamorphic'); } +macro TaggedEqualPrototypeApplyFunction(implicit context: Context)( + target: JSAny): bool { + return TaggedEqual(target, GetPrototypeApplyFunction()); +} + +macro FeedbackValueIsReceiver(implicit context: Context)( + feedbackVector: FeedbackVector, slotId: uintptr): bool { + const callCount: intptr = SmiUntag(Cast(LoadFeedbackVectorSlot( + feedbackVector, slotId, kTaggedSize)) otherwise return false); + return (callCount & IntPtrConstant(kCallFeedbackContentFieldMask)) != + IntPtrConstant(0); +} + +macro SetCallFeedbackContent(implicit context: Context)( + feedbackVector: FeedbackVector, slotId: uintptr, + callFeedbackContent: constexpr CallFeedbackContent): void { + // Load the call count field from the feecback vector. + const callCount: intptr = SmiUntag(Cast(LoadFeedbackVectorSlot( + feedbackVector, slotId, kTaggedSize)) otherwise return ); + // The second lowest bits of the call count are used to state whether the + // feedback collected is a target or a receiver. Change that bit based on the + // callFeedbackContent input. 
+ const callFeedbackContentFieldMask: intptr = + ~IntPtrConstant(kCallFeedbackContentFieldMask); + const newCount: intptr = (callCount & callFeedbackContentFieldMask) | + Convert(Signed( + %RawConstexprCast(callFeedbackContent) + << kCallFeedbackContentFieldShift)); + StoreFeedbackVectorSlot( + feedbackVector, slotId, SmiTag(newCount), SKIP_WRITE_BARRIER, + kTaggedSize); + ReportFeedbackUpdate(feedbackVector, slotId, 'Call:SetCallFeedbackContent'); +} + macro CollectCallFeedback( - maybeTarget: JSAny, context: Context, + maybeTarget: JSAny, maybeReceiver: Lazy, context: Context, maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void { // TODO(v8:9891): Remove this assert once all callers are ported to Torque. // This assert ensures correctness of maybeFeedbackVector's type which can @@ -72,7 +110,24 @@ macro CollectCallFeedback( // If cleared, we have a new chance to become monomorphic. const feedbackValue: HeapObject = - MaybeObjectToStrong(feedback) otherwise TryInitializeAsMonomorphic; + MaybeObjectToStrong(feedback) otherwise TryReinitializeAsMonomorphic; + + if (FeedbackValueIsReceiver(feedbackVector, slotId) && + TaggedEqualPrototypeApplyFunction(maybeTarget)) { + // If the Receiver is recorded and the target is + // Function.prototype.apply, check whether we can stay monomorphic based + // on the receiver. + if (IsMonomorphic(feedback, RunLazy(maybeReceiver))) { + return; + } else { + // If not, reinitialize the feedback with target. + SetCallFeedbackContent( + feedbackVector, slotId, CallFeedbackContent::kTarget); + TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId) + otherwise TransitionToMegamorphic; + return; + } + } // Try transitioning to a feedback cell. // Check if {target}s feedback cell matches the {feedbackValue}. 
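The ic-callable.tq changes above store one extra piece of information in the call-count slot of the feedback vector: whether the recorded monomorphic feedback is the call target or, for Function.prototype.apply, the receiver. Below is a rough C++ sketch of that bit-packing scheme. The shift and mask constants are invented for the example and stand in for FeedbackNexus::CallFeedbackContentField; the real field layout differs, but the idea of sharing one word between a flag bit and a count is the same.

#include <cassert>
#include <cstdint>

// Illustrative layout: bit 1 holds the "content" flag (the comment in the patch
// calls it the second lowest bit), higher bits hold the invocation count.
constexpr uint32_t kContentShift = 1;
constexpr uint32_t kContentMask = 1u << kContentShift;
constexpr uint32_t kCountShift = 2;

enum class CallFeedbackContent : uint32_t { kTarget = 0, kReceiver = 1 };

uint32_t SetContent(uint32_t slot, CallFeedbackContent content) {
  return (slot & ~kContentMask) |
         (static_cast<uint32_t>(content) << kContentShift);
}

CallFeedbackContent GetContent(uint32_t slot) {
  return static_cast<CallFeedbackContent>((slot & kContentMask) >> kContentShift);
}

uint32_t IncrementCount(uint32_t slot) { return slot + (1u << kCountShift); }

int main() {
  uint32_t slot = 0;
  slot = SetContent(slot, CallFeedbackContent::kReceiver);
  slot = IncrementCount(slot);
  assert(GetContent(slot) == CallFeedbackContent::kReceiver);
  assert((slot >> kCountShift) == 1);  // one recorded call, flag preserved
  return 0;
}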
@@ -92,8 +147,20 @@ macro CollectCallFeedback( StoreWeakReferenceInFeedbackVector(feedbackVector, slotId, feedbackCell); ReportFeedbackUpdate(feedbackVector, slotId, 'Call:FeedbackVectorCell'); + } label TryReinitializeAsMonomorphic { + SetCallFeedbackContent( + feedbackVector, slotId, CallFeedbackContent::kTarget); + goto TryInitializeAsMonomorphic; } label TryInitializeAsMonomorphic { - TryInitializeAsMonomorphic(maybeTarget, feedbackVector, slotId) + let recordedFunction = maybeTarget; + if (TaggedEqualPrototypeApplyFunction(maybeTarget)) { + recordedFunction = RunLazy(maybeReceiver); + SetCallFeedbackContent( + feedbackVector, slotId, CallFeedbackContent::kReceiver); + } else { + assert(!FeedbackValueIsReceiver(feedbackVector, slotId)); + } + TryInitializeAsMonomorphic(recordedFunction, feedbackVector, slotId) otherwise TransitionToMegamorphic; } label TransitionToMegamorphic { TransitionToMegamorphic(feedbackVector, slotId); diff --git a/deps/v8/src/builtins/ic.tq b/deps/v8/src/builtins/ic.tq index 49d4e78fa55851..a9e92cf63ec4ae 100644 --- a/deps/v8/src/builtins/ic.tq +++ b/deps/v8/src/builtins/ic.tq @@ -8,10 +8,10 @@ namespace ic { @export macro CollectCallFeedback( - maybeTarget: JSAny, context: Context, + maybeTarget: JSAny, maybeReceiver: Lazy, context: Context, maybeFeedbackVector: Undefined|FeedbackVector, slotId: uintptr): void { callable::CollectCallFeedback( - maybeTarget, context, maybeFeedbackVector, slotId); + maybeTarget, maybeReceiver, context, maybeFeedbackVector, slotId); } @export @@ -51,10 +51,15 @@ macro IsUninitialized(feedback: MaybeObject): bool { } extern macro LoadFeedbackVectorSlot(FeedbackVector, uintptr): MaybeObject; +extern macro LoadFeedbackVectorSlot( + FeedbackVector, uintptr, constexpr int32): MaybeObject; extern operator '[]' macro LoadFeedbackVectorSlot( FeedbackVector, intptr): MaybeObject; extern macro StoreFeedbackVectorSlot( FeedbackVector, uintptr, MaybeObject): void; +extern macro StoreFeedbackVectorSlot( + FeedbackVector, uintptr, MaybeObject, constexpr WriteBarrierMode, + constexpr int32): void; extern macro StoreWeakReferenceInFeedbackVector( FeedbackVector, uintptr, HeapObject): MaybeObject; extern macro ReportFeedbackUpdate(FeedbackVector, uintptr, constexpr string); diff --git a/deps/v8/src/builtins/iterator.tq b/deps/v8/src/builtins/iterator.tq index 05993ea6d770e5..150e3d2cb57fe5 100644 --- a/deps/v8/src/builtins/iterator.tq +++ b/deps/v8/src/builtins/iterator.tq @@ -78,8 +78,8 @@ extern macro LoadContextFromBaseline(): Context; extern macro LoadFeedbackVectorFromBaseline(): FeedbackVector; transitioning builtin GetIteratorBaseline( - context: Context, receiver: JSAny, loadSlot: TaggedIndex, - callSlot: TaggedIndex): JSAny { + receiver: JSAny, loadSlot: TaggedIndex, callSlot: TaggedIndex): JSAny { + const context: Context = LoadContextFromBaseline(); const feedback: FeedbackVector = LoadFeedbackVectorFromBaseline(); const iteratorMethod: JSAny = LoadIC(context, receiver, IteratorSymbolConstant(), loadSlot, feedback); @@ -97,12 +97,18 @@ transitioning builtin CreateAsyncFromSyncIteratorBaseline(syncIterator: JSAny): return CreateAsyncFromSyncIterator(context, syncIterator); } +macro GetLazyReceiver(receiver: JSAny): JSAny { + return receiver; +} + transitioning builtin CallIteratorWithFeedback( context: Context, receiver: JSAny, iteratorMethod: JSAny, callSlot: Smi, feedback: Undefined|FeedbackVector): JSAny { // TODO(v8:10047): Use TaggedIndex here once TurboFan supports it. 
const callSlotUnTagged: uintptr = Unsigned(SmiUntag(callSlot)); - ic::CollectCallFeedback(iteratorMethod, context, feedback, callSlotUnTagged); + ic::CollectCallFeedback( + iteratorMethod, %MakeLazy('GetLazyReceiver', receiver), + context, feedback, callSlotUnTagged); const iteratorCallable: Callable = Cast(iteratorMethod) otherwise ThrowCalledNonCallable(iteratorMethod); return Call(context, iteratorCallable, receiver); diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 1d8e80bdf87748..6ff2ed4b5c3bdb 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" @@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // a0: number of arguments (untagged) // a1: constructor function // a3: new target - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // Restore context from the frame. __ lw(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -226,7 +227,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(s0); // Call the function. - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // ----------- S t a t e ------------- // -- v0: constructor result @@ -637,7 +638,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Store input value into generator object. __ sw(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3, - kRAHasNotBeenSaved, kDontSaveFPRegs); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); @@ -761,8 +762,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ sw(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1406,7 +1407,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Advance the current bytecode offset stored within the given interpreter // stack frame. This simulates what all bytecode handlers do upon completion // of the underlying operation. 
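A recurring pattern in this patch is that CollectCallFeedback now receives the receiver as a lazy value (LazyNode<Object> in the CSA builtins, Lazy<JSAny> produced by %MakeLazy in Torque), so the receiver is only materialized on the paths that actually need it. The same control flow expressed with a plain C++ lambda, purely as an illustration of the lazy-evaluation idea and not of the real CSA or Torque types:

#include <cassert>
#include <functional>

struct Feedback { bool megamorphic = false; };

// The receiver thunk is only invoked when feedback is still being collected and
// the target looks like Function.prototype.apply; on other paths it is untouched,
// so callers avoid a potentially costly receiver load.
void CollectCallFeedback(const Feedback& fb, bool target_is_apply,
                         const std::function<int()>& lazy_receiver) {
  if (fb.megamorphic) return;            // nothing to record, thunk never runs
  if (target_is_apply) {
    int receiver = lazy_receiver();      // materialize only when needed
    (void)receiver;                      // ... record receiver-based feedback here
  }
  // ... otherwise record target-based feedback
}

int main() {
  Feedback fb;
  int loads = 0;
  auto receiver = [&] { ++loads; return 42; };

  fb.megamorphic = true;
  CollectCallFeedback(fb, true, receiver);
  assert(loads == 0);                    // skipped on the megamorphic path

  fb.megamorphic = false;
  CollectCallFeedback(fb, true, receiver);
  assert(loads == 1);                    // evaluated exactly once when needed
  return 0;
}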
@@ -1453,7 +1454,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1756,7 +1757,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // -- t0 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]]) // ----------------------------------- - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow a2 to be a FixedArray, or a FixedDoubleArray if t0 == 0. Label ok, fail; __ AssertNotSmi(a2); @@ -2005,7 +2006,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ lhu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); + __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2364,6 +2365,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2376,10 +2383,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // a2: pointer to the first argument - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ mov(s1, a2); } else { @@ -2391,7 +2398,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == kSaveFPRegs, 0, + save_doubles == SaveFPRegsMode::kSave, 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s0: number of arguments including receiver (C callee-saved) @@ -2440,12 +2447,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // v0:v1: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // s0: still holds argc (callee-saved). : s0; - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -2698,7 +2705,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // No more valid handles (the result handle was the last one). Restore // previous handle scope. 
__ sw(s0, MemOperand(s5, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ lw(a1, MemOperand(s5, kLevelOffset)); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index c029188f146869..9d0156e9278098 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" @@ -102,7 +103,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // a0: number of arguments (untagged) // a1: constructor function // a3: new target - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // Restore context from the frame. __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -227,7 +228,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(a6); // Call the function. - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // ----------- S t a t e ------------- // -- v0: constructor result @@ -324,7 +325,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Store input value into generator object. __ Sd(v0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, v0, a3, - kRAHasNotBeenSaved, kDontSaveFPRegs); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); @@ -777,8 +778,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1424,7 +1425,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Advance the current bytecode offset stored within the given interpreter // stack frame. This simulates what all bytecode handlers do upon completion // of the underlying operation. 
@@ -1471,7 +1472,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1815,7 +1816,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // -- a4 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]]) // ----------------------------------- - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0. Label ok, fail; __ AssertNotSmi(a2); @@ -2073,7 +2074,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Lhu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); + __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2395,7 +2396,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Lbu(a1, MemOperand(a1)); __ Branch(&push_doubles, le, a1, Operand(zero_reg)); // Save vector registers. - __ MultiPushMSA(fp_regs); + { + CpuFeatureScope msa_scope( + masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported); + __ MultiPushMSA(fp_regs); + } __ Branch(&simd_pushed); __ bind(&push_doubles); __ MultiPushFPU(fp_regs); @@ -2419,7 +2424,11 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { __ Lbu(a1, MemOperand(a1)); __ Branch(&pop_doubles, le, a1, Operand(zero_reg)); // Pop vector registers. - __ MultiPopMSA(fp_regs); + { + CpuFeatureScope msa_scope( + masm, MIPS_SIMD, CpuFeatureScope::CheckPolicy::kDontCheckSupported); + __ MultiPopMSA(fp_regs); + } __ Branch(&simd_popped); __ bind(&pop_doubles); __ Daddu(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize); @@ -2456,6 +2465,12 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2468,10 +2483,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // a2: pointer to the first argument - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ mov(s1, a2); } else { @@ -2483,7 +2498,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == kSaveFPRegs, 0, + save_doubles == SaveFPRegsMode::kSave, 0, builtin_exit_frame ? 
StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s0: number of arguments including receiver (C callee-saved) @@ -2532,12 +2547,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // v0:v1: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // s0: still holds argc (callee-saved). : s0; - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -2794,7 +2809,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // No more valid handles (the result handle was the last one). Restore // previous handle scope. __ Sd(s0, MemOperand(s5, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ Lw(a1, MemOperand(s5, kLevelOffset)); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index bc467c9ff9ff20..35d817d3a26e9d 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" @@ -113,13 +114,13 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // r6: new target { ConstantPoolUnavailableScope constant_pool_unavailable(masm); - __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall); } // Restore context from the frame. - __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); // Restore smi-tagged arguments count from the frame. - __ LoadP(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ LoadU64(scratch, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); // Leave construct frame. } @@ -229,8 +230,8 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // ----------------------------------- // Restore constructor function and argument count. - __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); - __ LoadP(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kConstructorOffset)); + __ LoadU64(r3, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); __ SmiUntag(r3); Label stack_overflow; @@ -245,7 +246,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Call the function. { ConstantPoolUnavailableScope constant_pool_unavailable(masm); - __ InvokeFunctionWithNewTarget(r4, r6, r3, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r4, r6, r3, InvokeType::kCall); } // ----------- S t a t e ------------- @@ -275,12 +276,12 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. 
__ bind(&use_receiver); - __ LoadP(r3, MemOperand(sp)); + __ LoadU64(r3, MemOperand(sp)); __ JumpIfRoot(r3, RootIndex::kTheHoleValue, &do_throw); __ bind(&leave_and_return); // Restore smi-tagged arguments count from the frame. - __ LoadP(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); + __ LoadU64(r4, MemOperand(fp, ConstructFrameConstants::kLengthOffset)); // Leave construct frame. __ LeaveFrame(StackFrame::CONSTRUCT); @@ -305,13 +306,13 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ bind(&do_throw); // Restore the context from the frame. - __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject); __ bkpt(0); __ bind(&stack_overflow); // Restore the context from the frame. - __ LoadP(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); + __ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kThrowStackOverflow); // Unreachable code. __ bkpt(0); @@ -347,7 +348,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreTaggedField( r3, FieldMemOperand(r4, JSGeneratorObject::kInputOrDebugPosOffset), r0); __ RecordWriteField(r4, JSGeneratorObject::kInputOrDebugPosOffset, r3, r6, - kLRHasNotBeenSaved, kDontSaveFPRegs); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ LoadTaggedPointerField( @@ -373,7 +374,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { ExternalReference::debug_suspended_generator_address(masm->isolate()); __ Move(scratch, debug_suspended_generator); - __ LoadP(scratch, MemOperand(scratch)); + __ LoadU64(scratch, MemOperand(scratch)); __ cmp(scratch, r4); __ beq(&prepare_step_in_suspended_generator); __ bind(&stepping_prepared); @@ -402,19 +403,16 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FieldMemOperand(r4, JSGeneratorObject::kParametersAndRegistersOffset)); { Label done_loop, loop; - __ mr(r9, r6); - __ bind(&loop); - __ subi(r9, r9, Operand(1)); - __ cmpi(r9, Operand::Zero()); + __ subi(r6, r6, Operand(1)); + __ cmpi(r6, Operand::Zero()); __ blt(&done_loop); - __ ShiftLeftImm(r10, r9, Operand(kTaggedSizeLog2)); + __ ShiftLeftImm(r10, r6, Operand(kTaggedSizeLog2)); __ add(scratch, r5, r10); __ LoadAnyTaggedField(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ Push(scratch); __ b(&loop); - __ bind(&done_loop); // Push receiver. @@ -554,7 +552,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, // Save copies of the top frame descriptor on the stack. __ Move(r3, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, masm->isolate())); - __ LoadP(r0, MemOperand(r3)); + __ LoadU64(r0, MemOperand(r3)); __ push(r0); // Clear c_entry_fp, now we've pushed its previous value to the stack. 
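[Editor's note: the PPC hunks in this region rename LoadP (pointer-width load) to LoadU64, and later hunks introduce LoadS32/LoadU32, so the access width and signedness are explicit at every call site. The stand-alone sketch below uses stand-in helpers, not V8's macro-assembler API, to show the behavioural difference those names encode: a signed 32-bit load sign-extends into a 64-bit value, while an unsigned one zero-extends.]

#include <cstdint>
#include <cstring>
#include <iostream>

// Stand-in helpers (assumed shapes, not V8 code): load a 32-bit field and
// widen it to 64 bits, either preserving the sign or zero-extending.
int64_t LoadS32(const void* p) {
  int32_t v;
  std::memcpy(&v, p, sizeof(v));
  return static_cast<int64_t>(v);  // sign-extend
}

uint64_t LoadU32(const void* p) {
  uint32_t v;
  std::memcpy(&v, p, sizeof(v));
  return static_cast<uint64_t>(v);  // zero-extend
}

int main() {
  int32_t field = -1;  // e.g. a "no register" sentinel stored as a 32-bit int
  std::cout << LoadS32(&field) << '\n';  // prints -1
  std::cout << LoadU32(&field) << '\n';  // prints 4294967295
  return 0;
}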
@@ -574,7 +572,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, ExternalReference::Create(IsolateAddressId::kJSEntrySPAddress, masm->isolate()); __ Move(r3, js_entry_sp); - __ LoadP(scratch, MemOperand(r3)); + __ LoadU64(scratch, MemOperand(r3)); __ cmpi(scratch, Operand::Zero()); __ bne(&non_outermost_js); __ StoreP(fp, MemOperand(r3)); @@ -663,7 +661,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, __ MultiPop(kCalleeSaved); // Return - __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize)); + __ LoadU64(r0, MemOperand(sp, kStackFrameLRSlot * kSystemPointerSize)); __ mtlr(r0); __ blr(); } @@ -703,7 +701,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, ExternalReference context_address = ExternalReference::Create( IsolateAddressId::kContextAddress, masm->isolate()); __ Move(cp, context_address); - __ LoadP(cp, MemOperand(cp)); + __ LoadU64(cp, MemOperand(cp)); // Push the function. __ Push(r5); @@ -734,7 +732,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, __ mtctr(r7); __ bind(&loop); __ LoadPU(r9, MemOperand(r8, -kSystemPointerSize)); // read next parameter - __ LoadP(r0, MemOperand(r9)); // dereference handle + __ LoadU64(r0, MemOperand(r9)); // dereference handle __ push(r0); // push parameter __ bdnz(&loop); __ bind(&done); @@ -800,23 +798,23 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, FieldMemOperand(closure, JSFunction::kCodeOffset), r0); __ mr(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, Register scratch2) { Register params_size = scratch1; // Get the size of the formal parameters + receiver (in bytes). - __ LoadP(params_size, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ LoadU64(params_size, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); __ lwz(params_size, FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset)); Register actual_params_size = scratch2; // Compute the size of the actual parameters + receiver (in bytes). - __ LoadP(actual_params_size, - MemOperand(fp, StandardFrameConstants::kArgCOffset)); + __ LoadU64(actual_params_size, + MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ ShiftLeftImm(actual_params_size, actual_params_size, Operand(kSystemPointerSizeLog2)); __ addi(actual_params_size, actual_params_size, Operand(kSystemPointerSize)); @@ -869,7 +867,7 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm, __ LoadTaggedPointerField( scratch, FieldMemOperand(optimized_code_entry, Code::kCodeDataContainerOffset)); - __ LoadWordArith( + __ LoadS32( scratch, FieldMemOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset)); __ TestBit(scratch, Code::kMarkedForDeoptimizationBit, r0); @@ -1082,9 +1080,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { Register optimization_state = r7; // Read off the optimization state in the feedback vector. 
- __ LoadWord(optimization_state, - FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset), - r0); + __ LoadU32(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset), + r0); // Check if the optimized code slot is not empty or has a optimization marker. Label has_optimized_code_or_marker; @@ -1097,7 +1095,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ bind(&not_optimized); // Increment invocation count for the function. - __ LoadWord( + __ LoadU32( r8, FieldMemOperand(feedback_vector, FeedbackVector::kInvocationCountOffset), r0); @@ -1165,10 +1163,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // If the bytecode array has a valid incoming new target or generator object // register, initialize it with incoming value which was passed in r6. Label no_incoming_new_target_or_generator_register; - __ LoadWordArith( - r8, FieldMemOperand( - kInterpreterBytecodeArrayRegister, - BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); + __ LoadS32(r8, + FieldMemOperand( + kInterpreterBytecodeArrayRegister, + BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset)); __ cmpi(r8, Operand::Zero()); __ beq(&no_incoming_new_target_or_generator_register); __ ShiftLeftImm(r8, r8, Operand(kSystemPointerSizeLog2)); @@ -1205,10 +1203,10 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // or the interpreter tail calling a builtin and then a dispatch. // Get bytecode array and bytecode offset from the stack frame. - __ LoadP(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ LoadP(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ LoadU64(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ LoadU64(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); // Either return, or advance to the next bytecode and dispatch. @@ -1238,8 +1236,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { // After the call, restore the bytecode array, bytecode offset and accumulator // registers again. Also, restore the bytecode offset in the stack to its // previous value. - __ LoadP(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ LoadU64(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); __ mov(kInterpreterBytecodeOffsetRegister, Operand(BytecodeArray::kHeaderSize - kHeapObjectTag)); __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); @@ -1313,7 +1311,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl( // Pass the spread in the register r3. // r2 already points to the penultimate argument, the spread // lies in the next interpreter register. - __ LoadP(r5, MemOperand(r5, -kSystemPointerSize)); + __ LoadU64(r5, MemOperand(r5, -kSystemPointerSize)); } // Call the target. @@ -1364,7 +1362,7 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl( // r4 already points to the penultimate argument, the spread // lies in the next interpreter register.
__ subi(r7, r7, Operand(kSystemPointerSize)); - __ LoadP(r5, MemOperand(r7)); + __ LoadU64(r5, MemOperand(r7)); } else { __ AssertUndefinedOrAllocationSite(r5, r8); } @@ -1406,7 +1404,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { // custom copy of the interpreter entry trampoline for profiling. If so, // get the custom trampoline, otherwise grab the entry address of the global // trampoline. - __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ LoadU64(r5, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); __ LoadTaggedPointerField( r5, FieldMemOperand(r5, JSFunction::kSharedFunctionInfoOffset)); __ LoadTaggedPointerField( @@ -1425,7 +1423,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Move(r5, ExternalReference:: address_of_interpreter_entry_trampoline_instruction_start( masm->isolate())); - __ LoadP(r5, MemOperand(r5)); + __ LoadU64(r5, MemOperand(r5)); __ bind(&trampoline_loaded); __ addi(r0, r5, Operand(interpreter_entry_return_pc_offset.value())); @@ -1437,8 +1435,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { ExternalReference::interpreter_dispatch_table_address(masm->isolate())); // Get the bytecode array pointer from the frame. - __ LoadP(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ LoadU64(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); if (FLAG_debug_code) { // Check function data field is actually a BytecodeArray object. @@ -1453,8 +1451,8 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { } // Get the target bytecode offset from the frame. - __ LoadP(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ LoadU64(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); if (FLAG_debug_code) { @@ -1478,12 +1476,12 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. 
- __ LoadP(kInterpreterBytecodeArrayRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); - __ LoadP(kInterpreterBytecodeOffsetRegister, - MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + __ LoadU64(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + __ LoadU64(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); __ SmiUntag(kInterpreterBytecodeOffsetRegister); Label enter_bytecode, function_entry_bytecode; @@ -1524,7 +1522,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1567,7 +1565,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ subi(r3, r3, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); } - __ LoadP( + __ LoadU64( fp, MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); // Load builtin index (stored as a Smi) and use it to get the builtin start @@ -1609,7 +1607,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { } DCHECK_EQ(kInterpreterAccumulatorRegister.code(), r3.code()); - __ LoadP(r3, MemOperand(sp, 0 * kSystemPointerSize)); + __ LoadU64(r3, MemOperand(sp, 0 * kSystemPointerSize)); __ addi(sp, sp, Operand(1 * kSystemPointerSize)); __ Ret(); } @@ -1677,13 +1675,13 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { __ mr(r5, r8); Label done; - __ LoadP(r4, MemOperand(sp)); // receiver + __ LoadU64(r4, MemOperand(sp)); // receiver __ cmpi(r3, Operand(1)); __ blt(&done); - __ LoadP(r8, MemOperand(sp, kSystemPointerSize)); // thisArg + __ LoadU64(r8, MemOperand(sp, kSystemPointerSize)); // thisArg __ cmpi(r3, Operand(2)); __ blt(&done); - __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray + __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ bind(&done); __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2)); @@ -1762,13 +1760,13 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) { Label done; __ cmpi(r3, Operand(1)); __ blt(&done); - __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg + __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg __ cmpi(r3, Operand(2)); __ blt(&done); - __ LoadP(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray + __ LoadU64(r8, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ cmpi(r3, Operand(3)); __ blt(&done); - __ LoadP(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray + __ LoadU64(r5, MemOperand(sp, 3 * kSystemPointerSize)); // argArray __ bind(&done); __ ShiftLeftImm(ip, r3, Operand(kSystemPointerSizeLog2)); @@ -1812,14 +1810,14 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { __ mr(r7, r4); __ cmpi(r3, Operand(1)); __ blt(&done); - __ LoadP(r4, MemOperand(sp, kSystemPointerSize)); // thisArg + __ LoadU64(r4, MemOperand(sp, kSystemPointerSize)); // thisArg __ mr(r6, r4); __ cmpi(r3, Operand(2)); __ blt(&done); - __ LoadP(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray + __ LoadU64(r5, MemOperand(sp, 2 * kSystemPointerSize)); // argArray __ cmpi(r3, Operand(3)); __ blt(&done); - __ LoadP(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray + __ LoadU64(r6, MemOperand(sp, 3 * kSystemPointerSize)); // argArray 
__ bind(&done); __ ShiftLeftImm(r0, r3, Operand(kSystemPointerSizeLog2)); __ add(sp, sp, r0); @@ -1847,6 +1845,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -1859,7 +1858,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register scratch = ip; - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow r5 to be a FixedArray, or a FixedDoubleArray if r7 == 0. Label ok, fail; __ AssertNotSmi(r5); @@ -1963,7 +1962,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, } Label stack_done, stack_overflow; - __ LoadP(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset)); + __ LoadU64(r8, MemOperand(fp, StandardFrameConstants::kArgCOffset)); __ sub(r8, r8, r5, LeaveOE, SetRC); __ ble(&stack_done, cr0); { @@ -2125,7 +2124,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadHalfWord( r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(r4, no_reg, r5, r3, JUMP_FUNCTION); + __ InvokeFunctionCode(r4, no_reg, r5, r3, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2511,6 +2510,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // TODO(v8:10701): Implement for this platform. __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2523,12 +2527,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // r5: pointer to the first argument __ mr(r15, r4); - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ mr(r4, r5); } else { @@ -2552,7 +2556,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } __ EnterExitFrame( - save_doubles, arg_stack_space, + save_doubles == SaveFPRegsMode::kSave, arg_stack_space, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Store a copy of argc in callee-saved registers for later. @@ -2584,8 +2588,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // If return value is on the stack, pop it to registers. if (needs_return_buffer) { - __ LoadP(r4, MemOperand(r3, kSystemPointerSize)); - __ LoadP(r3, MemOperand(r3)); + __ LoadU64(r4, MemOperand(r3, kSystemPointerSize)); + __ LoadU64(r3, MemOperand(r3)); } // Check result for exception sentinel. @@ -2601,7 +2605,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, IsolateAddressId::kPendingExceptionAddress, masm->isolate()); __ Move(r6, pending_exception_address); - __ LoadP(r6, MemOperand(r6)); + __ LoadU64(r6, MemOperand(r6)); __ CompareRoot(r6, RootIndex::kTheHoleValue); // Cannot use check here as it attempts to generate call into runtime. 
__ beq(&okay); @@ -2613,12 +2617,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // r3:r4: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // r14: still holds argc (callee-saved). : r14; - __ LeaveExitFrame(save_doubles, argc); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); __ blr(); // Handling of exception. @@ -2653,11 +2657,11 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Retrieve the handler context, SP and FP. __ Move(cp, pending_handler_context_address); - __ LoadP(cp, MemOperand(cp)); + __ LoadU64(cp, MemOperand(cp)); __ Move(sp, pending_handler_sp_address); - __ LoadP(sp, MemOperand(sp)); + __ LoadU64(sp, MemOperand(sp)); __ Move(fp, pending_handler_fp_address); - __ LoadP(fp, MemOperand(fp)); + __ LoadU64(fp, MemOperand(fp)); // If the handler is a JS frame, restore the context to the frame. Note that // the context will be set to (cp == 0) for non-JS frames. @@ -2685,10 +2689,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Compute the handler entry address and jump to it. ConstantPoolUnavailableScope constant_pool_unavailable(masm); __ Move(ip, pending_handler_entrypoint_address); - __ LoadP(ip, MemOperand(ip)); + __ LoadU64(ip, MemOperand(ip)); if (FLAG_enable_embedded_constant_pool) { __ Move(kConstantPoolRegister, pending_handler_constant_pool_address); - __ LoadP(kConstantPoolRegister, MemOperand(kConstantPoolRegister)); + __ LoadU64(kConstantPoolRegister, MemOperand(kConstantPoolRegister)); } __ Jump(ip); } @@ -2873,8 +2877,8 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, // r15 - next_address->kLimitOffset // r16 - next_address->kLevelOffset __ Move(r17, next_address); - __ LoadP(r14, MemOperand(r17, kNextOffset)); - __ LoadP(r15, MemOperand(r17, kLimitOffset)); + __ LoadU64(r14, MemOperand(r17, kNextOffset)); + __ LoadU64(r15, MemOperand(r17, kLimitOffset)); __ lwz(r16, MemOperand(r17, kLevelOffset)); __ addi(r16, r16, Operand(1)); __ stw(r16, MemOperand(r17, kLevelOffset)); @@ -2887,19 +2891,19 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, Label return_value_loaded; // load value from ReturnValue - __ LoadP(r3, return_value_operand); + __ LoadU64(r3, return_value_operand); __ bind(&return_value_loaded); // No more valid handles (the result handle was the last one). Restore // previous handle scope. __ StoreP(r14, MemOperand(r17, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ lwz(r4, MemOperand(r17, kLevelOffset)); __ cmp(r4, r16); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall); } __ subi(r16, r16, Operand(1)); __ stw(r16, MemOperand(r17, kLevelOffset)); - __ LoadP(r0, MemOperand(r17, kLimitOffset)); + __ LoadU64(r0, MemOperand(r17, kLimitOffset)); __ cmp(r15, r0); __ bne(&delete_allocated_handles); @@ -2907,7 +2911,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, __ bind(&leave_exit_frame); // LeaveExitFrame expects unwind space to be in a register. if (stack_space_operand != nullptr) { - __ LoadP(r14, *stack_space_operand); + __ LoadU64(r14, *stack_space_operand); } else { __ mov(r14, Operand(stack_space)); } @@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, // Check if the function scheduled an exception. 
__ LoadRoot(r14, RootIndex::kTheHoleValue); __ Move(r15, ExternalReference::scheduled_exception_address(isolate)); - __ LoadP(r15, MemOperand(r15)); + __ LoadU64(r15, MemOperand(r15)); __ cmp(r14, r15); __ bne(&promote_scheduled_exception); @@ -3151,8 +3155,8 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ LoadTaggedPointerField( scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset)); - __ LoadP(api_function_address, - FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); + __ LoadU64(api_function_address, + FieldMemOperand(scratch, Foreign::kForeignAddressOffset)); // +3 is to skip prolog, return address and name handle. MemOperand return_value_operand( @@ -3174,13 +3178,14 @@ void Builtins::Generate_DirectCEntry(MacroAssembler* masm) { if (ABI_USES_FUNCTION_DESCRIPTORS) { // AIX/PPC64BE Linux use a function descriptor; - __ LoadP(ToRegister(ABI_TOC_REGISTER), - MemOperand(temp2, kSystemPointerSize)); - __ LoadP(temp2, MemOperand(temp2, 0)); // Instruction address + __ LoadU64(ToRegister(ABI_TOC_REGISTER), + MemOperand(temp2, kSystemPointerSize)); + __ LoadU64(temp2, MemOperand(temp2, 0)); // Instruction address } __ Call(temp2); // Call the C++ function. - __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize)); + __ LoadU64(r0, + MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize)); __ mtlr(r0); __ blr(); } @@ -3230,9 +3235,6 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, const int kSavedRegistersAreaSize = (kNumberOfRegisters * kSystemPointerSize) + kDoubleRegsSize; - // Get the bailout id is passed as r29 by the caller. - __ mr(r5, r29); - __ mov(r5, Operand(Deoptimizer::kFixedExitSizeMarker)); // Get the address of the location in the code object (r6) (return // address for lazy deoptimization) and compute the fp-to-sp delta in @@ -3246,9 +3248,10 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ PrepareCallCFunction(6, r8); __ li(r3, Operand::Zero()); Label context_check; - __ LoadP(r4, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); + __ LoadU64(r4, + MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset)); __ JumpIfSmi(r4, &context_check); - __ LoadP(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + __ LoadU64(r3, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); __ bind(&context_check); __ li(r4, Operand(static_cast(deopt_kind))); // r5: bailout id already loaded. @@ -3263,14 +3266,14 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, // Preserve "deoptimizer" object in register r3 and get the input // frame descriptor pointer to r4 (deoptimizer->input_); - __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); + __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset())); // Copy core registers into FrameDescription::registers_[kNumRegisters]. DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters); for (int i = 0; i < kNumberOfRegisters; i++) { int offset = (i * kSystemPointerSize) + FrameDescription::registers_offset(); - __ LoadP(r5, MemOperand(sp, i * kSystemPointerSize)); + __ LoadU64(r5, MemOperand(sp, i * kSystemPointerSize)); __ StoreP(r5, MemOperand(r4, offset)); } @@ -3302,7 +3305,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, // Compute a pointer to the unwinding limit in register r5; that is // the first stack slot not part of the input frame. 
- __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); + __ LoadU64(r5, MemOperand(r4, FrameDescription::frame_size_offset())); __ add(r5, r5, sp); // Unwind the stack down to - but not including - the unwinding @@ -3331,28 +3334,29 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, } __ pop(r3); // Restore deoptimizer object (class Deoptimizer). - __ LoadP(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset())); + __ LoadU64(sp, MemOperand(r3, Deoptimizer::caller_frame_top_offset())); // Replace the current (input) frame with the output frames. Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; // Outer loop state: r7 = current "FrameDescription** output_", // r4 = one past the last FrameDescription**. __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); - __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. + __ LoadU64(r7, + MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. __ ShiftLeftImm(r4, r4, Operand(kSystemPointerSizeLog2)); __ add(r4, r7, r4); __ b(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: r5 = current FrameDescription*, r6 = loop index. - __ LoadP(r5, MemOperand(r7, 0)); // output_[ix] - __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset())); + __ LoadU64(r5, MemOperand(r7, 0)); // output_[ix] + __ LoadU64(r6, MemOperand(r5, FrameDescription::frame_size_offset())); __ b(&inner_loop_header); __ bind(&inner_push_loop); __ addi(r6, r6, Operand(-sizeof(intptr_t))); __ add(r9, r5, r6); - __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset())); + __ LoadU64(r9, MemOperand(r9, FrameDescription::frame_content_offset())); __ push(r9); __ bind(&inner_loop_header); @@ -3364,7 +3368,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ cmp(r7, r4); __ blt(&outer_push_loop); - __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); + __ LoadU64(r4, MemOperand(r3, Deoptimizer::input_offset())); for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { int code = config->GetAllocatableDoubleCode(i); const DoubleRegister dreg = DoubleRegister::from_code(code); @@ -3373,9 +3377,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, } // Push pc, and continuation from the last output frame. - __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset())); + __ LoadU64(r9, MemOperand(r5, FrameDescription::pc_offset())); __ push(r9); - __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset())); + __ LoadU64(r9, MemOperand(r5, FrameDescription::continuation_offset())); __ push(r9); // Restore the registers from the last output frame. 
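[Editor's note: the deoptimizer-entry hunk above walks Deoptimizer::output_ with an outer loop over FrameDescription pointers and an inner loop that pushes each frame's contents from the highest offset downwards. The sketch below is a hypothetical, heavily simplified model (plain vectors instead of V8's FrameDescription layout), included only to make that push order concrete; it is not the generated assembly.]

#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for V8's FrameDescription: just the frame contents.
struct FrameDescription {
  std::vector<intptr_t> contents;
};

int main() {
  // Two fake output frames produced by the deoptimizer.
  std::vector<FrameDescription> output = {{{1, 2, 3}}, {{40, 41}}};
  std::vector<intptr_t> stack;  // models the machine stack being rebuilt

  for (const FrameDescription& frame : output) {        // outer_push_loop
    for (size_t i = frame.contents.size(); i-- > 0;) {  // inner_push_loop
      stack.push_back(frame.contents[i]);               // __ push(r9)
    }
  }

  for (intptr_t v : stack) std::cout << v << ' ';  // prints: 3 2 1 41 40
  std::cout << '\n';
  return 0;
}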
@@ -3388,7 +3392,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, int offset = (i * kSystemPointerSize) + FrameDescription::registers_offset(); if ((restored_regs & (1 << i)) != 0) { - __ LoadP(ToRegister(i), MemOperand(scratch, offset)); + __ LoadU64(ToRegister(i), MemOperand(scratch, offset)); } } } @@ -3465,11 +3469,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot); Register handler_arg = descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler); - __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset)); - __ LoadP( + __ LoadU64(handler_arg, + MemOperand(fp, CommonFrameConstants::kCallerPCOffset)); + __ LoadU64( slot_arg, MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset)); - __ LoadP( + __ LoadU64( handler_arg, MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset)); diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 04907f5268a0d5..afd9a1fca1cb13 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/debug/debug.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" @@ -100,7 +101,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // a0: number of arguments (untagged) // a1: constructor function // a3: new target - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // Restore context from the frame. __ Ld(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -225,7 +226,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(a6); // Call the function. - __ InvokeFunctionWithNewTarget(a1, a3, a0, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall); // ----------- S t a t e ------------- // -- a0: constructor result @@ -300,12 +301,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) { Generate_JSBuiltinsConstructStubHelper(masm); } -static void GetSharedFunctionInfoBytecode(MacroAssembler* masm, - Register sfi_data, - Register scratch1) { +// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under +// the more general dispatch. +static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm, + Register sfi_data, + Register scratch1, + Label* is_baseline) { Label done; __ GetObjectType(sfi_data, scratch1, scratch1); + __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE)); __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE)); __ Ld(sfi_data, FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset)); @@ -325,7 +330,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Store input value into generator object. __ Sd(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset)); __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0, a3, - kRAHasNotBeenSaved, kDontSaveFPRegs); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. 
__ Ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); @@ -388,12 +393,14 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { // Underlying function needs to have bytecode available. if (FLAG_debug_code) { + Label is_baseline; __ Ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); __ Ld(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecode(masm, a3, a0); + GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, a0, &is_baseline); __ GetObjectType(a3, a3, a3); __ Assert(eq, AbortReason::kMissingBytecodeArray, a3, Operand(BYTECODE_ARRAY_TYPE)); + __ bind(&is_baseline); } // Resume (Ignition/TurboFan) generator object. @@ -763,8 +770,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ Sd(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset)); __ Move(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -964,6 +971,184 @@ static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm, __ bind(&end); } +// Read off the optimization state in the feedback vector and check if there +// is optimized code or a optimization marker that needs to be processed. +static void LoadOptimizationStateAndJumpIfNeedsProcessing( + MacroAssembler* masm, Register optimization_state, Register feedback_vector, + Label* has_optimized_code_or_marker) { + __ RecordComment("[ Check optimization state"); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); + __ Lw(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); + __ And( + scratch, optimization_state, + Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask)); + __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg)); + __ RecordComment("]"); +} + +static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( + MacroAssembler* masm, Register optimization_state, + Register feedback_vector) { + Label maybe_has_optimized_code; + // Check if optimized code marker is available + __ And( + t0, optimization_state, + Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker)); + __ Branch(&maybe_has_optimized_code, eq, t0, Operand(zero_reg)); + + Register optimization_marker = optimization_state; + __ DecodeField(optimization_marker); + MaybeOptimizeCode(masm, feedback_vector, optimization_marker); + + __ bind(&maybe_has_optimized_code); + Register optimized_code_entry = optimization_state; + __ Ld(optimization_marker, + FieldMemOperand(feedback_vector, + FeedbackVector::kMaybeOptimizedCodeOffset)); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5); +} + +// static +void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { + UseScratchRegisterScope temps(masm); + temps.Include(kScratchReg.bit() | kScratchReg2.bit()); + auto descriptor = Builtins::CallInterfaceDescriptorFor( + Builtins::kBaselineOutOfLinePrologue); + Register closure = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + // Load the feedback vector from the closure. 
+ Register feedback_vector = temps.Acquire(); + __ Ld(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + if (FLAG_debug_code) { + __ GetObjectType(feedback_vector, t0, t0); + __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0, + Operand(FEEDBACK_VECTOR_TYPE)); + } + + // Check for an optimization marker. + Label has_optimized_code_or_marker; + Register optimization_state = temps.Acquire(); + LoadOptimizationStateAndJumpIfNeedsProcessing( + masm, optimization_state, feedback_vector, &has_optimized_code_or_marker); + + // Increment invocation count for the function. + { + Register invocation_count = t0; + __ Lw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + __ Add32(invocation_count, invocation_count, Operand(1)); + __ Sw(invocation_count, + FieldMemOperand(feedback_vector, + FeedbackVector::kInvocationCountOffset)); + } + + __ RecordComment("[ Frame Setup"); + FrameScope frame_scope(masm, StackFrame::MANUAL); + // Normally the first thing we'd do here is Push(lr, fp), but we already + // entered the frame in BaselineCompiler::Prologue, as we had to use the + // value lr before the call to this BaselineOutOfLinePrologue builtin. + + Register callee_context = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kCalleeContext); + Register callee_js_function = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kClosure); + __ Push(callee_context, callee_js_function); + DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister); + DCHECK_EQ(callee_js_function, kJSFunctionRegister); + + Register argc = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount); + // We'll use the bytecode for both code age/OSR resetting, and pushing onto + // the frame, so load it into a register. + Register bytecodeArray = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray); + + // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset + // are 8-bit fields next to each other, so we could just optimize by writing + // a 16-bit. These static asserts guard our assumption is valid. + STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == + BytecodeArray::kOsrNestingLevelOffset + kCharSize); + STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0); + __ Sh(zero_reg, + FieldMemOperand(bytecodeArray, BytecodeArray::kOsrNestingLevelOffset)); + + __ Push(argc, bytecodeArray); + + // Baseline code frames store the feedback vector where interpreter would + // store the bytecode offset. + if (FLAG_debug_code) { + __ GetObjectType(feedback_vector, t0, t0); + __ Assert(eq, AbortReason::kExpectedFeedbackVector, t0, + Operand(FEEDBACK_VECTOR_TYPE)); + } + // Our stack is currently aligned. We have have to push something along with + // the feedback vector to keep it that way -- we may as well start + // initialising the register frame. + // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves + // `undefined` in the accumulator register, to skip the load in the baseline + // code. + __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue); + __ Push(feedback_vector, kInterpreterAccumulatorRegister); + __ RecordComment("]"); + + __ RecordComment("[ Stack/interrupt check"); + Label call_stack_guard; + { + // Stack check. 
This folds the checks for both the interrupt stack limit + // check and the real stack limit into one by just checking for the + // interrupt limit. The interrupt limit is either equal to the real stack + // limit or tighter. By ensuring we have space until that limit after + // building the frame we can quickly precheck both at once. + Register frame_size = t0; + __ Ld(frame_size, + FieldMemOperand(bytecodeArray, BytecodeArray::kFrameSizeOffset)); + Register sp_minus_frame_size = frame_size; + __ Sub64(sp_minus_frame_size, sp, frame_size); + Register interrupt_limit = t1; + __ LoadStackLimit(interrupt_limit, + MacroAssembler::StackLimitKind::kInterruptStackLimit); + __ Branch(&call_stack_guard, Uless, sp_minus_frame_size, + Operand(interrupt_limit)); + __ RecordComment("]"); + } + + // Do "fast" return to the caller pc in lr. + // TODO(v8:11429): Document this frame setup better. + __ Ret(); + + __ bind(&has_optimized_code_or_marker); + { + __ RecordComment("[ Optimized marker check"); + // Drop the frame created by the baseline call. + __ Pop(fp, ra); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state, + feedback_vector); + __ Trap(); + __ RecordComment("]"); + } + + __ bind(&call_stack_guard); + { + Register new_target = descriptor.GetRegisterParameter( + BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget); + + FrameScope frame_scope(masm, StackFrame::INTERNAL); + __ RecordComment("[ Stack/interrupt call"); + // Save incoming new target or generator + __ Push(zero_reg, new_target); + __ CallRuntime(Runtime::kStackGuard); + __ Pop(new_target, zero_reg); + __ RecordComment("]"); + } + __ Ret(); + temps.Exclude(kScratchReg.bit() | kScratchReg2.bit()); +} + // Generate code for entering a JS function with the interpreter. // On entry to the function the receiver and arguments have been pushed on the // stack left to right. @@ -989,8 +1174,9 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); __ Ld(kInterpreterBytecodeArrayRegister, FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset)); - GetSharedFunctionInfoBytecode(masm, kInterpreterBytecodeArrayRegister, - kScratchReg); + Label is_baseline; + GetSharedFunctionInfoBytecodeOrBaseline( + masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline); // The bytecode array could have been flushed from the shared function info, // if so, call into CompileLazy. @@ -1188,6 +1374,44 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { FeedbackVector::kMaybeOptimizedCodeOffset)); TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, a5); + __ bind(&is_baseline); + { + // Load the feedback vector from the closure. + __ Ld(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Ld(feedback_vector, + FieldMemOperand(feedback_vector, Cell::kValueOffset)); + + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ Ld(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset)); + __ Lh(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset)); + __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE)); + + // Read off the optimization state in the feedback vector. + // TODO(v8:11429): Is this worth doing here? Baseline code will check it + // anyway... 
+ __ Ld(optimization_state, + FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset)); + + // Check if there is optimized code or a optimization marker that needes to + // be processed. + __ And( + t0, optimization_state, + Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask)); + __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg)); + + // Load the baseline code into the closure. + __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BaselineData::kBaselineCodeOffset)); + static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch"); + ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1); + __ JumpCodeObject(a2); + + __ bind(&install_baseline_code); + GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode); + } __ bind(&compile_lazy); GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy); @@ -1407,7 +1631,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Advance the current bytecode offset stored within the given interpreter // stack frame. This simulates what all bytecode handlers do upon completion // of the underlying operation. @@ -1454,7 +1678,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1503,12 +1727,12 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); // Load builtin index (stored as a Smi) and use it to get the builtin start // address from the builtins table. - __ Pop(t0); + __ Pop(t6); __ Add64(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); __ Pop(ra); - __ LoadEntryFromBuiltinIndex(t0); - __ Jump(t0); + __ LoadEntryFromBuiltinIndex(t6); + __ Jump(t6); } } // namespace @@ -1542,7 +1766,20 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { __ Ret(); } -void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { +void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) { + Register optimized_code_entry = kJavaScriptCallCodeStartRegister; + TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, t0); +} +namespace { + +void Generate_OSREntry(MacroAssembler* masm, Register entry_address, + Operand offset = Operand(int64_t(0))) { + __ Add64(ra, entry_address, offset); + // And "return" to the OSR entry point of the function. + __ Ret(); +} + +void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) { { FrameScope scope(masm, StackFrame::INTERNAL); __ CallRuntime(Runtime::kCompileForOnStackReplacement); @@ -1550,11 +1787,11 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // If the code object is null, just return to the caller. __ Ret(eq, a0, Operand(Smi::zero())); - - // Drop the handler frame that is be sitting on top of the actual - // JavaScript frame. This is the case then OSR is triggered from bytecode. - __ LeaveFrame(StackFrame::STUB); - + if (is_interpreter) { + // Drop the handler frame that is be sitting on top of the actual + // JavaScript frame. 
This is the case then OSR is triggered from bytecode. + __ LeaveFrame(StackFrame::STUB); + } // Load deoptimization data from the code object. // = [#deoptimization_data_offset] __ Ld(a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag)); @@ -1568,9 +1805,18 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { // Compute the target address = code_obj + header_size + osr_offset // = + #header_size + __ Add64(a0, a0, a1); - __ Add64(ra, a0, Code::kHeaderSize - kHeapObjectTag); - // And "return" to the OSR entry point of the function. - __ Ret(); + Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag)); +} +} // namespace + +void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { + return OnStackReplacement(masm, true); +} + +void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ Ld(kContextRegister, + MemOperand(fp, StandardFrameConstants::kContextOffset)); + return OnStackReplacement(masm, false); } // static @@ -1808,7 +2054,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // -- a4 : len (number of elements to push from args) // -- a3 : new.target (for [[Construct]]) // ----------------------------------- - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0. Label ok, fail; __ AssertNotSmi(a2); @@ -2070,7 +2316,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ Lhu(a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(a1, no_reg, a2, a0, JUMP_FUNCTION); + __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2438,10 +2684,10 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // a2: pointer to the first argument - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ Move(s1, a2); } else { @@ -2453,7 +2699,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // Enter the exit frame that transitions from JavaScript to C++. FrameScope scope(masm, StackFrame::MANUAL); __ EnterExitFrame( - save_doubles == kSaveFPRegs, 0, + save_doubles == SaveFPRegsMode::kSave, 0, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // s3: number of arguments including receiver (C callee-saved) @@ -2502,12 +2748,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // a0:a1: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // s3: still holds argc (callee-saved). : s3; - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argc, EMIT_RETURN); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN); // Handling of exception. __ bind(&exception_returned); @@ -2689,6 +2935,10 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ Trap(); } +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. 
+ __ Trap(); +} namespace { int AddressOffset(ExternalReference ref0, ExternalReference ref1) { @@ -2762,7 +3012,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, // No more valid handles (the result handle was the last one). Restore // previous handle scope. __ Sd(s3, MemOperand(s5, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ Lw(a1, MemOperand(s5, kLevelOffset)); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); @@ -3228,9 +3478,9 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, } } - __ pop(t3); // Get continuation, leave pc on stack. + __ pop(t6); // Get continuation, leave pc on stack. __ pop(ra); - __ Jump(t3); + __ Jump(t6); __ stop(); } @@ -3252,6 +3502,146 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) { Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy); } +namespace { + +// Converts an interpreter frame into a baseline frame and continues execution +// in baseline code (baseline code has to exist on the shared function info), +// either at the start or the end of the current bytecode. +void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode, + bool is_osr = false) { + __ Push(zero_reg, kInterpreterAccumulatorRegister); + Label start; + __ bind(&start); + + // Get function from the frame. + Register closure = a1; + __ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset)); + + // Replace BytecodeOffset with the feedback vector. + Register feedback_vector = a2; + __ Ld(feedback_vector, + FieldMemOperand(closure, JSFunction::kFeedbackCellOffset)); + __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset)); + Label install_baseline_code; + // Check if feedback vector is valid. If not, call prepare for baseline to + // allocate it. + __ GetObjectType(feedback_vector, t0, t0); + __ Branch(&install_baseline_code, eq, t0, Operand(FEEDBACK_VECTOR_TYPE)); + // Save BytecodeOffset from the stack frame. + __ SmiUntag(kInterpreterBytecodeOffsetRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + // Replace BytecodeOffset with the feedback vector. + __ Sd(feedback_vector, + MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp)); + feedback_vector = no_reg; + + // Get the Code object from the shared function info. + UseScratchRegisterScope temps(masm); + Register code_obj = temps.Acquire(); + __ Ld(code_obj, + FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset)); + __ Ld(code_obj, + FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset)); + __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset)); + + // Compute baseline pc for bytecode offset. + __ Push(zero_reg, kInterpreterAccumulatorRegister); + ExternalReference get_baseline_pc_extref; + if (next_bytecode || is_osr) { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_next_executed_bytecode(); + } else { + get_baseline_pc_extref = + ExternalReference::baseline_pc_for_bytecode_offset(); + } + + Register get_baseline_pc = a3; + __ li(get_baseline_pc, get_baseline_pc_extref); + + // If the code deoptimizes during the implicit function entry stack interrupt + // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is + // not a valid bytecode offset. + // TODO(pthier): Investigate if it is feasible to handle this special case + // in TurboFan instead of here. 
+ Label valid_bytecode_offset, function_entry_bytecode; + if (!is_osr) { + __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister, + Operand(BytecodeArray::kHeaderSize - kHeapObjectTag + + kFunctionEntryBytecodeOffset)); + } + + __ Sub64(kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeOffsetRegister, + (BytecodeArray::kHeaderSize - kHeapObjectTag)); + + __ bind(&valid_bytecode_offset); + // Get bytecode array from the stack frame. + __ Ld(kInterpreterBytecodeArrayRegister, + MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); + { + Register arg_reg_1 = a0; + Register arg_reg_2 = a1; + Register arg_reg_3 = a2; + __ Move(arg_reg_1, code_obj); + __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister); + __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister); + FrameScope scope(masm, StackFrame::INTERNAL); + __ CallCFunction(get_baseline_pc, 3, 0); + } + __ Add64(code_obj, code_obj, kReturnRegister0); + __ Pop(kInterpreterAccumulatorRegister, zero_reg); + + if (is_osr) { + // Reset the OSR loop nesting depth to disarm back edges. + // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm + // Sparkplug here. + __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister, + BytecodeArray::kOsrNestingLevelOffset)); + Generate_OSREntry(masm, code_obj, + Operand(Code::kHeaderSize - kHeapObjectTag)); + } else { + __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag); + __ Jump(code_obj); + } + __ Trap(); // Unreachable. + + if (!is_osr) { + __ bind(&function_entry_bytecode); + // If the bytecode offset is kFunctionEntryOffset, get the start address of + // the first bytecode. + __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0))); + if (next_bytecode) { + __ li(get_baseline_pc, + ExternalReference::baseline_pc_for_bytecode_offset()); + } + __ Branch(&valid_bytecode_offset); + } + + __ bind(&install_baseline_code); + { + FrameScope scope(masm, StackFrame::INTERNAL); + __ Push(closure); + __ CallRuntime(Runtime::kInstallBaselineCode, 1); + } + // Retry from the start after installing baseline code. + __ Branch(&start); +} + +} // namespace + +void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) { + Generate_BaselineEntry(masm, false); +} + +void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) { + Generate_BaselineEntry(masm, true); +} + +void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline( + MacroAssembler* masm) { + Generate_BaselineEntry(masm, false, true); +} + void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { FrameScope scope(masm, StackFrame::MANUAL); __ EnterFrame(StackFrame::INTERNAL); diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 7711af6e9014ba..0272621ac01d76 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -6,6 +6,7 @@ #include "src/api/api-arguments.h" #include "src/codegen/code-factory.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" @@ -110,7 +111,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // r3: constructor function // r5: new target - __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall); // Restore context from the frame. 
__ LoadU64(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset)); @@ -238,7 +239,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(r8); // Call the function. - __ InvokeFunctionWithNewTarget(r3, r5, r2, CALL_FUNCTION); + __ InvokeFunctionWithNewTarget(r3, r5, r2, InvokeType::kCall); // ----------- S t a t e ------------- // -- r0: constructor result @@ -339,7 +340,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreTaggedField( r2, FieldMemOperand(r3, JSGeneratorObject::kInputOrDebugPosOffset), r0); __ RecordWriteField(r3, JSGeneratorObject::kInputOrDebugPosOffset, r2, r5, - kLRHasNotBeenSaved, kDontSaveFPRegs); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore); // Load suspended function and context. __ LoadTaggedPointerField( @@ -395,18 +396,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { FieldMemOperand(r3, JSGeneratorObject::kParametersAndRegistersOffset)); { Label done_loop, loop; - __ mov(r8, r5); - __ bind(&loop); - __ SubS64(r8, r8, Operand(1)); + __ SubS64(r5, r5, Operand(1)); __ blt(&done_loop); - __ ShiftLeftU64(r1, r8, Operand(kTaggedSizeLog2)); + __ ShiftLeftU64(r1, r5, Operand(kTaggedSizeLog2)); __ la(scratch, MemOperand(r4, r1)); __ LoadAnyTaggedField(scratch, FieldMemOperand(scratch, FixedArray::kHeaderSize)); __ Push(scratch); __ b(&loop); - __ bind(&done_loop); // Push receiver. @@ -857,8 +855,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, __ mov(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, - OMIT_SMI_CHECK); + kLRHasNotBeenSaved, SaveFPRegsMode::kIgnore, + RememberedSetAction::kOmit, SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1527,7 +1525,7 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Jump(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ LoadU64(kInterpreterBytecodeArrayRegister, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1573,7 +1571,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } @@ -1890,6 +1888,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -1902,7 +1901,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Register scratch = ip; - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow r4 to be a FixedArray, or a FixedDoubleArray if r6 == 0. 
Label ok, fail; __ AssertNotSmi(r4); @@ -2177,7 +2176,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ LoadU16( r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(r3, no_reg, r4, r2, JUMP_FUNCTION); + __ InvokeFunctionCode(r3, no_reg, r4, r2, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2549,6 +2548,11 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // TODO(v8:10701): Implement for this platform. __ Trap(); } + +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + // Only needed on x64. + __ Trap(); +} #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -2561,12 +2565,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // sp: stack pointer (restored as callee's sp after C call) // cp: current context (C callee-saved) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // r4: pointer to the first argument __ mov(r7, r3); - if (argv_mode == kArgvInRegister) { + if (argv_mode == ArgvMode::kRegister) { // Move argv into the correct register. __ mov(r3, r4); } else { @@ -2594,7 +2598,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, #endif __ EnterExitFrame( - save_doubles, arg_stack_space, + save_doubles == SaveFPRegsMode::kSave, arg_stack_space, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); // Store a copy of argc, argv in callee-saved registers for later. @@ -2657,12 +2661,12 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // r2:r3: result // sp: stack pointer // fp: frame pointer - Register argc = argv_mode == kArgvInRegister + Register argc = argv_mode == ArgvMode::kRegister // We don't want to pop arguments so set argc to no_reg. ? no_reg // r6: still holds argc (callee-saved). : r6; - __ LeaveExitFrame(save_doubles, argc); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc); __ b(r14); // Handling of exception. @@ -2916,7 +2920,7 @@ static void CallApiFunctionAndReturn(MacroAssembler* masm, // No more valid handles (the result handle was the last one). Restore // previous handle scope. 
__ StoreU64(r6, MemOperand(r9, kNextOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ LoadU32(r3, MemOperand(r9, kLevelOffset)); __ CmpS64(r3, r8); __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall); @@ -3464,11 +3468,12 @@ void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) { descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kSlot); Register handler_arg = descriptor.GetRegisterParameter(DynamicCheckMapsDescriptor::kHandler); - __ LoadP(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset)); - __ LoadP( + __ LoadU64(handler_arg, + MemOperand(fp, CommonFrameConstants::kCallerPCOffset)); + __ LoadU64( slot_arg, MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset)); - __ LoadP( + __ LoadU64( handler_arg, MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset)); diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 348866c9bdeed4..fbcfab56f43d7a 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -11,7 +11,6 @@ #include "src/compiler/code-assembler.h" #include "src/execution/isolate.h" #include "src/handles/handles-inl.h" -#include "src/heap/heap-inl.h" // For Heap::code_range. #include "src/init/setup-isolate.h" #include "src/interpreter/bytecodes.h" #include "src/interpreter/interpreter-generator.h" @@ -42,10 +41,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate, return options; } - const base::AddressRegion& code_range = isolate->heap()->code_range(); + const base::AddressRegion& code_region = isolate->heap()->code_region(); bool pc_relative_calls_fit_in_code_range = - !code_range.is_empty() && - std::ceil(static_cast(code_range.size() / MB)) <= + !code_region.is_empty() && + std::ceil(static_cast(code_region.size() / MB)) <= kMaxPCRelativeCodeRangeInMB; options.isolate_independent_code = true; @@ -219,7 +218,7 @@ void SetupIsolateDelegate::PopulateWithPlaceholders(Isolate* isolate) { // static void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { - // Replace references from all code objects to placeholders. + // Replace references from all builtin code objects to placeholders. 
Builtins* builtins = isolate->builtins(); DisallowGarbageCollection no_gc; CodeSpaceMemoryModificationScope modification_scope(isolate->heap()); @@ -228,11 +227,8 @@ void SetupIsolateDelegate::ReplacePlaceholders(Isolate* isolate) { RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET); - HeapObjectIterator iterator(isolate->heap()); - for (HeapObject obj = iterator.Next(); !obj.is_null(); - obj = iterator.Next()) { - if (!obj.IsCode()) continue; - Code code = Code::cast(obj); + for (int i = 0; i < Builtins::builtin_count; i++) { + Code code = builtins->builtin(i); bool flush_icache = false; for (RelocIterator it(code, kRelocMask); !it.done(); it.next()) { RelocInfo* rinfo = it.rinfo(); diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq index 6e416ddd98fc16..6333ebf97fdbb3 100644 --- a/deps/v8/src/builtins/typed-array-createtypedarray.tq +++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq @@ -19,11 +19,17 @@ extern macro TypedArrayBuiltinsAssembler::SetupTypedArrayEmbedderFields( extern runtime ThrowInvalidTypedArrayAlignment(implicit context: Context)( Map, String): never; +extern runtime GrowableSharedArrayBufferByteLength(implicit context: Context)( + Object): JSAny; + transitioning macro AllocateTypedArray(implicit context: Context)( isOnHeap: constexpr bool, map: Map, buffer: JSArrayBuffer, - byteOffset: uintptr, byteLength: uintptr, length: uintptr): JSTypedArray { + byteOffset: uintptr, byteLength: uintptr, length: uintptr, + isLengthTracking: bool): JSTypedArray { let elements: ByteArray; if constexpr (isOnHeap) { + assert(!IsResizableArrayBuffer(buffer)); + assert(!isLengthTracking); elements = AllocateByteArray(byteLength); } else { elements = kEmptyByteArray; @@ -53,6 +59,9 @@ transitioning macro AllocateTypedArray(implicit context: Context)( typedArray.byte_offset = byteOffset; typedArray.byte_length = byteLength; typedArray.length = length; + typedArray.bit_field.is_length_tracking = isLengthTracking; + typedArray.bit_field.is_backed_by_rab = + IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer); typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray); if constexpr (isOnHeap) { typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset); @@ -88,8 +97,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)( const buffer = AllocateEmptyOnHeapBuffer(byteLength); const isOnHeap: constexpr bool = true; + const isLengthTracking: constexpr bool = false; const typedArray = AllocateTypedArray( - isOnHeap, map, buffer, byteOffset, byteLength, length); + isOnHeap, map, buffer, byteOffset, byteLength, length, + isLengthTracking); if constexpr (initialize) { const backingStore = typedArray.data_ptr; @@ -107,8 +118,10 @@ transitioning macro TypedArrayInitialize(implicit context: Context)( } label AttachOffHeapBuffer(bufferObj: Object) { const buffer = Cast(bufferObj) otherwise unreachable; const isOnHeap: constexpr bool = false; + const isLengthTracking: constexpr bool = false; return AllocateTypedArray( - isOnHeap, map, buffer, byteOffset, byteLength, length); + isOnHeap, map, buffer, byteOffset, byteLength, length, + isLengthTracking); } } @@ -204,8 +217,26 @@ transitioning macro ConstructByTypedArray(implicit context: Context)( // 22.2.4.5 TypedArray ( buffer, byteOffset, length ) // ES #sec-typedarray-buffer-byteoffset-length transitioning 
macro ConstructByArrayBuffer(implicit context: Context)( - map: Map, buffer: JSArrayBuffer, byteOffset: JSAny, length: JSAny, - elementsInfo: typed_array::TypedArrayElementsInfo): JSTypedArray { + target: JSFunction, newTarget: JSReceiver, buffer: JSArrayBuffer, + byteOffset: JSAny, length: JSAny): JSTypedArray { + let map: Map; + const isLengthTracking: bool = + IsResizableArrayBuffer(buffer) && (length == Undefined); + // Pick the RAB / GSAB map (containing the corresponding RAB / GSAB + // ElementsKind). GSAB-backed non-length-tracking TypedArrays behave just like + // normal TypedArrays, so exclude them. + const rabGsab: bool = IsResizableArrayBuffer(buffer) && + (!IsSharedArrayBuffer(buffer) || isLengthTracking); + if (rabGsab) { + map = GetDerivedRabGsabMap(target, newTarget); + } else { + map = GetDerivedMap(target, newTarget); + } + + // 5. Let elementSize be the Number value of the Element Size value in Table + // 56 for constructorName. + const elementsInfo = GetTypedArrayElementsInfo(map); + try { // 6. Let offset be ? ToIndex(byteOffset). const offset: uintptr = ToIndex(byteOffset) otherwise IfInvalidOffset; @@ -226,7 +257,13 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)( } // 10. Let bufferByteLength be buffer.[[ArrayBufferByteLength]]. - const bufferByteLength: uintptr = buffer.byte_length; + let bufferByteLength: uintptr; + if (IsResizableArrayBuffer(buffer) && IsSharedArrayBuffer(buffer)) { + bufferByteLength = ToIndex(GrowableSharedArrayBufferByteLength(buffer)) + otherwise unreachable; + } else { + bufferByteLength = buffer.byte_length; + } // 11. If length is either not present or undefined, then if (length == Undefined) { @@ -261,7 +298,8 @@ transitioning macro ConstructByArrayBuffer(implicit context: Context)( const isOnHeap: constexpr bool = false; return AllocateTypedArray( - isOnHeap, map, buffer, offset, newByteLength, newLength); + isOnHeap, map, buffer, offset, newByteLength, newLength, + isLengthTracking); } label IfInvalidAlignment(problemString: String) deferred { ThrowInvalidTypedArrayAlignment(map, problemString); } label IfInvalidLength deferred { @@ -286,6 +324,8 @@ transitioning macro TypedArrayCreateByLength(implicit context: Context)( // ValidateTypedArray currently returns the array, not the ViewBuffer. const newTypedArray: JSTypedArray = ValidateTypedArray(context, newTypedArrayObj, methodName); + newTypedArray.bit_field.is_length_tracking = false; + newTypedArray.bit_field.is_backed_by_rab = false; if (IsDetachedBuffer(newTypedArray.buffer)) deferred { ThrowTypeError(MessageTemplate::kDetachedOperation, methodName); @@ -336,21 +376,16 @@ transitioning builtin CreateTypedArray( assert(IsConstructor(target)); // 4. Let O be ? AllocateTypedArray(constructorName, NewTarget, // "%TypedArrayPrototype%"). - const map = GetDerivedMap(target, newTarget); - - // 5. Let elementSize be the Number value of the Element Size value in Table - // 56 for constructorName. - const elementsInfo = GetTypedArrayElementsInfo(map); - try { typeswitch (arg1) { case (length: Smi): { goto IfConstructByLength(length); } case (buffer: JSArrayBuffer): { - return ConstructByArrayBuffer(map, buffer, arg2, arg3, elementsInfo); + return ConstructByArrayBuffer(target, newTarget, buffer, arg2, arg3); } case (typedArray: JSTypedArray): { + // TODO(v8:11111): Support RAB / GSAB. 
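Editorial note: the map selection added to ConstructByArrayBuffer above can be summarized as a small decision function. This is a sketch with invented names, not Torque from the patch: length-tracking applies when the buffer is resizable and no explicit length is given, and GSAB-backed views with a fixed length keep the normal map.

struct BufferTraits {
  bool resizable;  // RAB or GSAB
  bool shared;     // GSAB
};

enum class MapKind { kNormal, kRabGsab };

MapKind PickTypedArrayMap(BufferTraits b, bool length_is_undefined,
                          bool* out_length_tracking) {
  bool length_tracking = b.resizable && length_is_undefined;
  *out_length_tracking = length_tracking;
  // GSAB-backed non-length-tracking views behave like ordinary TypedArrays.
  bool rab_gsab = b.resizable && (!b.shared || length_tracking);
  return rab_gsab ? MapKind::kRabGsab : MapKind::kNormal;
}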
ConstructByTypedArray(typedArray) otherwise IfConstructByArrayLike; } case (obj: JSReceiver): { @@ -363,9 +398,18 @@ transitioning builtin CreateTypedArray( } } } label IfConstructByLength(length: JSAny) { + const map = GetDerivedMap(target, newTarget); + // 5. Let elementSize be the Number value of the Element Size value in Table + // 56 for constructorName. + const elementsInfo = GetTypedArrayElementsInfo(map); + return ConstructByLength(map, length, elementsInfo); } label IfConstructByArrayLike( arrayLike: JSReceiver, length: uintptr, bufferConstructor: JSReceiver) { + const map = GetDerivedMap(target, newTarget); + // 5. Let elementSize be the Number value of the Element Size value in Table + // 56 for constructorName. + const elementsInfo = GetTypedArrayElementsInfo(map); return ConstructByArrayLike( map, arrayLike, length, elementsInfo, bufferConstructor); } diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 05a15162040ed6..f859d1e0bf2082 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -287,8 +287,9 @@ builtin WasmAllocateRtt(typeIndex: intptr, parent: Map): Map { } builtin WasmAllocateStructWithRtt(rtt: Map): HeapObject { - const instanceSize: intptr = - unsafe::TimesTaggedSize(Convert(rtt.instance_size_in_words)); + const typeInfo: WasmTypeInfo = %RawDownCast( + rtt.constructor_or_back_pointer_or_native_context); + const instanceSize: intptr = SmiUntag(typeInfo.instance_size); const result: HeapObject = unsafe::Allocate( instanceSize, AllocationFlag::kAllowLargeObjectAllocation); *UnsafeConstCast(&result.map) = rtt; diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 5b5e964ef95982..7fc7c5dec78d07 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -8,18 +8,19 @@ #include "src/base/bits-iterator.h" #include "src/base/iterator.h" #include "src/codegen/code-factory.h" -#include "src/common/globals.h" -#include "src/objects/code.h" +#include "src/codegen/interface-descriptors-inl.h" // For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop. #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" #include "src/codegen/x64/assembler-x64.h" +#include "src/common/globals.h" #include "src/deoptimizer/deoptimizer.h" #include "src/execution/frame-constants.h" #include "src/execution/frames.h" #include "src/heap/heap-inl.h" #include "src/logging/counters.h" #include "src/objects/cell.h" +#include "src/objects/code.h" #include "src/objects/debug-objects.h" #include "src/objects/foreign.h" #include "src/objects/heap-number.h" @@ -118,7 +119,7 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) { // rax: number of arguments (untagged) // rdi: constructor function // rdx: new target - __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION); + __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall); // Restore smi-tagged arguments count from the frame. __ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset)); @@ -242,7 +243,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) { __ Push(r8); // Call the function. 
- __ InvokeFunction(rdi, rdx, rax, CALL_FUNCTION); + __ InvokeFunction(rdi, rdx, rax, InvokeType::kCall); // ----------- S t a t e ------------- // -- rax constructor result @@ -383,8 +384,8 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type, #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE // Initialize the pointer cage base register. - // TODO(syg): Actually make a cage. - __ movq(kPointerCageBaseRegister, arg_reg_1); + __ LoadRootRelative(kPtrComprCageBaseRegister, + IsolateData::cage_base_offset()); #endif } @@ -560,7 +561,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // rdx : new_target // Clear the context before we push it when entering the internal frame. - __ Set(rsi, 0); + __ Move(rsi, 0); // Enter an internal frame. FrameScope scope(masm, StackFrame::INTERNAL); @@ -687,9 +688,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { __ StoreTaggedField( FieldOperand(rdx, JSGeneratorObject::kInputOrDebugPosOffset), rax); __ RecordWriteField(rdx, JSGeneratorObject::kInputOrDebugPosOffset, rax, rcx, - kDontSaveFPRegs); + SaveFPRegsMode::kIgnore); - Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg; + Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r8 : no_reg; // Load suspended function and context. __ LoadTaggedPointerField( @@ -740,20 +741,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) { rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset)); { - { - Label done_loop, loop; - __ movq(r9, rcx); - - __ bind(&loop); - __ decq(r9); - __ j(less, &done_loop, Label::kNear); - __ PushTaggedAnyField( - FieldOperand(rbx, r9, times_tagged_size, FixedArray::kHeaderSize), - decompr_scratch1); - __ jmp(&loop); - - __ bind(&done_loop); - } + Label done_loop, loop; + __ bind(&loop); + __ decq(rcx); + __ j(less, &done_loop, Label::kNear); + __ PushTaggedAnyField( + FieldOperand(rbx, rcx, times_tagged_size, FixedArray::kHeaderSize), + decompr_scratch1); + __ jmp(&loop); + __ bind(&done_loop); // Push the receiver. __ PushTaggedPointerField( @@ -841,7 +837,8 @@ static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm, optimized_code); __ movq(scratch1, optimized_code); // Write barrier clobbers scratch1 below. __ RecordWriteField(closure, JSFunction::kCodeOffset, scratch1, scratch2, - kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); + SaveFPRegsMode::kIgnore, RememberedSetAction::kOmit, + SmiCheck::kOmit); } static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1, @@ -1084,7 +1081,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( __ LoadAnyTaggedField( optimized_code_entry, FieldOperand(feedback_vector, FeedbackVector::kMaybeOptimizedCodeOffset)); - TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15, jump_mode); + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15, jump_mode); } // Generate code for entering a JS function with the interpreter. 
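Editorial note: the interpreter-trampoline hunks that follow only swap which scratch register holds the current bytecode; the dispatch pattern itself is unchanged. Roughly, as a C++ sketch with invented names:

#include <cstdint>

using BytecodeHandler = void (*)();

void DispatchOnce(const uint8_t* bytecode_array, intptr_t offset,
                  BytecodeHandler const* dispatch_table) {
  uint8_t bytecode = bytecode_array[offset];            // movzxbq scratch, [array + offset]
  BytecodeHandler handler = dispatch_table[bytecode];   // movq target, [table + scratch*8]
  handler();                                            // call target
}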
@@ -1236,10 +1233,11 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { __ Move( kInterpreterDispatchTableRegister, ExternalReference::interpreter_dispatch_table_address(masm->isolate())); - __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, times_1, 0)); + __ movzxbq(kScratchRegister, + Operand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, times_1, 0)); __ movq(kJavaScriptCallCodeStartRegister, - Operand(kInterpreterDispatchTableRegister, r11, + Operand(kInterpreterDispatchTableRegister, kScratchRegister, times_system_pointer_size, 0)); __ call(kJavaScriptCallCodeStartRegister); masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset()); @@ -1259,7 +1257,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) { kInterpreterBytecodeOffsetRegister, times_1, 0)); AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister, rbx, rcx, - r11, &do_return); + r8, &do_return); __ jmp(&do_dispatch); __ bind(&do_return); @@ -1558,15 +1556,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { } // Dispatch to the target bytecode. - __ movzxbq(r11, Operand(kInterpreterBytecodeArrayRegister, - kInterpreterBytecodeOffsetRegister, times_1, 0)); + __ movzxbq(kScratchRegister, + Operand(kInterpreterBytecodeArrayRegister, + kInterpreterBytecodeOffsetRegister, times_1, 0)); __ movq(kJavaScriptCallCodeStartRegister, - Operand(kInterpreterDispatchTableRegister, r11, + Operand(kInterpreterDispatchTableRegister, kScratchRegister, times_system_pointer_size, 0)); __ jmp(kJavaScriptCallCodeStartRegister); } -void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) { // Get bytecode array and bytecode offset from the stack frame. __ movq(kInterpreterBytecodeArrayRegister, Operand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp)); @@ -1587,7 +1586,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { Label if_return; AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister, kInterpreterBytecodeOffsetRegister, rbx, rcx, - r11, &if_return); + r8, &if_return); __ bind(&enter_bytecode); // Convert new bytecode offset to a Smi and save in the stackframe. @@ -1611,29 +1610,38 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { __ Abort(AbortReason::kInvalidBytecodeAdvance); } -void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) { +void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) { Generate_InterpreterEnterBytecode(masm); } // static void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { + Register feedback_vector = r8; + Register optimization_state = rcx; + Register return_address = r15; + +#ifdef DEBUG + for (auto reg : BaselineOutOfLinePrologueDescriptor::registers()) { + DCHECK( + !AreAliased(feedback_vector, optimization_state, return_address, reg)); + } +#endif + auto descriptor = Builtins::CallInterfaceDescriptorFor( Builtins::kBaselineOutOfLinePrologue); Register closure = descriptor.GetRegisterParameter( BaselineOutOfLinePrologueDescriptor::kClosure); // Load the feedback vector from the closure. 
- Register feedback_vector = r11; __ LoadTaggedPointerField( feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset)); __ LoadTaggedPointerField(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset)); - if (__ emit_debug_code()) { + if (FLAG_debug_code) { __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister); __ Assert(equal, AbortReason::kExpectedFeedbackVector); } // Check for an optimization marker. - Register optimization_state = rcx; Label has_optimized_code_or_marker; LoadOptimizationStateAndJumpIfNeedsProcessing( masm, optimization_state, feedback_vector, &has_optimized_code_or_marker); @@ -1642,8 +1650,6 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { __ incl( FieldOperand(feedback_vector, FeedbackVector::kInvocationCountOffset)); - Register return_address = r15; - __ RecordComment("[ Frame Setup"); // Save the return address, so that we can push it to the end of the newly // set-up frame once we're done setting it up. @@ -1723,8 +1729,8 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) { // return since we may do a runtime call along the way that requires the // stack to only contain valid frames. __ Drop(1); - MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, rcx, feedback_vector, - JumpMode::kPushAndReturn); + MaybeOptimizeCodeOrTailCallOptimizedCodeSlot( + masm, optimization_state, feedback_vector, JumpMode::kPushAndReturn); __ Trap(); __ RecordComment("]"); } @@ -1840,7 +1846,7 @@ void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) { void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) { Register optimized_code_entry = kJavaScriptCallCodeStartRegister; - TailCallOptimizedCodeSlot(masm, optimized_code_entry, r11, r15, + TailCallOptimizedCodeSlot(masm, optimized_code_entry, r8, r15, JumpMode::kJump); } @@ -1905,7 +1911,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) { // Function.prototype.apply() yet, we use a normal Call builtin here. __ bind(&no_arguments); { - __ Set(rax, 0); + __ Move(rax, 0); __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET); } } @@ -2062,6 +2068,7 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) { } // static +// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, Handle code) { // ----------- S t a t e ------------- @@ -2072,14 +2079,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // -- rdx : new.target (for [[Construct]]) // -- rsp[0] : return address // ----------------------------------- - Register scratch = r11; - if (masm->emit_debug_code()) { + if (FLAG_debug_code) { // Allow rbx to be a FixedArray, or a FixedDoubleArray if rcx == 0. Label ok, fail; __ AssertNotSmi(rbx); Register map = r9; - __ LoadTaggedPointerField(map, FieldOperand(rbx, HeapObject::kMapOffset)); + __ LoadMap(map, rbx); __ CmpInstanceType(map, FIXED_ARRAY_TYPE); __ j(equal, &ok); __ CmpInstanceType(map, FIXED_DOUBLE_ARRAY_TYPE); @@ -2101,13 +2107,13 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // including the receiver and the return address. 
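Editorial note: the copy/check loop emitted below (and its twin in Generate_CallOrConstructForwardVarargs) slides the existing stack contents down to make room for the spread arguments. As a plain C++ sketch, not code from the patch:

#include <cstddef>
#include <cstdint>

// The return address, receiver and existing arguments (existing_words of
// them) are moved down by `extra` slots; the vacated slots at the high end of
// the argument area then receive the spread arguments.
void MakeRoomForExtraArgs(uintptr_t* new_sp, size_t existing_words,
                          size_t extra) {
  // new_sp == old_sp - extra; index 0 is the word at the (new) stack top.
  for (size_t i = 0; i < existing_words; ++i) {
    new_sp[i] = new_sp[i + extra];  // copy each word from its old slot
  }
  // new_sp[existing_words .. existing_words + extra) is now free.
}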
{ Label copy, check; - Register src = r8, dest = rsp, num = r9, current = r11; + Register src = r8, dest = rsp, num = r9, current = r12; __ movq(src, rsp); __ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0)); __ AllocateStackSpace(kScratchRegister); __ leaq(num, Operand(rax, 2)); // Number of words to copy. // +2 for receiver and return address. - __ Set(current, 0); + __ Move(current, 0); __ jmp(&check); __ bind(&copy); __ movq(kScratchRegister, @@ -2123,9 +2129,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm, // Copy the additional arguments onto the stack. { - Register value = scratch; + Register value = r12; Register src = rbx, dest = r8, num = rcx, current = r9; - __ Set(current, 0); + __ Move(current, 0); Label done, push, loop; __ bind(&loop); __ cmpl(current, num); @@ -2166,7 +2172,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, if (mode == CallOrConstructMode::kConstruct) { Label new_target_constructor, new_target_not_constructor; __ JumpIfSmi(rdx, &new_target_not_constructor, Label::kNear); - __ LoadTaggedPointerField(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); + __ LoadMap(rbx, rdx); __ testb(FieldOperand(rbx, Map::kBitFieldOffset), Immediate(Map::Bits1::IsConstructorBit::kMask)); __ j(not_zero, &new_target_constructor, Label::kNear); @@ -2203,13 +2209,13 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // including the receiver and the return address. { Label copy, check; - Register src = r9, dest = rsp, num = r12, current = r11; + Register src = r9, dest = rsp, num = r12, current = r15; __ movq(src, rsp); __ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0)); __ AllocateStackSpace(kScratchRegister); __ leaq(num, Operand(rax, 2)); // Number of words to copy. // +2 for receiver and return address. - __ Set(current, 0); + __ Move(current, 0); __ jmp(&check); __ bind(&copy); __ movq(kScratchRegister, @@ -2359,7 +2365,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm, __ movzxwq( rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset)); - __ InvokeFunctionCode(rdi, no_reg, rbx, rax, JUMP_FUNCTION); + __ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump); // The function is a "classConstructor", need to raise an exception. __ bind(&class_constructor); @@ -2592,7 +2598,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) { __ JumpIfSmi(rdi, &non_constructor); // Check if target has a [[Construct]] internal method. - __ LoadTaggedPointerField(rcx, FieldOperand(rdi, HeapObject::kMapOffset)); + __ LoadMap(rcx, rdi); __ testb(FieldOperand(rcx, Map::kBitFieldOffset), Immediate(Map::Bits1::IsConstructorBit::kMask)); __ j(zero, &non_constructor); @@ -2682,15 +2688,17 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) { } void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) { + __ movq(kContextRegister, + MemOperand(rbp, BaselineFrameConstants::kContextOffset)); return OnStackReplacement(masm, false); } #if V8_ENABLE_WEBASSEMBLY void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // The function index was pushed to the stack by the caller as int32. - __ Pop(r11); + __ Pop(r15); // Convert to Smi for the runtime call. - __ SmiTag(r11); + __ SmiTag(r15); { HardAbortScope hard_abort(masm); // Avoid calls to Abort.
FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -2717,13 +2725,13 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // Push the Wasm instance as an explicit argument to WasmCompileLazy. __ Push(kWasmInstanceRegister); // Push the function index as second argument. - __ Push(r11); + __ Push(r15); // Initialize the JavaScript context with 0. CEntry will use it to // set the current context on the isolate. __ Move(kContextRegister, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); // The entrypoint address is the return value. - __ movq(r11, kReturnRegister0); + __ movq(r15, kReturnRegister0); // Restore registers. for (DoubleRegister reg : base::Reversed(wasm::kFpParamRegisters)) { @@ -2737,7 +2745,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { } } // Finally, jump to the entrypoint. - __ jmp(r11); + __ jmp(r15); } void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { @@ -2915,7 +2923,7 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ LoadExternalPointerField( signature, FieldOperand(foreign_signature, Foreign::kForeignAddressOffset), - kForeignForeignAddressTag); + kForeignForeignAddressTag, kScratchRegister); foreign_signature = no_reg; Register return_count = r8; __ movq(return_count, @@ -3243,28 +3251,17 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { thread_in_wasm_flag_addr, MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); - - Register jump_table_start = thread_in_wasm_flag_addr; - __ movq(jump_table_start, - MemOperand(wasm_instance, - wasm::ObjectAccess::ToTagged( - WasmInstanceObject::kJumpTableStartOffset))); thread_in_wasm_flag_addr = no_reg; - Register jump_table_offset = function_data; - __ LoadAnyTaggedField( - jump_table_offset, - MemOperand( - function_data, - WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag)); - - // Change from smi to integer. - __ SmiUntag(jump_table_offset); - - Register function_entry = jump_table_offset; - __ addq(function_entry, jump_table_start); - jump_table_offset = no_reg; - jump_table_start = no_reg; + Register function_entry = function_data; + Register scratch = r12; + __ LoadExternalPointerField( + function_entry, + FieldOperand(function_data, + WasmExportedFunctionData::kForeignAddressOffset), + kForeignForeignAddressTag, scratch); + function_data = no_reg; + scratch = no_reg; // We set the indicating value for the GC to the proper one for Wasm call. constexpr int kWasmCallGCScanSlotCount = 0; @@ -3349,6 +3346,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { // Param conversion builtins. // ------------------------------------------- __ bind(&convert_param); + // Restore function_data register (which was clobbered by the code above, + // but was valid when jumping here earlier). + function_data = rdi; // The order of pushes is important. We want the heap objects, that should be // scanned by GC, to be on the top of the stack. 
// We have to set the indicating value for the GC to the number of values on @@ -3527,6 +3527,13 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { __ jmp(&compile_wrapper_done); } +void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) { + MemOperand OSRTargetSlot(rbp, -wasm::kOSRTargetOffset); + __ movq(kScratchRegister, OSRTargetSlot); + __ movq(OSRTargetSlot, Immediate(0)); + __ jmp(kScratchRegister); +} + #endif // V8_ENABLE_WEBASSEMBLY void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, @@ -3538,7 +3545,7 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // rsp: stack pointer (restored after C call) // rsi: current context (restored) // - // If argv_mode == kArgvInRegister: + // If argv_mode == ArgvMode::kRegister: // r15: pointer to the first argument #ifdef V8_TARGET_OS_WIN @@ -3569,15 +3576,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, int arg_stack_space = kArgExtraStackSpace + (result_size <= kMaxRegisterResultSize ? 0 : result_size); - if (argv_mode == kArgvInRegister) { - DCHECK(save_doubles == kDontSaveFPRegs); + if (argv_mode == ArgvMode::kRegister) { + DCHECK(save_doubles == SaveFPRegsMode::kIgnore); DCHECK(!builtin_exit_frame); __ EnterApiExitFrame(arg_stack_space); // Move argc into r12 (argv is already in r15). __ movq(r12, rax); } else { __ EnterExitFrame( - arg_stack_space, save_doubles == kSaveFPRegs, + arg_stack_space, save_doubles == SaveFPRegsMode::kSave, builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT); } @@ -3641,7 +3648,8 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, } // Exit the JavaScript to C++ exit frame. - __ LeaveExitFrame(save_doubles == kSaveFPRegs, argv_mode == kArgvOnStack); + __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, + argv_mode == ArgvMode::kStack); __ ret(0); // Handling of exception. @@ -3866,9 +3874,7 @@ void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address, Register map = rcx; __ JumpIfSmi(return_value, &ok, Label::kNear); - __ LoadTaggedPointerField(map, - FieldOperand(return_value, HeapObject::kMapOffset)); - + __ LoadMap(map, return_value); __ CmpInstanceType(map, LAST_NAME_TYPE); __ j(below_equal, &ok, Label::kNear); @@ -4053,7 +4059,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { Register holder = ApiGetterDescriptor::HolderRegister(); Register callback = ApiGetterDescriptor::CallbackRegister(); Register scratch = rax; - Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r11 : no_reg; + Register decompr_scratch1 = COMPRESS_POINTERS_BOOL ? r15 : no_reg; DCHECK(!AreAliased(receiver, holder, callback, scratch, decompr_scratch1)); @@ -4116,7 +4122,7 @@ void Builtins::Generate_CallApiGetter(MacroAssembler* masm) { __ LoadExternalPointerField( api_function_address, FieldOperand(scratch, Foreign::kForeignAddressOffset), - kForeignForeignAddressTag); + kForeignForeignAddressTag, kScratchRegister); // +3 is to skip prolog, return address and name handle. Operand return_value_operand( @@ -4172,7 +4178,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, // We use this to keep the value of the fifth argument temporarily. // Unfortunately we can't store it directly in r8 (used for passing // this on linux), since it is another parameter passing register on windows. 
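Editorial note: the x64 Generate_WasmOnStackReplace stub added above amounts to a read-clear-jump on a fixed frame slot. A C-style sketch with invented names, not code from the patch:

#include <cstdint>

using OsrTarget = void (*)();

void WasmOnStackReplaceSketch(uintptr_t* osr_target_slot) {
  uintptr_t target = *osr_target_slot;     // movq kScratchRegister, [rbp - kOSRTargetOffset]
  *osr_target_slot = 0;                    // movq [slot], 0  (fire the OSR only once)
  reinterpret_cast<OsrTarget>(target)();   // jmp kScratchRegister (tail jump in the stub)
}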
- Register arg5 = r11; + Register arg5 = r15; __ movq(arg_reg_3, Immediate(Deoptimizer::kFixedExitSizeMarker)); // Get the address of the location in the code object @@ -4192,7 +4198,7 @@ void Generate_DeoptimizationEntry(MacroAssembler* masm, __ movq(rax, Operand(rbp, StandardFrameConstants::kFunctionOffset)); __ bind(&context_check); __ movq(arg_reg_1, rax); - __ Set(arg_reg_2, static_cast(deopt_kind)); + __ Move(arg_reg_2, static_cast(deopt_kind)); // Args 3 and 4 are already in the right registers. // On windows put the arguments on the stack (PrepareCallCFunction diff --git a/deps/v8/src/codegen/arm/assembler-arm-inl.h b/deps/v8/src/codegen/arm/assembler-arm-inl.h index 7035fa2492b4b8..f72e27703e9bfa 100644 --- a/deps/v8/src/codegen/arm/assembler-arm-inl.h +++ b/deps/v8/src/codegen/arm/assembler-arm-inl.h @@ -206,7 +206,7 @@ Operand::Operand(Smi value) : rmode_(RelocInfo::NONE) { Operand::Operand(Register rm) : rm_(rm), shift_op_(LSL), shift_imm_(0) {} void Assembler::CheckBuffer() { - if (buffer_space() <= kGap) { + if (V8_UNLIKELY(buffer_space() <= kGap)) { GrowBuffer(); } MaybeCheckConstPool(); diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 17a20a6f977c4b..09c57928ffac1d 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -534,9 +534,8 @@ Assembler::Assembler(const AssemblerOptions& options, : AssemblerBase(options, std::move(buffer)), pending_32_bit_constants_(), scratch_register_list_(ip.bit()) { - pending_32_bit_constants_.reserve(kMinNumPendingConstants); reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_); - next_buffer_check_ = 0; + constant_pool_deadline_ = kMaxInt; const_pool_blocked_nesting_ = 0; no_const_pool_before_ = 0; first_const_pool_32_use_ = -1; @@ -556,7 +555,10 @@ Assembler::Assembler(const AssemblerOptions& options, } } -Assembler::~Assembler() { DCHECK_EQ(const_pool_blocked_nesting_, 0); } +Assembler::~Assembler() { + DCHECK_EQ(const_pool_blocked_nesting_, 0); + DCHECK_EQ(first_const_pool_32_use_, -1); +} void Assembler::GetCode(Isolate* isolate, CodeDesc* desc, SafepointTableBuilder* safepoint_table_builder, @@ -841,7 +843,7 @@ void Assembler::target_at_put(int pos, int target_pos) { // orr dst, dst, #target8_2 << 16 uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag); - DCHECK(is_uint24(target24)); + CHECK(is_uint24(target24)); if (is_uint8(target24)) { // If the target fits in a byte then only patch with a mov // instruction. @@ -897,7 +899,7 @@ void Assembler::target_at_put(int pos, int target_pos) { instr &= ~kImm24Mask; } int imm24 = imm26 >> 2; - DCHECK(is_int24(imm24)); + CHECK(is_int24(imm24)); instr_at_put(pos, instr | (imm24 & kImm24Mask)); } @@ -1030,10 +1032,53 @@ namespace { bool FitsShifter(uint32_t imm32, uint32_t* rotate_imm, uint32_t* immed_8, Instr* instr) { // imm32 must be unsigned. - for (int rot = 0; rot < 16; rot++) { - uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot); - if ((imm8 <= 0xFF)) { - *rotate_imm = rot; + { + // 32-bit immediates can be encoded as: + // (8-bit value, 2*N bit left rotation) + // e.g. 0xab00 can be encoded as 0xab shifted left by 8 == 2*4, i.e. + // (0xab, 4) + // + // Check three categories which cover all possible shifter fits: + // 1. 0x000000FF: The value is already 8-bit (no shifting necessary), + // 2. 0x000FF000: The 8-bit value is somewhere in the middle of the 32-bit + // value, and + // 3. 
0xF000000F: The 8-bit value is split over the beginning and end of + // the 32-bit value. + + // For 0x000000FF. + if (imm32 <= 0xFF) { + *rotate_imm = 0; + *immed_8 = imm32; + return true; + } + // For 0x000FF000, count trailing zeros and shift down to 0x000000FF. Note + // that we have to round the trailing zeros down to the nearest multiple of + // two, since we can only encode shifts of 2*N. Note also that we know that + // imm32 isn't zero, since we already checked if it's less than 0xFF. + int half_trailing_zeros = base::bits::CountTrailingZerosNonZero(imm32) / 2; + uint32_t imm8 = imm32 >> (half_trailing_zeros * 2); + if (imm8 <= 0xFF) { + DCHECK_GT(half_trailing_zeros, 0); + // Rotating right by trailing_zeros is equivalent to rotating left by + // 32 - trailing_zeros. We return rotate_right / 2, so calculate + // (32 - trailing_zeros)/2 == 16 - trailing_zeros/2. + *rotate_imm = (16 - half_trailing_zeros); + *immed_8 = imm8; + return true; + } + // For 0xF000000F, rotate by 16 to get 0x000FF000 and continue as if it + // were that case. + uint32_t imm32_rot16 = base::bits::RotateLeft32(imm32, 16); + half_trailing_zeros = + base::bits::CountTrailingZerosNonZero(imm32_rot16) / 2; + imm8 = imm32_rot16 >> (half_trailing_zeros * 2); + if (imm8 <= 0xFF) { + // We've rotated left by 2*8, so we can't have more than that many + // trailing zeroes. + DCHECK_LT(half_trailing_zeros, 8); + // We've already rotated by 2*8, before calculating trailing_zeros/2, + // so we need (32 - (16 + trailing_zeros))/2 == 8 - trailing_zeros/2. + *rotate_imm = 8 - half_trailing_zeros; *immed_8 = imm8; return true; } @@ -2258,7 +2303,7 @@ void Assembler::bkpt(uint32_t imm16) { } void Assembler::svc(uint32_t imm24, Condition cond) { - DCHECK(is_uint24(imm24)); + CHECK(is_uint24(imm24)); emit(cond | 15 * B24 | imm24); } @@ -5204,8 +5249,13 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, (rmode == RelocInfo::CODE_TARGET && value != 0) || (RelocInfo::IsEmbeddedObjectMode(rmode) && value != 0); DCHECK_LT(pending_32_bit_constants_.size(), kMaxNumPending32Constants); - if (pending_32_bit_constants_.empty()) { + if (first_const_pool_32_use_ < 0) { + DCHECK(pending_32_bit_constants_.empty()); + DCHECK_EQ(constant_pool_deadline_, kMaxInt); first_const_pool_32_use_ = position; + constant_pool_deadline_ = position + kCheckPoolDeadline; + } else { + DCHECK(!pending_32_bit_constants_.empty()); } ConstantPoolEntry entry(position, value, sharing_ok, rmode); @@ -5224,7 +5274,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, } } - pending_32_bit_constants_.push_back(entry); + pending_32_bit_constants_.emplace_back(entry); // Make sure the constant pool is not emitted in place of the next // instruction for which we just recorded relocation info. @@ -5239,17 +5289,17 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode, void Assembler::BlockConstPoolFor(int instructions) { int pc_limit = pc_offset() + instructions * kInstrSize; if (no_const_pool_before_ < pc_limit) { - // Max pool start (if we need a jump and an alignment). -#ifdef DEBUG - int start = pc_limit + kInstrSize + 2 * kPointerSize; - DCHECK(pending_32_bit_constants_.empty() || - (start < first_const_pool_32_use_ + kMaxDistToIntPool)); -#endif no_const_pool_before_ = pc_limit; } - if (next_buffer_check_ < no_const_pool_before_) { - next_buffer_check_ = no_const_pool_before_; + // If we're due a const pool check before the block finishes, move it to just + // after the block. 
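Editorial note: the rewritten FitsShifter fast paths above must agree with the brute-force definition of an ARM shifter operand, an 8-bit value rotated right by an even amount. A standalone reference check (an editorial sketch, not code from the patch); the new trailing-zeros paths are simply a constant-time way of finding the same rotation:

#include <cstdint>
#include <cstdio>

// Returns true if imm32 is encodable, writing the encoded rotation field and
// 8-bit payload. Tries every even rotation, mirroring the old loop.
static bool FitsShifterReference(uint32_t imm32, uint32_t* rot, uint32_t* imm8) {
  for (uint32_t r = 0; r < 16; ++r) {
    uint32_t n = 2 * r;
    // Undo a rotate-right by 2*r with a rotate-left by 2*r.
    uint32_t candidate = (n == 0) ? imm32 : ((imm32 << n) | (imm32 >> (32 - n)));
    if (candidate <= 0xFF) {
      *rot = r;
      *imm8 = candidate;
      return true;
    }
  }
  return false;
}

int main() {
  uint32_t rot, imm8;
  if (FitsShifterReference(0xab00, &rot, &imm8)) {
    printf("0xab00 -> imm8=0x%x rot=%u\n", (unsigned)imm8, (unsigned)rot);  // imm8=0xab rot=12
  }
  if (!FitsShifterReference(0x12345678, &rot, &imm8)) {
    printf("0x12345678 does not fit a shifter operand\n");
  }
  return 0;
}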
+ if (constant_pool_deadline_ < no_const_pool_before_) { + // Make sure that the new deadline isn't too late (including a jump and the + // constant pool marker). + DCHECK_LE(no_const_pool_before_, + first_const_pool_32_use_ + kMaxDistToIntPool); + constant_pool_deadline_ = no_const_pool_before_; } } @@ -5265,49 +5315,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { // There is nothing to do if there are no pending constant pool entries. if (pending_32_bit_constants_.empty()) { - // Calculate the offset of the next check. - next_buffer_check_ = pc_offset() + kCheckPoolInterval; + // We should only fall into this case if we're either trying to forcing + // emission or opportunistically checking after a jump. + DCHECK(force_emit || !require_jump); return; } - // Check that the code buffer is large enough before emitting the constant - // pool (include the jump over the pool and the constant pool marker and - // the gap to the relocation information). - int jump_instr = require_jump ? kInstrSize : 0; - int size_up_to_marker = jump_instr + kInstrSize; - int estimated_size_after_marker = - pending_32_bit_constants_.size() * kPointerSize; - int estimated_size = size_up_to_marker + estimated_size_after_marker; - // We emit a constant pool when: // * requested to do so by parameter force_emit (e.g. after each function). // * the distance from the first instruction accessing the constant pool to - // any of the constant pool entries will exceed its limit the next - // time the pool is checked. This is overly restrictive, but we don't emit - // constant pool entries in-order so it's conservatively correct. + // the first constant pool entry will exceed its limit the next time the + // pool is checked. // * the instruction doesn't require a jump after itself to jump over the // constant pool, and we're getting close to running out of range. if (!force_emit) { - DCHECK(!pending_32_bit_constants_.empty()); - bool need_emit = false; - int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_; - if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) || - (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) { - need_emit = true; + DCHECK_NE(first_const_pool_32_use_, -1); + int dist32 = pc_offset() - first_const_pool_32_use_; + if (require_jump) { + // We should only be on this path if we've exceeded our deadline. + DCHECK_GE(dist32, kCheckPoolDeadline); + } else if (dist32 < kCheckPoolDeadline / 2) { + return; } - if (!need_emit) return; } - // Deduplicate constants. - int size_after_marker = estimated_size_after_marker; + int size_after_marker = pending_32_bit_constants_.size() * kPointerSize; + // Deduplicate constants. for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) { ConstantPoolEntry& entry = pending_32_bit_constants_[i]; if (entry.is_merged()) size_after_marker -= kPointerSize; } + // Check that the code buffer is large enough before emitting the constant + // pool (include the jump over the pool and the constant pool marker and + // the gap to the relocation information). + int jump_instr = require_jump ? 
kInstrSize : 0; + int size_up_to_marker = jump_instr + kInstrSize; int size = size_up_to_marker + size_after_marker; - int needed_space = size + kGap; while (buffer_space() <= needed_space) GrowBuffer(); @@ -5331,6 +5376,14 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker / kPointerSize)); + // The first entry in the constant pool should also be the first + CHECK_EQ(first_const_pool_32_use_, pending_32_bit_constants_[0].position()); + CHECK(!pending_32_bit_constants_[0].is_merged()); + + // Make sure we're not emitting the constant too late. + CHECK_LE(pc_offset(), + first_const_pool_32_use_ + kMaxDistToPcRelativeConstant); + // Emit 32-bit constant pool entries. for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) { ConstantPoolEntry& entry = pending_32_bit_constants_[i]; @@ -5354,6 +5407,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { ConstantPoolEntry& merged = pending_32_bit_constants_[entry.merged_index()]; DCHECK(entry.value() == merged.value()); + DCHECK_LT(merged.position(), entry.position()); Instr merged_instr = instr_at(merged.position()); DCHECK(IsLdrPcImmediateOffset(merged_instr)); delta = GetLdrRegisterImmediateOffset(merged_instr); @@ -5379,9 +5433,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { } } - // Since a constant pool was just emitted, move the check offset forward by - // the standard interval. - next_buffer_check_ = pc_offset() + kCheckPoolInterval; + // Since a constant pool was just emitted, we don't need another check until + // the next constant pool entry is added. + constant_pool_deadline_ = kMaxInt; } PatchingAssembler::PatchingAssembler(const AssemblerOptions& options, diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index e0490a68533466..04d5eef054d731 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -45,6 +45,7 @@ #include #include +#include "src/base/small-vector.h" #include "src/codegen/arm/constants-arm.h" #include "src/codegen/arm/register-arm.h" #include "src/codegen/assembler.h" @@ -310,7 +311,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { ~Assembler() override; - void AbortedCodeGeneration() override { pending_32_bit_constants_.clear(); } + void AbortedCodeGeneration() override { + pending_32_bit_constants_.clear(); + first_const_pool_32_use_ = -1; + } // GetCode emits any pending (non-emitted) code and fills the descriptor desc. static constexpr int kNoHandlerTable = 0; @@ -1148,13 +1152,24 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { static int DecodeShiftImm(Instr instr); static Instr PatchShiftImm(Instr instr, int immed); - // Constants in pools are accessed via pc relative addressing, which can - // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point + // Constants are accessed via pc relative addressing, which can reach −4095 to + // 4095 for integer PC-relative loads, and −1020 to 1020 for floating-point // PC-relative loads, thereby defining a maximum distance between the - // instruction and the accessed constant. - static constexpr int kMaxDistToIntPool = 4 * KB; - // All relocations could be integer, it therefore acts as the limit. - static constexpr int kMinNumPendingConstants = 4; + // instruction and the accessed constant. 
Additionally, PC-relative loads + // start at a delta from the actual load instruction's PC, so we can add this + // on to the (positive) distance. + static constexpr int kMaxDistToPcRelativeConstant = + 4095 + Instruction::kPcLoadDelta; + // The constant pool needs to be jumped over, and has a marker, so the actual + // distance from the instruction and start of the constant pool has to include + // space for these two instructions. + static constexpr int kMaxDistToIntPool = + kMaxDistToPcRelativeConstant - 2 * kInstrSize; + // Experimentally derived as sufficient for ~95% of compiles. + static constexpr int kTypicalNumPending32Constants = 32; + // The maximum number of pending constants is reached by a sequence of only + // constant loads, which limits it to the number of constant loads that can + // fit between the first constant load and the distance to the constant pool. static constexpr int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize; @@ -1165,8 +1180,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Check if is time to emit a constant pool. void CheckConstPool(bool force_emit, bool require_jump); - void MaybeCheckConstPool() { - if (pc_offset() >= next_buffer_check_) { + V8_INLINE void MaybeCheckConstPool() { + if (V8_UNLIKELY(pc_offset() >= constant_pool_deadline_)) { CheckConstPool(false, true); } } @@ -1192,9 +1207,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // number of call to EndBlockConstpool. void StartBlockConstPool() { if (const_pool_blocked_nesting_++ == 0) { - // Prevent constant pool checks happening by setting the next check to - // the biggest possible offset. - next_buffer_check_ = kMaxInt; + // Prevent constant pool checks happening by resetting the deadline. + constant_pool_deadline_ = kMaxInt; } } @@ -1202,19 +1216,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // StartBlockConstPool to have an effect. void EndBlockConstPool() { if (--const_pool_blocked_nesting_ == 0) { + if (first_const_pool_32_use_ >= 0) { #ifdef DEBUG - // Max pool start (if we need a jump and an alignment). - int start = pc_offset() + kInstrSize + 2 * kPointerSize; - // Check the constant pool hasn't been blocked for too long. - DCHECK(pending_32_bit_constants_.empty() || - (start < first_const_pool_32_use_ + kMaxDistToIntPool)); + // Check the constant pool hasn't been blocked for too long. + DCHECK_LE(pc_offset(), first_const_pool_32_use_ + kMaxDistToIntPool); #endif - // Two cases: - // * no_const_pool_before_ >= next_buffer_check_ and the emission is - // still blocked - // * no_const_pool_before_ < next_buffer_check_ and the next emit will - // trigger a check. - next_buffer_check_ = no_const_pool_before_; + // Reset the constant pool check back to the deadline. + constant_pool_deadline_ = first_const_pool_32_use_ + kCheckPoolDeadline; + } } } @@ -1258,7 +1267,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // pending relocation entry per instruction. // The buffers of pending constant pool entries. - std::vector pending_32_bit_constants_; + base::SmallVector + pending_32_bit_constants_; // Scratch registers available for use by the Assembler. RegList scratch_register_list_; @@ -1268,8 +1278,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // Avoid overflows for displacements etc. 
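[Editorial sketch, not part of the patch] The distance constants introduced above follow from the ARM addressing-mode limits quoted in the comment. The arithmetic below assumes the usual ARM values kPcLoadDelta == 8 and kInstrSize == 4 (assumptions of this sketch); kCheckPoolDeadline, defined further down, additionally subtracts 2 * kGap, whose value is not reproduced here.

constexpr int kPcLoadDelta = 8;  // PC reads as "address of instruction + 8"
constexpr int kInstrSize = 4;

// An integer ldr can reach pc +/- 4095, and the load's PC is kPcLoadDelta
// bytes ahead of the instruction itself, so the constant may sit this far
// away from the load ...
constexpr int kMaxDistToPcRelativeConstant = 4095 + kPcLoadDelta;  // 4103
// ... minus the jump over the pool and the pool marker (one instruction each)
// for the distance to the start of the pool.
constexpr int kMaxDistToIntPool =
    kMaxDistToPcRelativeConstant - 2 * kInstrSize;  // 4095
// A run of back-to-back constant loads is the worst case for pending entries.
constexpr int kMaxNumPending32Constants = kMaxDistToIntPool / kInstrSize;

static_assert(kMaxDistToPcRelativeConstant == 4103, "");
static_assert(kMaxDistToIntPool == 4095, "");
static_assert(kMaxNumPending32Constants == 1023, "");

int main() { return 0; }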
static const int kMaximalBufferSize = 512 * MB; - int next_buffer_check_; // pc offset of next buffer check - // Constant pool generation // Pools are emitted in the instruction stream, preferably after unconditional // jumps or after returns from functions (in dead code locations). @@ -1281,11 +1289,16 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // if so, a relocation info entry is associated to the constant pool entry. // Repeated checking whether the constant pool should be emitted is rather - // expensive. By default we only check again once a number of instructions - // has been generated. That also means that the sizing of the buffers is not - // an exact science, and that we rely on some slop to not overrun buffers. - static constexpr int kCheckPoolIntervalInst = 32; - static constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize; + // expensive. Instead, we check once a deadline is hit; the deadline being + // when there is a possibility that MaybeCheckConstPool won't be called before + // kMaxDistToIntPoolWithHeader is exceeded. Since MaybeCheckConstPool is + // called in CheckBuffer, this means that kGap is an upper bound on this + // check. Use 2 * kGap just to give it some slack around BlockConstPoolScopes. + static constexpr int kCheckPoolDeadline = kMaxDistToIntPool - 2 * kGap; + + // pc offset of the upcoming constant pool deadline. Equivalent to + // first_const_pool_32_use_ + kCheckPoolDeadline. + int constant_pool_deadline_; // Emission of the constant pool may be blocked in some code sequences. int const_pool_blocked_nesting_; // Block emission if this is not zero. @@ -1298,7 +1311,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; - inline void CheckBuffer(); + V8_INLINE void CheckBuffer(); void GrowBuffer(); // Instruction generation diff --git a/deps/v8/src/codegen/arm/cpu-arm.cc b/deps/v8/src/codegen/arm/cpu-arm.cc index 47fe4bdb7404ce..88491c5e51cdaa 100644 --- a/deps/v8/src/codegen/arm/cpu-arm.cc +++ b/deps/v8/src/codegen/arm/cpu-arm.cc @@ -6,7 +6,7 @@ #ifdef __arm__ #ifdef __QNXNTO__ #include // for cache flushing. -#undef MAP_TYPE // NOLINT +#undef MAP_TYPE #elif V8_OS_FREEBSD #include // for cache flushing #include diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h new file mode 100644 index 00000000000000..296f72d1578c8f --- /dev/null +++ b/deps/v8/src/codegen/arm/interface-descriptors-arm-inl.h @@ -0,0 +1,256 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_ +#define V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_ + +#if V8_TARGET_ARCH_ARM + +#include "src/codegen/interface-descriptors.h" +#include "src/execution/frames.h" + +namespace v8 { +namespace internal { + +constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { + auto registers = RegisterArray(r0, r1, r2, r3, r4); + STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams); + return registers; +} + +// static +constexpr auto RecordWriteDescriptor::registers() { + return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0); +} + +// static +constexpr auto DynamicCheckMapsDescriptor::registers() { + return RegisterArray(r0, r1, r2, r3, cp); +} + +// static +constexpr auto EphemeronKeyBarrierDescriptor::registers() { + return RegisterArray(r0, r1, r2, r3, r4, kReturnRegister0); +} + +// static +constexpr Register LoadDescriptor::ReceiverRegister() { return r1; } +// static +constexpr Register LoadDescriptor::NameRegister() { return r2; } +// static +constexpr Register LoadDescriptor::SlotRegister() { return r0; } + +// static +constexpr Register LoadWithVectorDescriptor::VectorRegister() { return r3; } + +// static +constexpr Register +LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { + return r4; +} + +// static +constexpr Register StoreDescriptor::ReceiverRegister() { return r1; } +// static +constexpr Register StoreDescriptor::NameRegister() { return r2; } +// static +constexpr Register StoreDescriptor::ValueRegister() { return r0; } +// static +constexpr Register StoreDescriptor::SlotRegister() { return r4; } + +// static +constexpr Register StoreWithVectorDescriptor::VectorRegister() { return r3; } + +// static +constexpr Register StoreTransitionDescriptor::MapRegister() { return r5; } + +// static +constexpr Register ApiGetterDescriptor::HolderRegister() { return r0; } +// static +constexpr Register ApiGetterDescriptor::CallbackRegister() { return r3; } + +// static +constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; } +// static +constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return r3; } + +// static +constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { + return r3; +} +// static +constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; } + +// static +// static +constexpr Register TypeConversionDescriptor::ArgumentRegister() { return r0; } + +// static +constexpr auto TypeofDescriptor::registers() { return RegisterArray(r3); } + +// static +constexpr auto CallTrampolineDescriptor::registers() { + // r0 : number of arguments + // r1 : the target to call + return RegisterArray(r1, r0); +} + +// static +constexpr auto CallVarargsDescriptor::registers() { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r4 : arguments list length (untagged) + // r2 : arguments list (FixedArray) + return RegisterArray(r1, r0, r4, r2); +} + +// static +constexpr auto CallForwardVarargsDescriptor::registers() { + // r0 : number of arguments + // r2 : start index (to support rest parameters) + // r1 : the target to call + return RegisterArray(r1, r0, r2); +} + +// static +constexpr auto CallFunctionTemplateDescriptor::registers() { + // r1 : function template info + // r2 : number of arguments (on the stack, not including receiver) + return RegisterArray(r1, r2); +} + +// static +constexpr auto CallWithSpreadDescriptor::registers() { + // r0 : number of arguments (on the stack, not 
including receiver) + // r1 : the target to call + // r2 : the object to spread + return RegisterArray(r1, r0, r2); +} + +// static +constexpr auto CallWithArrayLikeDescriptor::registers() { + // r1 : the target to call + // r2 : the arguments list + return RegisterArray(r1, r2); +} + +// static +constexpr auto ConstructVarargsDescriptor::registers() { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r3 : the new target + // r4 : arguments list length (untagged) + // r2 : arguments list (FixedArray) + return RegisterArray(r1, r3, r0, r4, r2); +} + +// static +constexpr auto ConstructForwardVarargsDescriptor::registers() { + // r0 : number of arguments + // r3 : the new target + // r2 : start index (to support rest parameters) + // r1 : the target to call + return RegisterArray(r1, r3, r0, r2); +} + +// static +constexpr auto ConstructWithSpreadDescriptor::registers() { + // r0 : number of arguments (on the stack, not including receiver) + // r1 : the target to call + // r3 : the new target + // r2 : the object to spread + return RegisterArray(r1, r3, r0, r2); +} + +// static +constexpr auto ConstructWithArrayLikeDescriptor::registers() { + // r1 : the target to call + // r3 : the new target + // r2 : the arguments list + return RegisterArray(r1, r3, r2); +} + +// static +constexpr auto ConstructStubDescriptor::registers() { + // r0 : number of arguments + // r1 : the target to call + // r3 : the new target + // r2 : allocation site or undefined + return RegisterArray(r1, r3, r0, r2); +} + +// static +constexpr auto AbortDescriptor::registers() { return RegisterArray(r1); } + +// static +constexpr auto CompareDescriptor::registers() { return RegisterArray(r1, r0); } + +// static +constexpr auto Compare_BaselineDescriptor::registers() { + // r1: left operand + // r0: right operand + // r2: feedback slot + return RegisterArray(r1, r0, r2); +} + +// static +constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(r1, r0); } + +// static +constexpr auto BinaryOp_BaselineDescriptor::registers() { + // r1: left operand + // r0: right operand + // r2: feedback slot + return RegisterArray(r1, r0, r2); +} + +// static +constexpr auto ApiCallbackDescriptor::registers() { + return RegisterArray(r1, // kApiFunctionAddress + r2, // kArgc + r3, // kCallData + r0); // kHolder +} + +// static +constexpr auto InterpreterDispatchDescriptor::registers() { + return RegisterArray( + kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister); +} + +// static +constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { + return RegisterArray(r0, // argument count (not including receiver) + r2, // address of first argument + r1); // the target callable to be call +} + +// static +constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { + return RegisterArray( + r0, // argument count (not including receiver) + r4, // address of the first argument + r1, // constructor to call + r3, // new target + r2); // allocation site feedback if available, undefined otherwise +} + +// static +constexpr auto ResumeGeneratorDescriptor::registers() { + return RegisterArray(r0, // the value to pass to the generator + r1); // the JSGeneratorObject to resume +} + +// static +constexpr auto RunMicrotasksEntryDescriptor::registers() { + return RegisterArray(r0, r1); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_ARM + +#endif // 
V8_CODEGEN_ARM_INTERFACE_DESCRIPTORS_ARM_INL_H_ diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc deleted file mode 100644 index 53992227ab37ba..00000000000000 --- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc +++ /dev/null @@ -1,306 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#if V8_TARGET_ARCH_ARM - -#include "src/codegen/interface-descriptors.h" - -#include "src/execution/frames.h" - -namespace v8 { -namespace internal { - -const Register CallInterfaceDescriptor::ContextRegister() { return cp; } - -void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( - CallInterfaceDescriptorData* data, int register_parameter_count) { - const Register default_stub_registers[] = {r0, r1, r2, r3, r4}; - CHECK_LE(static_cast(register_parameter_count), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(register_parameter_count, - default_stub_registers); -} - -void RecordWriteDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - const Register default_stub_registers[] = {r0, r1, r2, r3, r4}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -void DynamicCheckMapsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register default_stub_registers[] = {r0, r1, r2, r3, cp}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - const Register default_stub_registers[] = {r0, r1, r2, r3, r4}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -const Register LoadDescriptor::ReceiverRegister() { return r1; } -const Register LoadDescriptor::NameRegister() { return r2; } -const Register LoadDescriptor::SlotRegister() { return r0; } - -const Register LoadWithVectorDescriptor::VectorRegister() { return r3; } - -const Register -LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { - return r4; -} - -const Register StoreDescriptor::ReceiverRegister() { return r1; } -const Register StoreDescriptor::NameRegister() { return r2; } -const Register StoreDescriptor::ValueRegister() { return r0; } -const Register StoreDescriptor::SlotRegister() { return r4; } - -const Register StoreWithVectorDescriptor::VectorRegister() { return r3; } - -const Register StoreTransitionDescriptor::SlotRegister() { return r4; } -const Register StoreTransitionDescriptor::VectorRegister() { return r3; } -const Register StoreTransitionDescriptor::MapRegister() { return r5; } - -const Register ApiGetterDescriptor::HolderRegister() { return r0; } -const Register ApiGetterDescriptor::CallbackRegister() { return r3; } - -const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; } -const Register 
GrowArrayElementsDescriptor::KeyRegister() { return r3; } - -const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return r3; } -const Register BaselineLeaveFrameDescriptor::WeightRegister() { return r4; } - -// static -const Register TypeConversionDescriptor::ArgumentRegister() { return r0; } - -void TypeofDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r3}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallTrampolineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments - // r1 : the target to call - Register registers[] = {r1, r0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments (on the stack, not including receiver) - // r1 : the target to call - // r4 : arguments list length (untagged) - // r2 : arguments list (FixedArray) - Register registers[] = {r1, r0, r4, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallForwardVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments - // r2 : start index (to support rest parameters) - // r1 : the target to call - Register registers[] = {r1, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallFunctionTemplateDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r1 : function template info - // r2 : number of arguments (on the stack, not including receiver) - Register registers[] = {r1, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallWithSpreadDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments (on the stack, not including receiver) - // r1 : the target to call - // r2 : the object to spread - Register registers[] = {r1, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallWithArrayLikeDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r1 : the target to call - // r2 : the arguments list - Register registers[] = {r1, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments (on the stack, not including receiver) - // r1 : the target to call - // r3 : the new target - // r4 : arguments list length (untagged) - // r2 : arguments list (FixedArray) - Register registers[] = {r1, r3, r0, r4, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments - // r3 : the new target - // r2 : start index (to support rest parameters) - // r1 : the target to call - Register registers[] = {r1, r3, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructWithSpreadDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments (on the stack, not including receiver) - // r1 : the target to call - // r3 : the new target - // r2 : the object to spread - Register registers[] = {r1, r3, r0, r2}; - 
data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r1 : the target to call - // r3 : the new target - // r2 : the arguments list - Register registers[] = {r1, r3, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructStubDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r0 : number of arguments - // r1 : the target to call - // r3 : the new target - // r2 : allocation site or undefined - Register registers[] = {r1, r3, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void AbortDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r1}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CompareDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r1, r0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void Compare_BaselineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r1: left operand - // r0: right operand - // r2: feedback slot - Register registers[] = {r1, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void BinaryOpDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r1, r0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void BinaryOp_BaselineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // r1: left operand - // r0: right operand - // r2: feedback slot - Register registers[] = {r1, r0, r2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ApiCallbackDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - r1, // kApiFunctionAddress - r2, // kArgc - r3, // kCallData - r0, // kHolder - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterDispatchDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - r0, // argument count (not including receiver) - r2, // address of first argument - r1 // the target callable to be call - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - r0, // argument count (not including receiver) - r4, // address of the first argument - r1, // constructor to call - r3, // new target - r2, // allocation site feedback if available, undefined otherwise - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ResumeGeneratorDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - r0, // the value to pass to the generator - r1 // the JSGeneratorObject to resume - }; - data->InitializePlatformSpecific(arraysize(registers), 
registers); -} - -void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - r1, // loaded new FP - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {r0, r1}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -} // namespace internal -} // namespace v8 - -#endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index f83eee4a919df3..d4e12f3092da1e 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -13,6 +13,7 @@ #include "src/codegen/callable.h" #include "src/codegen/code-factory.h" #include "src/codegen/external-reference-table.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/macro-assembler.h" #include "src/codegen/register-configuration.h" #include "src/debug/debug.h" @@ -59,7 +60,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, bytes += NumRegs(list) * kPointerSize; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes; } @@ -85,7 +86,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, bytes += NumRegs(list) * kPointerSize; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { SaveFPRegs(sp, lr); bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes; } @@ -96,7 +97,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1, Register exclusion2, Register exclusion3) { int bytes = 0; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { RestoreFPRegs(sp, lr); bytes += DwVfpRegister::kNumRegisters * DwVfpRegister::kSizeInBytes; } @@ -660,7 +661,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label done; // Skip barrier if writing a smi. - if (smi_check == INLINE_SMI_CHECK) { + if (smi_check == SmiCheck::kInline) { JumpIfSmi(value, &done); } @@ -668,7 +669,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, // of the object, so so offset must be a multiple of kPointerSize. 
DCHECK(IsAligned(offset, kPointerSize)); - if (emit_debug_code()) { + if (FLAG_debug_code) { Label ok; UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -680,7 +681,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status, - save_fp, remembered_set_action, OMIT_SMI_CHECK); + save_fp, remembered_set_action, SmiCheck::kOmit); bind(&done); } @@ -826,7 +827,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, RememberedSetAction remembered_set_action, SmiCheck smi_check) { DCHECK_NE(object, value); - if (emit_debug_code()) { + if (FLAG_debug_code) { { UseScratchRegisterScope temps(this); Register scratch = temps.Acquire(); @@ -837,7 +838,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } - if ((remembered_set_action == OMIT_REMEMBERED_SET && + if ((remembered_set_action == RememberedSetAction::kOmit && !FLAG_incremental_marking) || FLAG_disable_write_barriers) { return; @@ -847,7 +848,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // catch stores of smis and stores into the young generation. Label done; - if (smi_check == INLINE_SMI_CHECK) { + if (smi_check == SmiCheck::kInline) { JumpIfSmi(value, &done); } @@ -1435,7 +1436,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space, PushCommonFrame(scratch); // Reserve room for saved entry sp. sub(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp)); - if (emit_debug_code()) { + if (FLAG_debug_code) { mov(scratch, Operand::Zero()); str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); } @@ -1622,7 +1623,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch, void MacroAssembler::InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, - Label* done, InvokeFlag flag) { + Label* done, InvokeType type) { Label regular_invoke; // r0: actual arguments count // r1: function (passed through to callee) @@ -1722,9 +1723,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. - DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); DCHECK_EQ(function, r1); DCHECK_IMPLIES(new_target.is_valid(), new_target == r3); @@ -1746,17 +1747,19 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, } Label done; - InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag); + InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); // We call indirectly through the code field in the function to // allow recompilation to take effect without changing any of the // call sites. 
Register code = kJavaScriptCallCodeStartRegister; ldr(code, FieldMemOperand(function, JSFunction::kCodeOffset)); - if (flag == CALL_FUNCTION) { - CallCodeObject(code); - } else { - DCHECK(flag == JUMP_FUNCTION); - JumpCodeObject(code); + switch (type) { + case InvokeType::kCall: + CallCodeObject(code); + break; + case InvokeType::kJump: + JumpCodeObject(code); + break; } b(&done); @@ -1773,9 +1776,9 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, void MacroAssembler::InvokeFunctionWithNewTarget( Register fun, Register new_target, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. - DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); // Contract with called JS functions requires that function is passed in r1. DCHECK_EQ(fun, r1); @@ -1790,15 +1793,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget( SharedFunctionInfo::kFormalParameterCountOffset)); InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count, - flag); + type); } void MacroAssembler::InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. - DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); // Contract with called JS functions requires that function is passed in r1. DCHECK_EQ(function, r1); @@ -1807,18 +1810,7 @@ void MacroAssembler::InvokeFunction(Register function, ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); InvokeFunctionCode(r1, no_reg, expected_parameter_count, - actual_parameter_count, flag); -} - -void MacroAssembler::MaybeDropFrames() { - // Check whether we need to drop frames to restart a function on the stack. 
- ExternalReference restart_fp = - ExternalReference::debug_restart_fp_address(isolate()); - Move(r1, restart_fp); - ldr(r1, MemOperand(r1)); - tst(r1, r1); - Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, - ne); + actual_parameter_count, type); } void MacroAssembler::PushStackHandler() { @@ -1993,8 +1985,8 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, DCHECK_EQ(builtin.address() & 1, 1); #endif Move(r1, builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, - kArgvOnStack, builtin_exit_frame); + Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, + ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } @@ -2034,11 +2026,11 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, } void TurboAssembler::Assert(Condition cond, AbortReason reason) { - if (emit_debug_code()) Check(cond, reason); + if (FLAG_debug_code) Check(cond, reason); } void TurboAssembler::AssertUnreachable(AbortReason reason) { - if (emit_debug_code()) Abort(reason); + if (FLAG_debug_code) Abort(reason); } void TurboAssembler::Check(Condition cond, AbortReason reason) { @@ -2052,11 +2044,11 @@ void TurboAssembler::Check(Condition cond, AbortReason reason) { void TurboAssembler::Abort(AbortReason reason) { Label abort_start; bind(&abort_start); -#ifdef DEBUG - const char* msg = GetAbortReason(reason); - RecordComment("Abort message: "); - RecordComment(msg); -#endif + if (FLAG_code_comments) { + const char* msg = GetAbortReason(reason); + RecordComment("Abort message: "); + RecordComment(msg); + } // Avoid emitting call to builtin if requested. if (trap_on_abort()) { @@ -2143,7 +2135,7 @@ void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) { } void MacroAssembler::AssertNotSmi(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); Check(ne, AbortReason::kOperandIsASmi); @@ -2151,7 +2143,7 @@ void MacroAssembler::AssertNotSmi(Register object) { } void MacroAssembler::AssertSmi(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); Check(eq, AbortReason::kOperandIsNotASmi); @@ -2159,7 +2151,7 @@ void MacroAssembler::AssertSmi(Register object) { } void MacroAssembler::AssertConstructor(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor); @@ -2173,7 +2165,7 @@ void MacroAssembler::AssertConstructor(Register object) { } void MacroAssembler::AssertFunction(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); Check(ne, AbortReason::kOperandIsASmiAndNotAFunction); @@ -2187,7 +2179,7 @@ void MacroAssembler::AssertFunction(Register object) { } void MacroAssembler::AssertBoundFunction(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); tst(object, Operand(kSmiTagMask)); Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction); @@ -2199,7 +2191,7 @@ void MacroAssembler::AssertBoundFunction(Register object) { } void MacroAssembler::AssertGeneratorObject(Register object) { - if (!emit_debug_code()) return; + if (!FLAG_debug_code) return; tst(object, Operand(kSmiTagMask)); Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject); @@ -2229,7 
+2221,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) { void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, Register scratch) { - if (emit_debug_code()) { + if (FLAG_debug_code) { Label done_checking; AssertNotSmi(object); CompareRoot(object, RootIndex::kUndefinedValue); @@ -2520,7 +2512,7 @@ void TurboAssembler::CallCFunctionHelper(Register function, // running in the simulator. The simulator has its own alignment check which // provides more information. #if V8_HOST_ARCH_ARM - if (emit_debug_code()) { + if (FLAG_debug_code) { int frame_alignment = base::OS::ActivationFrameAlignment(); int frame_alignment_mask = frame_alignment - 1; if (frame_alignment > kPointerSize) { diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index e622d4aa172fb1..3a54f6c45fc16e 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -29,8 +29,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) { return MemOperand(object, offset - kHeapObjectTag); } -enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg, @@ -656,16 +654,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void RecordWriteField( Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); // For a given |object| notify the garbage collector that the slot at |offset| // has been written. |value| is the object being stored. void RecordWrite( Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. @@ -689,7 +687,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Invoke the JavaScript function code by either calling or jumping. void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, - Register actual_parameter_count, InvokeFlag flag); + Register actual_parameter_count, InvokeType type); // On function call, call into the debugger. void CallDebugOnFunctionCall(Register fun, Register new_target, @@ -700,13 +698,10 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // current context to the context in the function before invoking. void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, - InvokeFlag flag); + InvokeType type); void InvokeFunction(Register function, Register expected_parameter_count, - Register actual_parameter_count, InvokeFlag flag); - - // Frame restart support - void MaybeDropFrames(); + Register actual_parameter_count, InvokeType type); // Exception handling @@ -784,18 +779,18 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Call a runtime routine. 
void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); // Convenience function: Same as above, but takes the fid instead. void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) { + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { const Runtime::Function* function = Runtime::FunctionForId(fid); CallRuntime(function, function->nargs, save_doubles); } // Convenience function: Same as above, but takes the fid instead. void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) { + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); } @@ -874,7 +869,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // Helper functions for generating invokes. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, - InvokeFlag flag); + InvokeType type); DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler); }; diff --git a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h index ee64dbe1f26fc7..2668502f816abe 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64-inl.h @@ -1072,12 +1072,12 @@ const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const { inline void Assembler::CheckBufferSpace() { DCHECK_LT(pc_, buffer_start_ + buffer_->size()); - if (buffer_space() < kGap) { + if (V8_UNLIKELY(buffer_space() < kGap)) { GrowBuffer(); } } -inline void Assembler::CheckBuffer() { +V8_INLINE void Assembler::CheckBuffer() { CheckBufferSpace(); if (pc_offset() >= next_veneer_pool_check_) { CheckVeneerPool(false, true); @@ -1085,6 +1085,10 @@ inline void Assembler::CheckBuffer() { constpool_.MaybeCheck(); } +EnsureSpace::EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) { + assembler->CheckBufferSpace(); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index aa2ffb26cdfc24..9d8b135954b4f7 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -2634,7 +2634,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { } void GrowBuffer(); - void CheckBufferSpace(); + V8_INLINE void CheckBufferSpace(); void CheckBuffer(); // Emission of the veneer pools may be blocked in some code sequences. @@ -2786,9 +2786,7 @@ class PatchingAssembler : public Assembler { class EnsureSpace { public: - explicit EnsureSpace(Assembler* assembler) : block_pools_scope_(assembler) { - assembler->CheckBufferSpace(); - } + explicit V8_INLINE EnsureSpace(Assembler* assembler); private: Assembler::BlockPoolsScope block_pools_scope_; diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index d7bd4834b0ea0a..4baf2e07ec4491 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -23,7 +23,7 @@ class CacheLineSizes { cache_type_register_ = 0; #else // Copy the content of the cache type register to a core register. 
- __asm__ __volatile__("mrs %x[ctr], ctr_el0" // NOLINT + __asm__ __volatile__("mrs %x[ctr], ctr_el0" : [ctr] "=r"(cache_type_register_)); #endif } @@ -64,9 +64,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) { uintptr_t istart = start & ~(isize - 1); uintptr_t end = start + length; - __asm__ __volatile__( // NOLINT - // Clean every line of the D cache containing the - // target data. + __asm__ __volatile__( + // Clean every line of the D cache containing the target data. "0: \n\t" // dc : Data Cache maintenance // c : Clean @@ -111,7 +110,7 @@ void CpuFeatures::FlushICache(void* address, size_t length) { : [dsize] "r"(dsize), [isize] "r"(isize), [end] "r"(end) // This code does not write to memory but without the dependency gcc might // move this code before the code is generated. - : "cc", "memory"); // NOLINT + : "cc", "memory"); #endif // V8_OS_WIN #endif // V8_HOST_ARCH_ARM64 } diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h new file mode 100644 index 00000000000000..90123dbdcb151e --- /dev/null +++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64-inl.h @@ -0,0 +1,265 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_ +#define V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_ + +#if V8_TARGET_ARCH_ARM64 + +#include "src/base/template-utils.h" +#include "src/codegen/interface-descriptors.h" +#include "src/execution/frames.h" + +namespace v8 { +namespace internal { + +constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() { + auto registers = RegisterArray(x0, x1, x2, x3, x4); + STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams); + return registers; +} + +// static +constexpr auto RecordWriteDescriptor::registers() { + return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0); +} + +// static +constexpr auto DynamicCheckMapsDescriptor::registers() { + return RegisterArray(x0, x1, x2, x3, cp); +} + +// static +constexpr auto EphemeronKeyBarrierDescriptor::registers() { + return RegisterArray(x0, x1, x2, x3, x4, kReturnRegister0); +} + +// static +constexpr Register LoadDescriptor::ReceiverRegister() { return x1; } +// static +constexpr Register LoadDescriptor::NameRegister() { return x2; } +// static +constexpr Register LoadDescriptor::SlotRegister() { return x0; } + +// static +constexpr Register LoadWithVectorDescriptor::VectorRegister() { return x3; } + +// static +constexpr Register +LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { + return x4; +} + +// static +constexpr Register StoreDescriptor::ReceiverRegister() { return x1; } +// static +constexpr Register StoreDescriptor::NameRegister() { return x2; } +// static +constexpr Register StoreDescriptor::ValueRegister() { return x0; } +// static +constexpr Register StoreDescriptor::SlotRegister() { return x4; } + +// static +constexpr Register StoreWithVectorDescriptor::VectorRegister() { return x3; } + +// static +constexpr Register StoreTransitionDescriptor::MapRegister() { return x5; } + +// static +constexpr Register ApiGetterDescriptor::HolderRegister() { return x0; } +// static +constexpr Register ApiGetterDescriptor::CallbackRegister() { return x3; } + +// static +constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; } +// static +constexpr Register 
GrowArrayElementsDescriptor::KeyRegister() { return x3; } + +// static +constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { + return x3; +} +// static +constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; } + +// static +// static +constexpr Register TypeConversionDescriptor::ArgumentRegister() { return x0; } + +// static +constexpr auto TypeofDescriptor::registers() { return RegisterArray(x3); } + +// static +constexpr auto CallTrampolineDescriptor::registers() { + // x1: target + // x0: number of arguments + return RegisterArray(x1, x0); +} + +// static +constexpr auto CallVarargsDescriptor::registers() { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x4 : arguments list length (untagged) + // x2 : arguments list (FixedArray) + return RegisterArray(x1, x0, x4, x2); +} + +// static +constexpr auto CallForwardVarargsDescriptor::registers() { + // x1: target + // x0: number of arguments + // x2: start index (to supported rest parameters) + return RegisterArray(x1, x0, x2); +} + +// static +constexpr auto CallFunctionTemplateDescriptor::registers() { + // x1 : function template info + // x2 : number of arguments (on the stack, not including receiver) + return RegisterArray(x1, x2); +} + +// static +constexpr auto CallWithSpreadDescriptor::registers() { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x2 : the object to spread + return RegisterArray(x1, x0, x2); +} + +// static +constexpr auto CallWithArrayLikeDescriptor::registers() { + // x1 : the target to call + // x2 : the arguments list + return RegisterArray(x1, x2); +} + +// static +constexpr auto ConstructVarargsDescriptor::registers() { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x3 : the new target + // x4 : arguments list length (untagged) + // x2 : arguments list (FixedArray) + return RegisterArray(x1, x3, x0, x4, x2); +} + +// static +constexpr auto ConstructForwardVarargsDescriptor::registers() { + // x3: new target + // x1: target + // x0: number of arguments + // x2: start index (to supported rest parameters) + return RegisterArray(x1, x3, x0, x2); +} + +// static +constexpr auto ConstructWithSpreadDescriptor::registers() { + // x0 : number of arguments (on the stack, not including receiver) + // x1 : the target to call + // x3 : the new target + // x2 : the object to spread + return RegisterArray(x1, x3, x0, x2); +} + +// static +constexpr auto ConstructWithArrayLikeDescriptor::registers() { + // x1 : the target to call + // x3 : the new target + // x2 : the arguments list + return RegisterArray(x1, x3, x2); +} + +// static +constexpr auto ConstructStubDescriptor::registers() { + // x3: new target + // x1: target + // x0: number of arguments + // x2: allocation site or undefined + return RegisterArray(x1, x3, x0, x2); +} + +// static +constexpr auto AbortDescriptor::registers() { return RegisterArray(x1); } + +// static +constexpr auto CompareDescriptor::registers() { + // x1: left operand + // x0: right operand + return RegisterArray(x1, x0); +} + +// static +constexpr auto Compare_BaselineDescriptor::registers() { + // x1: left operand + // x0: right operand + // x2: feedback slot + return RegisterArray(x1, x0, x2); +} + +// static +constexpr auto BinaryOpDescriptor::registers() { + // x1: left operand + // x0: right operand + return RegisterArray(x1, x0); +} + +// static +constexpr auto 
BinaryOp_BaselineDescriptor::registers() { + // x1: left operand + // x0: right operand + // x2: feedback slot + return RegisterArray(x1, x0, x2); +} + +// static +constexpr auto ApiCallbackDescriptor::registers() { + return RegisterArray(x1, // kApiFunctionAddress + x2, // kArgc + x3, // kCallData + x0); // kHolder +} + +// static +constexpr auto InterpreterDispatchDescriptor::registers() { + return RegisterArray( + kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, + kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister); +} + +// static +constexpr auto InterpreterPushArgsThenCallDescriptor::registers() { + return RegisterArray(x0, // argument count (not including receiver) + x2, // address of first argument + x1); // the target callable to be call +} + +// static +constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() { + return RegisterArray( + x0, // argument count (not including receiver) + x4, // address of the first argument + x1, // constructor to call + x3, // new target + x2); // allocation site feedback if available, undefined otherwise +} + +// static +constexpr auto ResumeGeneratorDescriptor::registers() { + return RegisterArray(x0, // the value to pass to the generator + x1); // the JSGeneratorObject to resume +} + +// static +constexpr auto RunMicrotasksEntryDescriptor::registers() { + return RegisterArray(x0, x1); +} + +} // namespace internal +} // namespace v8 + +#endif // V8_TARGET_ARCH_ARM64 + +#endif // V8_CODEGEN_ARM64_INTERFACE_DESCRIPTORS_ARM64_INL_H_ diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc deleted file mode 100644 index 246d6fc9610386..00000000000000 --- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc +++ /dev/null @@ -1,310 +0,0 @@ -// Copyright 2012 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#if V8_TARGET_ARCH_ARM64 - -#include "src/codegen/interface-descriptors.h" - -#include "src/execution/frames.h" - -namespace v8 { -namespace internal { - -const Register CallInterfaceDescriptor::ContextRegister() { return cp; } - -void CallInterfaceDescriptor::DefaultInitializePlatformSpecific( - CallInterfaceDescriptorData* data, int register_parameter_count) { - const Register default_stub_registers[] = {x0, x1, x2, x3, x4}; - CHECK_LE(static_cast(register_parameter_count), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(register_parameter_count, - default_stub_registers); -} - -void RecordWriteDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - const Register default_stub_registers[] = {x0, x1, x2, x3, x4}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -void DynamicCheckMapsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register default_stub_registers[] = {x0, x1, x2, x3, cp}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - const Register default_stub_registers[] = {x0, x1, x2, x3, x4}; - - data->RestrictAllocatableRegisters(default_stub_registers, - arraysize(default_stub_registers)); - - CHECK_LE(static_cast(kParameterCount), - arraysize(default_stub_registers)); - data->InitializePlatformSpecific(kParameterCount, default_stub_registers); -} - -const Register LoadDescriptor::ReceiverRegister() { return x1; } -const Register LoadDescriptor::NameRegister() { return x2; } -const Register LoadDescriptor::SlotRegister() { return x0; } - -const Register LoadWithVectorDescriptor::VectorRegister() { return x3; } - -const Register -LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() { - return x4; -} - -const Register StoreDescriptor::ReceiverRegister() { return x1; } -const Register StoreDescriptor::NameRegister() { return x2; } -const Register StoreDescriptor::ValueRegister() { return x0; } -const Register StoreDescriptor::SlotRegister() { return x4; } - -const Register StoreWithVectorDescriptor::VectorRegister() { return x3; } - -const Register StoreTransitionDescriptor::SlotRegister() { return x4; } -const Register StoreTransitionDescriptor::VectorRegister() { return x3; } -const Register StoreTransitionDescriptor::MapRegister() { return x5; } - -const Register ApiGetterDescriptor::HolderRegister() { return x0; } -const Register ApiGetterDescriptor::CallbackRegister() { return x3; } - -const Register GrowArrayElementsDescriptor::ObjectRegister() { return x0; } -const Register GrowArrayElementsDescriptor::KeyRegister() { return x3; } - -const Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() { return x3; } -const Register BaselineLeaveFrameDescriptor::WeightRegister() { return x4; } - -// static -const Register TypeConversionDescriptor::ArgumentRegister() { return x0; } - -void TypeofDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {x3}; - data->InitializePlatformSpecific(arraysize(registers), 
registers); -} - -void CallTrampolineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: target - // x0: number of arguments - Register registers[] = {x1, x0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x0 : number of arguments (on the stack, not including receiver) - // x1 : the target to call - // x4 : arguments list length (untagged) - // x2 : arguments list (FixedArray) - Register registers[] = {x1, x0, x4, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallForwardVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: target - // x0: number of arguments - // x2: start index (to supported rest parameters) - Register registers[] = {x1, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallFunctionTemplateDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1 : function template info - // x2 : number of arguments (on the stack, not including receiver) - Register registers[] = {x1, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallWithSpreadDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x0 : number of arguments (on the stack, not including receiver) - // x1 : the target to call - // x2 : the object to spread - Register registers[] = {x1, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CallWithArrayLikeDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1 : the target to call - // x2 : the arguments list - Register registers[] = {x1, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x0 : number of arguments (on the stack, not including receiver) - // x1 : the target to call - // x3 : the new target - // x4 : arguments list length (untagged) - // x2 : arguments list (FixedArray) - Register registers[] = {x1, x3, x0, x4, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructForwardVarargsDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x3: new target - // x1: target - // x0: number of arguments - // x2: start index (to supported rest parameters) - Register registers[] = {x1, x3, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructWithSpreadDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x0 : number of arguments (on the stack, not including receiver) - // x1 : the target to call - // x3 : the new target - // x2 : the object to spread - Register registers[] = {x1, x3, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructWithArrayLikeDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1 : the target to call - // x3 : the new target - // x2 : the arguments list - Register registers[] = {x1, x3, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ConstructStubDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x3: new target - // x1: target - // x0: number of arguments - // x2: allocation site or undefined - 
Register registers[] = {x1, x3, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void AbortDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {x1}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void CompareDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: left operand - // x0: right operand - Register registers[] = {x1, x0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void Compare_BaselineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: left operand - // x0: right operand - // x2: feedback slot - Register registers[] = {x1, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void BinaryOpDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: left operand - // x0: right operand - Register registers[] = {x1, x0}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void BinaryOp_BaselineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - // x1: left operand - // x0: right operand - // x2: feedback slot - Register registers[] = {x1, x0, x2}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ApiCallbackDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - x1, // kApiFunctionAddress - x2, // kArgc - x3, // kCallData - x0, // kHolder - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterDispatchDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister, - kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterPushArgsThenCallDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - x0, // argument count (not including receiver) - x2, // address of first argument - x1 // the target callable to be call - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void InterpreterPushArgsThenConstructDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - x0, // argument count (not including receiver) - x4, // address of the first argument - x1, // constructor to call - x3, // new target - x2, // allocation site feedback if available, undefined otherwise - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void ResumeGeneratorDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - x0, // the value to pass to the generator - x1 // the JSGeneratorObject to resume - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void FrameDropperTrampolineDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = { - x1, // loaded new FP - }; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -void RunMicrotasksEntryDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - Register registers[] = {x0, x1}; - data->InitializePlatformSpecific(arraysize(registers), registers); -} - -} // namespace internal -} // namespace v8 - 
-#endif // V8_TARGET_ARCH_ARM64 diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 6a33f864ab7d73..8fb9bbfd7b7b7f 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -7,12 +7,12 @@ #include -#include "src/common/globals.h" - #include "src/base/bits.h" #include "src/codegen/arm64/assembler-arm64-inl.h" #include "src/codegen/arm64/assembler-arm64.h" #include "src/codegen/macro-assembler.h" +#include "src/common/globals.h" +#include "src/execution/isolate-data.h" namespace v8 { namespace internal { @@ -1037,7 +1037,7 @@ void TurboAssembler::InitializeRootRegister() { ExternalReference isolate_root = ExternalReference::isolate_root(isolate()); Mov(kRootRegister, Operand(isolate_root)); #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE - Mov(kPointerCageBaseRegister, Operand(isolate_root)); + LoadRootRelative(kPtrComprCageBaseRegister, IsolateData::cage_base_offset()); #endif } @@ -1200,7 +1200,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { if (offset.IsImmediate()) { DCHECK_GE(offset.ImmediateValue(), 0); - } else if (emit_debug_code()) { + } else if (FLAG_debug_code) { Cmp(xzr, offset); Check(le, AbortReason::kStackAccessBelowStackPointer); } @@ -1212,7 +1212,7 @@ template void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { if (offset.IsImmediate()) { DCHECK_GE(offset.ImmediateValue(), 0); - } else if (emit_debug_code()) { + } else if (FLAG_debug_code) { Cmp(xzr, offset); Check(le, AbortReason::kStackAccessBelowStackPointer); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index a3570b80354996..b18ff554553a5b 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -10,6 +10,7 @@ #include "src/codegen/callable.h" #include "src/codegen/code-factory.h" #include "src/codegen/external-reference-table.h" +#include "src/codegen/interface-descriptors-inl.h" #include "src/codegen/macro-assembler-inl.h" #include "src/codegen/register-configuration.h" #include "src/codegen/reloc-info.h" @@ -52,7 +53,7 @@ int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode, int bytes = list.Count() * kXRegSizeInBits / 8; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { DCHECK_EQ(kCallerSavedV.Count() % 2, 0); bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8; } @@ -69,7 +70,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, int bytes = list.Count() * kXRegSizeInBits / 8; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { DCHECK_EQ(kCallerSavedV.Count() % 2, 0); PushCPURegList(kCallerSavedV); bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8; @@ -79,7 +80,7 @@ int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion) { int bytes = 0; - if (fp_mode == kSaveFPRegs) { + if (fp_mode == SaveFPRegsMode::kSave) { DCHECK_EQ(kCallerSavedV.Count() % 2, 0); PopCPURegList(kCallerSavedV); bytes += kCallerSavedV.Count() * kDRegSizeInBits / 8; @@ -1266,7 +1267,7 @@ void MacroAssembler::PopCalleeSavedRegisters() { } void TurboAssembler::AssertSpAligned() { - if (emit_debug_code()) { + if (FLAG_debug_code) { HardAbortScope hard_abort(this); // Avoid calls to Abort. 
// Arm64 requires the stack pointer to be 16-byte aligned prior to address // calculation. @@ -1299,7 +1300,7 @@ void TurboAssembler::CopyDoubleWords(Register dst, Register src, Register count, CopyDoubleWordsMode mode) { DCHECK(!AreAliased(dst, src, count)); - if (emit_debug_code()) { + if (FLAG_debug_code) { Register pointer1 = dst; Register pointer2 = src; if (mode == kSrcLessThanDst) { @@ -1374,7 +1375,7 @@ void TurboAssembler::SlotAddress(Register dst, Register slot_offset) { } void TurboAssembler::AssertFPCRState(Register fpcr) { - if (emit_debug_code()) { + if (FLAG_debug_code) { Label unexpected_mode, done; UseScratchRegisterScope temps(this); if (fpcr.IsNone()) { @@ -1473,7 +1474,7 @@ void TurboAssembler::Swap(VRegister lhs, VRegister rhs) { } void TurboAssembler::AssertSmi(Register object, AbortReason reason) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); Tst(object, kSmiTagMask); Check(eq, reason); @@ -1481,7 +1482,7 @@ void TurboAssembler::AssertSmi(Register object, AbortReason reason) { } void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { - if (emit_debug_code()) { + if (FLAG_debug_code) { STATIC_ASSERT(kSmiTag == 0); Tst(object, kSmiTagMask); Check(ne, reason); @@ -1489,7 +1490,7 @@ void MacroAssembler::AssertNotSmi(Register object, AbortReason reason) { } void MacroAssembler::AssertConstructor(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAConstructor); UseScratchRegisterScope temps(this); @@ -1504,7 +1505,7 @@ void MacroAssembler::AssertConstructor(Register object) { } void MacroAssembler::AssertFunction(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAFunction); UseScratchRegisterScope temps(this); @@ -1517,7 +1518,7 @@ void MacroAssembler::AssertFunction(Register object) { } void MacroAssembler::AssertBoundFunction(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotABoundFunction); UseScratchRegisterScope temps(this); @@ -1529,7 +1530,7 @@ void MacroAssembler::AssertBoundFunction(Register object) { } void MacroAssembler::AssertGeneratorObject(Register object) { - if (!emit_debug_code()) return; + if (!FLAG_debug_code) return; AssertNotSmi(object, AbortReason::kOperandIsASmiAndNotAGeneratorObject); // Load map @@ -1555,7 +1556,7 @@ void MacroAssembler::AssertGeneratorObject(Register object) { } void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { - if (emit_debug_code()) { + if (FLAG_debug_code) { UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); Label done_checking; @@ -1569,7 +1570,7 @@ void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) { } void TurboAssembler::AssertPositiveOrZero(Register value) { - if (emit_debug_code()) { + if (FLAG_debug_code) { Label done; int sign_bit = value.Is64Bits() ? 
kXSignBit : kWSignBit; Tbz(value, sign_bit, &done); @@ -1599,8 +1600,8 @@ void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, bool builtin_exit_frame) { Mov(x1, builtin); - Handle code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs, - kArgvOnStack, builtin_exit_frame); + Handle code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore, + ArgvMode::kStack, builtin_exit_frame); Jump(code, RelocInfo::CODE_TARGET); } @@ -1942,7 +1943,7 @@ void TurboAssembler::CallBuiltin(int builtin_index) { Ldr(scratch, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Call(scratch); } - if (FLAG_code_comments) RecordComment("]"); + RecordComment("]"); } void TurboAssembler::TailCallBuiltin(int builtin_index) { @@ -1971,7 +1972,7 @@ void TurboAssembler::TailCallBuiltin(int builtin_index) { Ldr(temp, Operand(entry, RelocInfo::OFF_HEAP_TARGET)); Jump(temp); } - if (FLAG_code_comments) RecordComment("]"); + RecordComment("]"); } void TurboAssembler::LoadCodeObjectEntry(Register destination, @@ -2059,7 +2060,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { #endif Poke(x17, 0); - if (emit_debug_code()) { + if (FLAG_debug_code) { // Verify that the slot below fp[kSPOffset]-8 points to the signed return // location. Ldr(x16, MemOperand(fp, ExitFrameConstants::kSPOffset)); @@ -2189,7 +2190,7 @@ void MacroAssembler::StackOverflowCheck(Register num_args, void MacroAssembler::InvokePrologue(Register formal_parameter_count, Register actual_argument_count, Label* done, - InvokeFlag flag) { + InvokeType type) { // x0: actual arguments count. // x1: function (passed through to callee). // x2: expected arguments count. @@ -2320,9 +2321,9 @@ void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target, void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. - DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame()); + DCHECK_IMPLIES(type == InvokeType::kCall, has_frame()); DCHECK_EQ(function, x1); DCHECK_IMPLIES(new_target.is_valid(), new_target == x3); @@ -2341,7 +2342,7 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, } Label done; - InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag); + InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type); // If actual != expected, InvokePrologue will have handled the call through // the argument adaptor mechanism. @@ -2352,11 +2353,13 @@ void MacroAssembler::InvokeFunctionCode(Register function, Register new_target, Register code = kJavaScriptCallCodeStartRegister; LoadTaggedPointerField(code, FieldMemOperand(function, JSFunction::kCodeOffset)); - if (flag == CALL_FUNCTION) { - CallCodeObject(code); - } else { - DCHECK(flag == JUMP_FUNCTION); - JumpCodeObject(code); + switch (type) { + case InvokeType::kCall: + CallCodeObject(code); + break; + case InvokeType::kJump: + JumpCodeObject(code); + break; } B(&done); @@ -2377,9 +2380,9 @@ Operand MacroAssembler::ReceiverOperand(Register arg_count) { void MacroAssembler::InvokeFunctionWithNewTarget( Register function, Register new_target, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. 
- DCHECK(flag == JUMP_FUNCTION || has_frame()); + DCHECK(type == InvokeType::kJump || has_frame()); // Contract with called JS functions requires that function is passed in x1. // (See FullCodeGenerator::Generate().) @@ -2400,15 +2403,15 @@ void MacroAssembler::InvokeFunctionWithNewTarget( SharedFunctionInfo::kFormalParameterCountOffset)); InvokeFunctionCode(function, new_target, expected_parameter_count, - actual_parameter_count, flag); + actual_parameter_count, type); } void MacroAssembler::InvokeFunction(Register function, Register expected_parameter_count, Register actual_parameter_count, - InvokeFlag flag) { + InvokeType type) { // You can't call a function without a valid frame. - DCHECK(flag == JUMP_FUNCTION || has_frame()); + DCHECK(type == InvokeType::kJump || has_frame()); // Contract with called JS functions requires that function is passed in x1. // (See FullCodeGenerator::Generate().) @@ -2419,7 +2422,7 @@ void MacroAssembler::InvokeFunction(Register function, FieldMemOperand(function, JSFunction::kContextOffset)); InvokeFunctionCode(function, no_reg, expected_parameter_count, - actual_parameter_count, flag); + actual_parameter_count, type); } void TurboAssembler::TryConvertDoubleToInt64(Register result, @@ -2664,7 +2667,7 @@ void MacroAssembler::LeaveExitFrame(bool restore_doubles, ExternalReference::Create(IsolateAddressId::kContextAddress, isolate())); Ldr(cp, MemOperand(scratch)); - if (emit_debug_code()) { + if (FLAG_debug_code) { // Also emit debug code to clear the cp in the top frame. Mov(scratch2, Operand(Context::kInvalidContext)); Mov(scratch, ExternalReference::Create(IsolateAddressId::kContextAddress, @@ -2715,15 +2718,6 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, IncrementCounter(counter, -value, scratch1, scratch2); } -void MacroAssembler::MaybeDropFrames() { - // Check whether we need to drop frames to restart a function on the stack. - Mov(x1, ExternalReference::debug_restart_fp_address(isolate())); - Ldr(x1, MemOperand(x1)); - Tst(x1, x1); - Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET, - ne); -} - void MacroAssembler::JumpIfObjectType(Register object, Register map, Register type_reg, InstanceType type, Label* if_cond_pass, Condition cond) { @@ -2860,14 +2854,14 @@ void TurboAssembler::DecompressTaggedPointer(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressTaggedPointer"); Ldr(destination.W(), field_operand); - Add(destination, kPointerCageBaseRegister, destination); + Add(destination, kPtrComprCageBaseRegister, destination); RecordComment("]"); } void TurboAssembler::DecompressTaggedPointer(const Register& destination, const Register& source) { RecordComment("[ DecompressTaggedPointer"); - Add(destination, kPointerCageBaseRegister, Operand(source, UXTW)); + Add(destination, kPtrComprCageBaseRegister, Operand(source, UXTW)); RecordComment("]"); } @@ -2875,7 +2869,7 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination, const MemOperand& field_operand) { RecordComment("[ DecompressAnyTagged"); Ldr(destination.W(), field_operand); - Add(destination, kPointerCageBaseRegister, destination); + Add(destination, kPtrComprCageBaseRegister, destination); RecordComment("]"); } @@ -2904,7 +2898,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, Label done; // Skip the barrier if writing a smi. 
- if (smi_check == INLINE_SMI_CHECK) { + if (smi_check == SmiCheck::kInline) { JumpIfSmi(value, &done); } @@ -2912,7 +2906,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, // of the object, so offset must be a multiple of kTaggedSize. DCHECK(IsAligned(offset, kTaggedSize)); - if (emit_debug_code()) { + if (FLAG_debug_code) { Label ok; UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); @@ -2924,7 +2918,7 @@ void MacroAssembler::RecordWriteField(Register object, int offset, } RecordWrite(object, Operand(offset - kHeapObjectTag), value, lr_status, - save_fp, remembered_set_action, OMIT_SMI_CHECK); + save_fp, remembered_set_action, SmiCheck::kOmit); Bind(&done); } @@ -3069,7 +3063,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, ASM_LOCATION_IN_ASSEMBLER("MacroAssembler::RecordWrite"); DCHECK(!AreAliased(object, value)); - if (emit_debug_code()) { + if (FLAG_debug_code) { UseScratchRegisterScope temps(this); Register temp = temps.AcquireX(); @@ -3079,7 +3073,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite); } - if ((remembered_set_action == OMIT_REMEMBERED_SET && + if ((remembered_set_action == RememberedSetAction::kOmit && !FLAG_incremental_marking) || FLAG_disable_write_barriers) { return; @@ -3089,7 +3083,7 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, // catch stores of smis and stores into the young generation. Label done; - if (smi_check == INLINE_SMI_CHECK) { + if (smi_check == SmiCheck::kInline) { DCHECK_EQ(0, kSmiTag); JumpIfSmi(value, &done); } @@ -3112,13 +3106,13 @@ void MacroAssembler::RecordWrite(Register object, Operand offset, } void TurboAssembler::Assert(Condition cond, AbortReason reason) { - if (emit_debug_code()) { + if (FLAG_debug_code) { Check(cond, reason); } } void TurboAssembler::AssertUnreachable(AbortReason reason) { - if (emit_debug_code()) Abort(reason); + if (FLAG_debug_code) Abort(reason); } void TurboAssembler::Check(Condition cond, AbortReason reason) { @@ -3133,10 +3127,10 @@ void TurboAssembler::Trap() { Brk(0); } void TurboAssembler::DebugBreak() { Debug("DebugBreak", 0, BREAK); } void TurboAssembler::Abort(AbortReason reason) { -#ifdef DEBUG - RecordComment("Abort message: "); - RecordComment(GetAbortReason(reason)); -#endif + if (FLAG_code_comments) { + RecordComment("Abort message: "); + RecordComment(GetAbortReason(reason)); + } // Avoid emitting call to builtin if requested. if (trap_on_abort()) { diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index a749676cccdac8..7bc6432c36062c 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -126,8 +126,6 @@ inline BranchType InvertBranchType(BranchType type) { } } -enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; -enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved }; enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg }; @@ -1849,17 +1847,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // ---- Calling / Jumping helpers ---- void CallRuntime(const Runtime::Function* f, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs); + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore); // Convenience function: Same as above, but takes the fid instead. 
void CallRuntime(Runtime::FunctionId fid, int num_arguments, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) { + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles); } // Convenience function: Same as above, but takes the fid instead. void CallRuntime(Runtime::FunctionId fid, - SaveFPRegsMode save_doubles = kDontSaveFPRegs) { + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) { const Runtime::Function* function = Runtime::FunctionForId(fid); CallRuntime(function, function->nargs, save_doubles); } @@ -1882,7 +1880,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { // 'call_kind' must be x5. void InvokePrologue(Register expected_parameter_count, Register actual_parameter_count, Label* done, - InvokeFlag flag); + InvokeType type); // On function call, call into the debugger. void CallDebugOnFunctionCall(Register fun, Register new_target, @@ -1890,20 +1888,17 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { Register actual_parameter_count); void InvokeFunctionCode(Register function, Register new_target, Register expected_parameter_count, - Register actual_parameter_count, InvokeFlag flag); + Register actual_parameter_count, InvokeType type); // Invoke the JavaScript function in the given register. // Changes the current context to the context in the function before invoking. void InvokeFunctionWithNewTarget(Register function, Register new_target, Register actual_parameter_count, - InvokeFlag flag); + InvokeType type); void InvokeFunction(Register function, Register expected_parameter_count, - Register actual_parameter_count, InvokeFlag flag); + Register actual_parameter_count, InvokeType type); // ---- Code generation helpers ---- - // Frame restart support - void MaybeDropFrames(); - // --------------------------------------------------------------------------- // Support functions. @@ -2032,16 +2027,16 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler { void RecordWriteField( Register object, int offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); // For a given |object| notify the garbage collector that the slot at |offset| // has been written. |value| is the object being stored. void RecordWrite( Register object, Operand offset, Register value, LinkRegisterStatus lr_status, SaveFPRegsMode save_fp, - RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, - SmiCheck smi_check = INLINE_SMI_CHECK); + RememberedSetAction remembered_set_action = RememberedSetAction::kEmit, + SmiCheck smi_check = SmiCheck::kInline); // --------------------------------------------------------------------------- // Debugging. 
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index 819806319af6ce..5b234526a4361a 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -102,7 +102,7 @@ class CPURegister : public RegisterBase { } static constexpr CPURegister Create(int code, int size, RegisterType type) { - CONSTEXPR_DCHECK(IsValid(code, size, type)); + DCHECK(IsValid(code, size, type)); return CPURegister{code, size, type}; } @@ -320,7 +320,7 @@ class VRegister : public CPURegister { } static constexpr VRegister Create(int code, int size, int lane_count = 1) { - CONSTEXPR_DCHECK(IsValidLaneCount(lane_count)); + DCHECK(IsValidLaneCount(lane_count)); return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister), lane_count); } @@ -413,7 +413,7 @@ class VRegister : public CPURegister { static constexpr int kMaxNumRegisters = kNumberOfVRegisters; STATIC_ASSERT(kMaxNumRegisters == kDoubleAfterLast); - static VRegister from_code(int code) { + static constexpr VRegister from_code(int code) { // Always return a D register. return VRegister::Create(code, kDRegSizeInBits); } @@ -477,9 +477,9 @@ ALIAS_REGISTER(Register, kRootRegister, x26); ALIAS_REGISTER(Register, rr, x26); // Pointer cage base register. #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE -ALIAS_REGISTER(Register, kPointerCageBaseRegister, x28); +ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, x28); #else -ALIAS_REGISTER(Register, kPointerCageBaseRegister, kRootRegister); +ALIAS_REGISTER(Register, kPtrComprCageBaseRegister, kRootRegister); #endif // Context pointer register. ALIAS_REGISTER(Register, cp, x27); diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 95983705abd350..bb80d366de317d 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -69,7 +69,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) { #endif options.inline_offheap_trampolines &= !generating_embedded_builtin; #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 - const base::AddressRegion& code_range = isolate->heap()->code_range(); + const base::AddressRegion& code_range = isolate->heap()->code_region(); DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty()); options.code_range_start = code_range.begin(); #endif @@ -180,7 +180,6 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options, : buffer_(std::move(buffer)), options_(options), enabled_cpu_features_(0), - emit_debug_code_(FLAG_debug_code), predictable_code_size_(false), constant_pool_available_(false), jump_optimization_info_(nullptr) { @@ -298,6 +297,7 @@ Handle AssemblerBase::GetEmbeddedObject( int Assembler::WriteCodeComments() { + if (!FLAG_code_comments) return 0; CHECK_IMPLIES(code_comments_writer_.entry_count() > 0, options().emit_code_comments); if (code_comments_writer_.entry_count() == 0) return 0; diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 7066905966458e..98cca61a7c691c 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -222,9 +222,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { const AssemblerOptions& options() const { return options_; } - bool emit_debug_code() const { return emit_debug_code_; } - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - bool predictable_code_size() const { return predictable_code_size_; } void set_predictable_code_size(bool value) { predictable_code_size_ = value; } @@ 
-291,7 +288,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { // Record an inline code comment that can be used by a disassembler. // Use --code-comments to enable. - void RecordComment(const char* msg) { + V8_INLINE void RecordComment(const char* msg) { + // Set explicit dependency on --code-comments for dead-code elimination in + // release builds. + if (!FLAG_code_comments) return; if (options().emit_code_comments) { code_comments_writer_.Add(pc_offset(), std::string(msg)); } @@ -346,7 +346,7 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { DCHECK(!RelocInfo::IsNone(rmode)); if (options().disable_reloc_info_for_patching) return false; if (RelocInfo::IsOnlyForSerializer(rmode) && - !options().record_reloc_info_for_serialization && !emit_debug_code()) { + !options().record_reloc_info_for_serialization && !FLAG_debug_code) { return false; } return true; @@ -378,7 +378,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { const AssemblerOptions options_; uint64_t enabled_cpu_features_; - bool emit_debug_code_; bool predictable_code_size_; // Indicates whether the constant pool can be accessed, which is only possible @@ -392,20 +391,6 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { friend class ConstantPoolUnavailableScope; }; -// Avoids emitting debug code during the lifetime of this scope object. -class V8_NODISCARD DontEmitDebugCodeScope { - public: - explicit DontEmitDebugCodeScope(AssemblerBase* assembler) - : assembler_(assembler), old_value_(assembler->emit_debug_code()) { - assembler_->set_emit_debug_code(false); - } - ~DontEmitDebugCodeScope() { assembler_->set_emit_debug_code(old_value_); } - - private: - AssemblerBase* assembler_; - bool old_value_; -}; - // Enable a specified feature within a scope. class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope { public: @@ -425,7 +410,7 @@ class V8_EXPORT_PRIVATE V8_NODISCARD CpuFeatureScope { #else CpuFeatureScope(AssemblerBase* assembler, CpuFeature f, CheckPolicy check = kCheckSupported) {} - ~CpuFeatureScope() { // NOLINT (modernize-use-equals-default) + ~CpuFeatureScope() { // Define a destructor to avoid unused variable warnings. 
} #endif diff --git a/deps/v8/src/codegen/bailout-reason.h b/deps/v8/src/codegen/bailout-reason.h index 57bbbca72300d1..e8afa74e1689b5 100644 --- a/deps/v8/src/codegen/bailout-reason.h +++ b/deps/v8/src/codegen/bailout-reason.h @@ -24,6 +24,7 @@ namespace internal { V(kExpectedFeedbackVector, "Expected feedback vector") \ V(kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry, \ "The function_data field should be a BytecodeArray on interpreter entry") \ + V(kFpuTopIsNotZeroInDeoptimizer, "FPU TOP is not zero in deoptimizer") \ V(kInputStringTooLong, "Input string too long") \ V(kInvalidBytecode, "Invalid bytecode") \ V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ") \ diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc index ece8200023da4e..854969f8cb4a3c 100644 --- a/deps/v8/src/codegen/code-factory.cc +++ b/deps/v8/src/codegen/code-factory.cc @@ -31,25 +31,35 @@ Handle CodeFactory::CEntry(Isolate* isolate, int result_size, const ArgvMode am = argv_mode; const bool be = builtin_exit_frame; - if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) { + if (rs == 1 && sd == SaveFPRegsMode::kIgnore && am == ArgvMode::kStack && + !be) { return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) { + } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore && + am == ArgvMode::kStack && be) { return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 1 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) { + } else if (rs == 1 && sd == SaveFPRegsMode::kIgnore && + am == ArgvMode::kRegister && !be) { return CENTRY_CODE(Return1, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit); - } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && !be) { + } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && + !be) { return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 1 && sd == kSaveFPRegs && am == kArgvOnStack && be) { + } else if (rs == 1 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && + be) { return CENTRY_CODE(Return1, SaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && !be) { + } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && + am == ArgvMode::kStack && !be) { return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvOnStack && be) { + } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && + am == ArgvMode::kStack && be) { return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvOnStack, BuiltinExit); - } else if (rs == 2 && sd == kDontSaveFPRegs && am == kArgvInRegister && !be) { + } else if (rs == 2 && sd == SaveFPRegsMode::kIgnore && + am == ArgvMode::kRegister && !be) { return CENTRY_CODE(Return2, DontSaveFPRegs, ArgvInRegister, NoBuiltinExit); - } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && !be) { + } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && + !be) { return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, NoBuiltinExit); - } else if (rs == 2 && sd == kSaveFPRegs && am == kArgvOnStack && be) { + } else if (rs == 2 && sd == SaveFPRegsMode::kSave && am == ArgvMode::kStack && + be) { return CENTRY_CODE(Return2, SaveFPRegs, ArgvOnStack, BuiltinExit); } @@ -70,7 +80,7 @@ Callable CodeFactory::CallApiCallback(Isolate* isolate) { // static Callable 
CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) { - return typeof_mode == NOT_INSIDE_TYPEOF + return typeof_mode == TypeofMode::kNotInside ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalICTrampoline) : Builtins::CallableFor( isolate, Builtins::kLoadGlobalICInsideTypeofTrampoline); @@ -79,7 +89,7 @@ Callable CodeFactory::LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode) { // static Callable CodeFactory::LoadGlobalICInOptimizedCode(Isolate* isolate, TypeofMode typeof_mode) { - return typeof_mode == NOT_INSIDE_TYPEOF + return typeof_mode == TypeofMode::kNotInside ? Builtins::CallableFor(isolate, Builtins::kLoadGlobalIC) : Builtins::CallableFor(isolate, Builtins::kLoadGlobalICInsideTypeof); @@ -232,16 +242,6 @@ Callable CodeFactory::ResumeGenerator(Isolate* isolate) { return Builtins::CallableFor(isolate, Builtins::kResumeGeneratorTrampoline); } -// static -Callable CodeFactory::FrameDropperTrampoline(Isolate* isolate) { - return Builtins::CallableFor(isolate, Builtins::kFrameDropperTrampoline); -} - -// static -Callable CodeFactory::HandleDebuggerStatement(Isolate* isolate) { - return Builtins::CallableFor(isolate, Builtins::kHandleDebuggerStatement); -} - // static Callable CodeFactory::FastNewFunctionContext(Isolate* isolate, ScopeType scope_type) { @@ -388,8 +388,8 @@ Callable CodeFactory::InterpreterPushArgsThenConstruct( Callable CodeFactory::InterpreterCEntry(Isolate* isolate, int result_size) { // Note: If we ever use fpregs in the interpreter then we will need to // save fpregs too. - Handle code = CodeFactory::CEntry(isolate, result_size, kDontSaveFPRegs, - kArgvInRegister); + Handle code = CodeFactory::CEntry( + isolate, result_size, SaveFPRegsMode::kIgnore, ArgvMode::kRegister); if (result_size == 1) { return Callable(code, InterpreterCEntry1Descriptor{}); } else { diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h index aab297704536bd..e55de10533e411 100644 --- a/deps/v8/src/codegen/code-factory.h +++ b/deps/v8/src/codegen/code-factory.h @@ -28,10 +28,10 @@ class V8_EXPORT_PRIVATE CodeFactory final { // is exported here. static Handle RuntimeCEntry(Isolate* isolate, int result_size = 1); - static Handle CEntry(Isolate* isolate, int result_size = 1, - SaveFPRegsMode save_doubles = kDontSaveFPRegs, - ArgvMode argv_mode = kArgvOnStack, - bool builtin_exit_frame = false); + static Handle CEntry( + Isolate* isolate, int result_size = 1, + SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore, + ArgvMode argv_mode = ArgvMode::kStack, bool builtin_exit_frame = false); // Initial states for ICs. 
static Callable LoadGlobalIC(Isolate* isolate, TypeofMode typeof_mode); @@ -49,9 +49,6 @@ class V8_EXPORT_PRIVATE CodeFactory final { static Callable ResumeGenerator(Isolate* isolate); - static Callable FrameDropperTrampoline(Isolate* isolate); - static Callable HandleDebuggerStatement(Isolate* isolate); - static Callable BinaryOperation(Isolate* isolate, Operation op); static Callable ApiGetter(Isolate* isolate); diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 492e6aaf3713bc..5995a766d1164a 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -103,7 +103,7 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, std::initializer_list extra_nodes) { Label ok(this); Label not_ok(this, Label::kDeferred); - if (message != nullptr && FLAG_code_comments) { + if (message != nullptr) { Comment("[ Assert: ", message); } else { Comment("[ Assert"); @@ -1368,6 +1368,7 @@ TNode CodeStubAssembler::AllocateInNewSpace( TNode CodeStubAssembler::Allocate(TNode size_in_bytes, AllocationFlags flags) { Comment("Allocate"); + if (FLAG_single_generation) flags |= kPretenured; bool const new_space = !(flags & kPretenured); bool const allow_large_objects = flags & kAllowLargeObjectAllocation; // For optimized allocations, we don't allow the allocation to happen in a @@ -1574,8 +1575,8 @@ TNode CodeStubAssembler::LoadExternalPointerFromObject( TNode entry = Load(table, table_offset); if (external_pointer_tag != 0) { - TNode tag = UintPtrConstant(external_pointer_tag); - entry = UncheckedCast(WordXor(entry, tag)); + TNode tag = UintPtrConstant(~external_pointer_tag); + entry = UncheckedCast(WordAnd(entry, tag)); } return UncheckedCast(UncheckedCast(entry)); #else @@ -1603,7 +1604,7 @@ void CodeStubAssembler::StoreExternalPointerToObject( TNode value = UncheckedCast(pointer); if (external_pointer_tag != 0) { TNode tag = UintPtrConstant(external_pointer_tag); - value = UncheckedCast(WordXor(pointer, tag)); + value = UncheckedCast(WordOr(pointer, tag)); } StoreNoWriteBarrier(MachineType::PointerRepresentation(), table, table_offset, value); @@ -1619,6 +1620,8 @@ TNode CodeStubAssembler::LoadFromParentFrame(int offset) { TNode CodeStubAssembler::LoadAndUntagObjectField( TNode object, int offset) { + // Please use LoadMap(object) instead. + DCHECK_NE(offset, HeapObject::kMapOffset); if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN offset += 4; @@ -1631,6 +1634,8 @@ TNode CodeStubAssembler::LoadAndUntagObjectField( TNode CodeStubAssembler::LoadAndUntagToWord32ObjectField( TNode object, int offset) { + // Please use LoadMap(object) instead. + DCHECK_NE(offset, HeapObject::kMapOffset); if (SmiValuesAre32Bits()) { #if V8_TARGET_LITTLE_ENDIAN offset += 4; @@ -1656,7 +1661,15 @@ TNode CodeStubAssembler::GetInstanceTypeMap(InstanceType instance_type) { } TNode CodeStubAssembler::LoadMap(TNode object) { - return LoadObjectField(object, HeapObject::kMapOffset); + TNode map = LoadObjectField(object, HeapObject::kMapOffset); +#ifdef V8_MAP_PACKING + // Check the loaded map is unpacked. i.e. 
the lowest two bits != 0b10 + CSA_ASSERT(this, + WordNotEqual(WordAnd(BitcastTaggedToWord(map), + IntPtrConstant(Internals::kMapWordXorMask)), + IntPtrConstant(Internals::kMapWordSignature))); +#endif + return map; } TNode CodeStubAssembler::LoadInstanceType(TNode object) { @@ -2033,6 +2046,13 @@ void CodeStubAssembler::DispatchMaybeObject(TNode maybe_object, Goto(if_strong); } +void CodeStubAssembler::AssertHasValidMap(TNode object) { +#ifdef V8_MAP_PACKING + // Test if the map is an unpacked and valid map + CSA_ASSERT(this, IsMap(LoadMap(object))); +#endif +} + TNode CodeStubAssembler::IsStrong(TNode value) { return Word32Equal(Word32And(TruncateIntPtrToInt32( BitcastTaggedToWordForTagAndSmiBits(value)), @@ -2943,12 +2963,14 @@ void CodeStubAssembler::StoreObjectField(TNode object, void CodeStubAssembler::UnsafeStoreObjectFieldNoWriteBarrier( TNode object, int offset, TNode value) { + DCHECK_NE(HeapObject::kMapOffset, offset); // Use StoreMap instead. OptimizedStoreFieldUnsafeNoWriteBarrier(MachineRepresentation::kTagged, object, offset, value); } void CodeStubAssembler::StoreMap(TNode object, TNode map) { OptimizedStoreMap(object, map); + AssertHasValidMap(object); } void CodeStubAssembler::StoreMapNoWriteBarrier(TNode object, @@ -2958,16 +2980,19 @@ void CodeStubAssembler::StoreMapNoWriteBarrier(TNode object, void CodeStubAssembler::StoreMapNoWriteBarrier(TNode object, TNode map) { - OptimizedStoreFieldAssertNoWriteBarrier(MachineRepresentation::kTaggedPointer, - object, HeapObject::kMapOffset, map); + OptimizedStoreMap(object, map); + AssertHasValidMap(object); } void CodeStubAssembler::StoreObjectFieldRoot(TNode object, int offset, RootIndex root_index) { - if (RootsTable::IsImmortalImmovable(root_index)) { - StoreObjectFieldNoWriteBarrier(object, offset, LoadRoot(root_index)); + TNode root = LoadRoot(root_index); + if (offset == HeapObject::kMapOffset) { + StoreMap(object, CAST(root)); + } else if (RootsTable::IsImmortalImmovable(root_index)) { + StoreObjectFieldNoWriteBarrier(object, offset, root); } else { - StoreObjectField(object, offset, LoadRoot(root_index)); + StoreObjectField(object, offset, root); } } @@ -4762,7 +4787,11 @@ void CodeStubAssembler::MoveElements(ElementsKind kind, TNode length) { Label finished(this); Label needs_barrier(this); +#ifdef V8_DISABLE_WRITE_BARRIERS + const bool needs_barrier_check = false; +#else const bool needs_barrier_check = !IsDoubleElementsKind(kind); +#endif // V8_DISABLE_WRITE_BARRIERS DCHECK(IsFastElementsKind(kind)); CSA_ASSERT(this, IsFixedArrayWithKind(elements, kind)); @@ -4847,7 +4876,11 @@ void CodeStubAssembler::CopyElements(ElementsKind kind, WriteBarrierMode write_barrier) { Label finished(this); Label needs_barrier(this); +#ifdef V8_DISABLE_WRITE_BARRIERS + const bool needs_barrier_check = false; +#else const bool needs_barrier_check = !IsDoubleElementsKind(kind); +#endif // V8_DISABLE_WRITE_BARRIERS DCHECK(IsFastElementsKind(kind)); CSA_ASSERT(this, IsFixedArrayWithKind(dst_elements, kind)); @@ -5294,6 +5327,10 @@ TNode CodeStubAssembler::GrowElementsCapacity( return new_elements; } +template TNode CodeStubAssembler::GrowElementsCapacity( + TNode, TNode, ElementsKind, ElementsKind, + TNode, TNode, compiler::CodeAssemblerLabel*); + void CodeStubAssembler::InitializeAllocationMemento( TNode base, TNode base_allocation_size, TNode allocation_site) { @@ -6036,6 +6073,13 @@ TNode CodeStubAssembler::IsNoElementsProtectorCellInvalid() { return TaggedEqual(cell_value, invalid); } +TNode 
CodeStubAssembler::IsMegaDOMProtectorCellInvalid() { + TNode invalid = SmiConstant(Protectors::kProtectorInvalid); + TNode cell = MegaDOMProtectorConstant(); + TNode cell_value = LoadObjectField(cell, PropertyCell::kValueOffset); + return TaggedEqual(cell_value, invalid); +} + TNode CodeStubAssembler::IsArrayIteratorProtectorCellInvalid() { TNode invalid = SmiConstant(Protectors::kProtectorInvalid); TNode cell = ArrayIteratorProtectorConstant(); @@ -6285,14 +6329,27 @@ TNode CodeStubAssembler::IsJSObjectInstanceType( Int32Constant(FIRST_JS_OBJECT_TYPE)); } +TNode CodeStubAssembler::IsJSApiObjectInstanceType( + TNode instance_type) { + return InstanceTypeEqual(instance_type, JS_API_OBJECT_TYPE); +} + TNode CodeStubAssembler::IsJSObjectMap(TNode map) { return IsJSObjectInstanceType(LoadMapInstanceType(map)); } +TNode CodeStubAssembler::IsJSApiObjectMap(TNode map) { + return IsJSApiObjectInstanceType(LoadMapInstanceType(map)); +} + TNode CodeStubAssembler::IsJSObject(TNode object) { return IsJSObjectMap(LoadMap(object)); } +TNode CodeStubAssembler::IsJSApiObject(TNode object) { + return IsJSApiObjectMap(LoadMap(object)); +} + TNode CodeStubAssembler::IsJSFinalizationRegistryMap(TNode map) { return InstanceTypeEqual(LoadMapInstanceType(map), JS_FINALIZATION_REGISTRY_TYPE); @@ -7672,15 +7729,25 @@ TNode CodeStubAssembler::OrdinaryToPrimitive( TNode CodeStubAssembler::DecodeWord32(TNode word32, uint32_t shift, uint32_t mask) { DCHECK_EQ((mask >> shift) << shift, mask); - return Unsigned(Word32And(Word32Shr(word32, static_cast(shift)), - Int32Constant(mask >> shift))); + if ((std::numeric_limits::max() >> shift) == + ((std::numeric_limits::max() & mask) >> shift)) { + return Unsigned(Word32Shr(word32, static_cast(shift))); + } else { + return Unsigned(Word32And(Word32Shr(word32, static_cast(shift)), + Int32Constant(mask >> shift))); + } } TNode CodeStubAssembler::DecodeWord(TNode word, uint32_t shift, uintptr_t mask) { DCHECK_EQ((mask >> shift) << shift, mask); - return Unsigned(WordAnd(WordShr(word, static_cast(shift)), - IntPtrConstant(mask >> shift))); + if ((std::numeric_limits::max() >> shift) == + ((std::numeric_limits::max() & mask) >> shift)) { + return Unsigned(WordShr(word, static_cast(shift))); + } else { + return Unsigned(WordAnd(WordShr(word, static_cast(shift)), + IntPtrConstant(mask >> shift))); + } } TNode CodeStubAssembler::UpdateWord32(TNode word, @@ -8871,9 +8938,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( { Label slow_load(this, Label::kDeferred); - var_value = CallGetterIfAccessor(var_value.value(), object, - var_details.value(), context, - object, &slow_load, kCallJSGetter); + var_value = CallGetterIfAccessor( + var_value.value(), object, var_details.value(), context, object, + next_key, &slow_load, kCallJSGetter); Goto(&callback); BIND(&slow_load); @@ -9325,8 +9392,8 @@ template void CodeStubAssembler::LoadPropertyFromDictionary( // result of the getter call. TNode CodeStubAssembler::CallGetterIfAccessor( TNode value, TNode holder, TNode details, - TNode context, TNode receiver, Label* if_bailout, - GetOwnPropertyMode mode) { + TNode context, TNode receiver, TNode name, + Label* if_bailout, GetOwnPropertyMode mode) { TVARIABLE(Object, var_value, value); Label done(this), if_accessor_info(this, Label::kDeferred); @@ -9354,13 +9421,16 @@ TNode CodeStubAssembler::CallGetterIfAccessor( BIND(&if_callable); { - // Call the accessor. + // Call the accessor. No need to check side-effect mode here, since it + // will be checked later in DebugOnFunctionCall. 
var_value = Call(context, getter, receiver); Goto(&done); } BIND(&if_function_template_info); { + Label runtime(this, Label::kDeferred); + GotoIf(IsSideEffectFreeDebuggingActive(), &runtime); TNode cached_property_name = LoadObjectField( getter, FunctionTemplateInfo::kCachedPropertyNameOffset); GotoIfNot(IsTheHole(cached_property_name), if_bailout); @@ -9371,6 +9441,13 @@ TNode CodeStubAssembler::CallGetterIfAccessor( Builtins::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver, creation_context, getter, IntPtrConstant(0), receiver); Goto(&done); + + BIND(&runtime); + { + var_value = CallRuntime(Runtime::kGetProperty, context, holder, name, + receiver); + Goto(&done); + } } } else { Goto(&done); @@ -9505,7 +9582,7 @@ void CodeStubAssembler::TryGetOwnProperty( } TNode value = CallGetterIfAccessor(var_value->value(), object, var_details->value(), - context, receiver, if_bailout, mode); + context, receiver, unique_name, if_bailout, mode); *var_value = value; Goto(if_found_value); } @@ -9554,6 +9631,7 @@ void CodeStubAssembler::TryLookupElement( BIGUINT64_ELEMENTS, BIGINT64_ELEMENTS, }; + // TODO(v8:11111): Support RAB / GSAB. Label* labels[] = { &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, &if_isobjectorsmi, @@ -10811,6 +10889,12 @@ void CodeStubAssembler::EmitElementStore( TNode context, TVariable* maybe_converted_value) { CSA_ASSERT(this, Word32BinaryNot(IsJSProxy(object))); + // TODO(v8:11111): Fast path for RAB / GSAB backed TypedArrays. + if (IsRabGsabTypedArrayElementsKind(elements_kind)) { + GotoIf(Int32TrueConstant(), bailout); + return; + } + TNode elements = LoadElements(object); if (!(IsSmiOrObjectElementsKind(elements_kind) || IsSealedElementsKind(elements_kind) || @@ -11057,6 +11141,8 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, // Bail out if the object is not in new space. TNode object_word = BitcastTaggedToWord(object); + // TODO(v8:11641): Skip TrapAllocationMemento when allocation-site + // tracking is disabled. TNode object_page = PageFromAddress(object_word); { TNode page_flags = @@ -11102,15 +11188,19 @@ void CodeStubAssembler::TrapAllocationMemento(TNode object, // Memento map check. 
BIND(&map_check); { - TNode memento_map = LoadObjectField(object, kMementoMapOffset); - Branch(TaggedEqual(memento_map, AllocationMementoMapConstant()), - memento_found, &no_memento_found); + TNode maybe_mapword = + LoadObjectField(object, kMementoMapOffset); + TNode memento_mapword = + LoadRootMapWord(RootIndex::kAllocationMementoMap); + Branch(TaggedEqual(maybe_mapword, memento_mapword), memento_found, + &no_memento_found); } BIND(&no_memento_found); Comment("] TrapAllocationMemento"); } TNode CodeStubAssembler::PageFromAddress(TNode address) { + if (FLAG_enable_third_party_heap) Unreachable(); return WordAnd(address, IntPtrConstant(~kPageAlignmentMask)); } @@ -11323,7 +11413,12 @@ void CodeStubAssembler::InitializeFieldsWithRoot(TNode object, CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object)); start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag)); end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag)); - TNode root_value = LoadRoot(root_index); + TNode root_value; + if (root_index == RootIndex::kOnePointerFillerMap) { + root_value = LoadRootMapWord(root_index); + } else { + root_value = LoadRoot(root_index); + } BuildFastLoop( end_offset, start_offset, [=](TNode current) { @@ -13597,6 +13692,149 @@ TNode CodeStubAssembler::LoadJSTypedArrayLength( return LoadObjectField(typed_array, JSTypedArray::kLengthOffset); } +// ES #sec-integerindexedobjectlength +TNode CodeStubAssembler::LoadVariableLengthJSTypedArrayLength( + TNode array, TNode buffer, Label* miss) { + Label is_gsab(this), is_rab(this), end(this); + TVARIABLE(UintPtrT, result); + + Branch(IsSharedArrayBuffer(buffer), &is_gsab, &is_rab); + BIND(&is_gsab); + { + // Non-length-tracking GSAB-backed TypedArrays shouldn't end up here. + CSA_ASSERT(this, IsLengthTrackingTypedArray(array)); + // Read the byte length from the BackingStore. + const TNode length_function = ExternalConstant( + ExternalReference::length_tracking_gsab_backed_typed_array_length()); + TNode isolate_ptr = + ExternalConstant(ExternalReference::isolate_address(isolate())); + result = UncheckedCast( + CallCFunction(length_function, MachineType::UintPtr(), + std::make_pair(MachineType::Pointer(), isolate_ptr), + std::make_pair(MachineType::AnyTagged(), array))); + Goto(&end); + } + + BIND(&is_rab); + { + GotoIf(IsDetachedBuffer(buffer), miss); + + TNode buffer_byte_length = LoadJSArrayBufferByteLength(buffer); + TNode array_byte_offset = LoadJSArrayBufferViewByteOffset(array); + + Label is_length_tracking(this), not_length_tracking(this); + Branch(IsLengthTrackingTypedArray(array), &is_length_tracking, + ¬_length_tracking); + + BIND(&is_length_tracking); + { + // The backing RAB might have been shrunk so that the start of the + // TypedArray is already out of bounds. + GotoIfNot(UintPtrLessThanOrEqual(array_byte_offset, buffer_byte_length), + miss); + // length = (buffer_byte_length - byte_offset) / element_size + // Conversion to signed is OK since buffer_byte_length < + // JSArrayBuffer::kMaxByteLength. + TNode element_size = + RabGsabElementsKindToElementByteSize(LoadElementsKind(array)); + TNode length = + IntPtrDiv(Signed(UintPtrSub(buffer_byte_length, array_byte_offset)), + element_size); + result = Unsigned(length); + Goto(&end); + } + + BIND(¬_length_tracking); + { + // Check if the backing RAB has shrunk so that the buffer is out of + // bounds. 
+ TNode array_byte_length = + LoadJSArrayBufferViewByteLength(array); + GotoIfNot(UintPtrGreaterThanOrEqual( + buffer_byte_length, + UintPtrAdd(array_byte_offset, array_byte_length)), + miss); + result = LoadJSTypedArrayLength(array); + Goto(&end); + } + } + BIND(&end); + return result.value(); +} + +// ES #sec-integerindexedobjectbytelength +TNode CodeStubAssembler::LoadVariableLengthJSTypedArrayByteLength( + TNode context, TNode array, + TNode buffer) { + Label miss(this), end(this); + TVARIABLE(UintPtrT, result); + + TNode length = + LoadVariableLengthJSTypedArrayLength(array, buffer, &miss); + TNode element_size = + RabGsabElementsKindToElementByteSize(LoadElementsKind(array)); + // Conversion to signed is OK since length < JSArrayBuffer::kMaxByteLength. + TNode byte_length = IntPtrMul(Signed(length), element_size); + result = Unsigned(byte_length); + Goto(&end); + BIND(&miss); + { + result = UintPtrConstant(0); + Goto(&end); + } + BIND(&end); + return result.value(); +} + +TNode CodeStubAssembler::RabGsabElementsKindToElementByteSize( + TNode elements_kind) { + TVARIABLE(IntPtrT, result); + Label elements_8(this), elements_16(this), elements_32(this), + elements_64(this), not_found(this), end(this); + int32_t elements_kinds[] = { + RAB_GSAB_UINT8_ELEMENTS, RAB_GSAB_UINT8_CLAMPED_ELEMENTS, + RAB_GSAB_INT8_ELEMENTS, RAB_GSAB_UINT16_ELEMENTS, + RAB_GSAB_INT16_ELEMENTS, RAB_GSAB_UINT32_ELEMENTS, + RAB_GSAB_INT32_ELEMENTS, RAB_GSAB_FLOAT32_ELEMENTS, + RAB_GSAB_FLOAT64_ELEMENTS, RAB_GSAB_BIGINT64_ELEMENTS, + RAB_GSAB_BIGUINT64_ELEMENTS}; + Label* elements_kind_labels[] = {&elements_8, &elements_8, &elements_8, + &elements_16, &elements_16, &elements_32, + &elements_32, &elements_32, &elements_64, + &elements_64, &elements_64}; + const size_t kTypedElementsKindCount = + LAST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND - + FIRST_RAB_GSAB_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1; + DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds)); + DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels)); + Switch(elements_kind, ¬_found, elements_kinds, elements_kind_labels, + kTypedElementsKindCount); + BIND(&elements_8); + { + result = IntPtrConstant(1); + Goto(&end); + } + BIND(&elements_16); + { + result = IntPtrConstant(2); + Goto(&end); + } + BIND(&elements_32); + { + result = IntPtrConstant(4); + Goto(&end); + } + BIND(&elements_64); + { + result = IntPtrConstant(8); + Goto(&end); + } + BIND(¬_found); + { Unreachable(); } + BIND(&end); + return result.value(); +} + TNode CodeStubAssembler::GetTypedArrayBuffer( TNode context, TNode array) { Label call_runtime(this), done(this); @@ -13799,6 +14037,17 @@ TNode CodeStubAssembler::IsDebugActive() { return Word32NotEqual(is_debug_active, Int32Constant(0)); } +TNode CodeStubAssembler::IsSideEffectFreeDebuggingActive() { + TNode debug_execution_mode = Load(ExternalConstant( + ExternalReference::debug_execution_mode_address(isolate()))); + + TNode is_active = + Word32Equal(debug_execution_mode, + Int32Constant(DebugInfo::ExecutionMode::kSideEffects)); + + return is_active; +} + TNode CodeStubAssembler::HasAsyncEventDelegate() { const TNode async_event_delegate = Load(ExternalConstant( ExternalReference::async_event_delegate_address(isolate()))); @@ -13837,18 +14086,8 @@ TNode CodeStubAssembler:: return IsSetWord32(flags, mask); } -TNode CodeStubAssembler:: - IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate( - TNode flags) { - return Word32NotEqual(flags, Int32Constant(0)); -} - TNode CodeStubAssembler::NeedsAnyPromiseHooks(TNode 
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 24204d82f88b3b..9b54b5014e3300 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -67,6 +67,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
     AsyncIteratorValueUnwrapSharedFun)                                        \
   V(MapIteratorProtector, map_iterator_protector, MapIteratorProtector)       \
   V(NoElementsProtector, no_elements_protector, NoElementsProtector)          \
+  V(MegaDOMProtector, mega_dom_protector, MegaDOMProtector)                   \
   V(NumberStringCache, number_string_cache, NumberStringCache)                \
   V(PromiseAllResolveElementSharedFun, promise_all_resolve_element_shared_fun, \
     PromiseAllResolveElementSharedFun)                                        \
@@ -157,6 +158,7 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
   V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap)         \
   V(match_symbol, match_symbol, MatchSymbol)                                  \
   V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol)                \
+  V(mega_dom_symbol, mega_dom_symbol, MegaDOMSymbol)                          \
   V(message_string, message_string, MessageString)                           \
   V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString)       \
   V(MinusZeroValue, minus_zero_value, MinusZero)                             \
@@ -1088,7 +1090,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   }
   // Load a field from an object on the heap.
   template <class T, typename std::enable_if<
-                         std::is_convertible<TNode<T>, TNode<Object>>::value,
+                         std::is_convertible<TNode<T>, TNode<Object>>::value &&
+                             std::is_base_of<T, Map>::value,
+                         int>::type = 0>
+  TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
+    const MachineType machine_type = offset == HeapObject::kMapOffset
+                                         ? MachineType::MapInHeader()
+                                         : MachineTypeOf<T>::value;
+    return CAST(LoadFromObject(machine_type, object,
+                               IntPtrConstant(offset - kHeapObjectTag)));
+  }
+  template <class T, typename std::enable_if<
+                         std::is_convertible<TNode<T>, TNode<Object>>::value &&
+                             !std::is_base_of<T, Map>::value,
                          int>::type = 0>
   TNode<T> LoadObjectField(TNode<HeapObject> object, int offset) {
     return CAST(LoadFromObject(MachineTypeOf<T>::value, object,
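For readers unfamiliar with map packing: the new LoadObjectField overload above selects MachineType::MapInHeader() only for the map slot, because with v8_enable_map_packing that slot may hold an encoded map word rather than a plain tagged pointer. A simplified C++ sketch of the same offset-based dispatch; the struct layout, the UnpackMapWord transform, and all names here are placeholders, not V8's actual representation:

    #include <cstdint>
    #include <cstring>

    // Hypothetical object layout: first word is the (possibly packed) map word,
    // followed by ordinary tagged fields.
    struct ExampleObject {
      uintptr_t map_word;
      uintptr_t field0;
    };

    constexpr int kMapOffset = 0;

    // Placeholder decode step standing in for the map-packing transformation.
    inline uintptr_t UnpackMapWord(uintptr_t packed) {
      return packed ^ 0xffff000000000000ull;
    }

    // Loads a field; only the map slot goes through the decode step, mirroring
    // the MachineType::MapInHeader() selection above.
    inline uintptr_t LoadField(const ExampleObject* obj, int offset) {
      uintptr_t raw;
      std::memcpy(&raw, reinterpret_cast<const char*>(obj) + offset, sizeof raw);
      return offset == kMapOffset ? UnpackMapWord(raw) : raw;
    }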
@@ -1163,6 +1177,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                          std::is_convertible<TNode<T>, TNode<Object>>::value,
                          int>::type = 0>
   TNode<T> LoadReference(Reference reference) {
+    if (IsMapOffsetConstant(reference.offset)) {
+      TNode<Map> map = LoadMap(CAST(reference.object));
+      DCHECK((std::is_base_of<T, Map>::value));
+      return ReinterpretCast<T>(map);
+    }
+
     TNode<IntPtrT> offset =
         IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
     CSA_ASSERT(this, TaggedIsNotSmi(reference.object));
@@ -1175,6 +1195,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                 std::is_same<T, MaybeObject>::value,
                 int>::type = 0>
   TNode<T> LoadReference(Reference reference) {
+    DCHECK(!IsMapOffsetConstant(reference.offset));
     TNode<IntPtrT> offset =
         IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
     return UncheckedCast<T>(
@@ -1185,6 +1206,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                 std::is_same<T, MaybeObject>::value,
                 int>::type = 0>
   void StoreReference(Reference reference, TNode<T> value) {
+    if (IsMapOffsetConstant(reference.offset)) {
+      DCHECK((std::is_base_of<T, Map>::value));
+      return StoreMap(CAST(reference.object), ReinterpretCast<Map>(value));
+    }
     MachineRepresentation rep = MachineRepresentationOf<T>::value;
     StoreToObjectWriteBarrier write_barrier = StoreToObjectWriteBarrier::kFull;
     if (std::is_same<T, Smi>::value) {
@@ -1201,6 +1226,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                 std::is_convertible<TNode<T>, TNode<UntaggedT>>::value,
                 int>::type = 0>
   void StoreReference(Reference reference, TNode<T> value) {
+    DCHECK(!IsMapOffsetConstant(reference.offset));
     TNode<IntPtrT> offset =
         IntPtrSub(reference.offset, IntPtrConstant(kHeapObjectTag));
     StoreToObject(MachineRepresentationOf<T>::value, reference.object, offset,
@@ -2346,6 +2372,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   // JSProxy or an object with interceptors.
   TNode<BoolT> InstanceTypeEqual(TNode<Int32T> instance_type, int type);
   TNode<BoolT> IsNoElementsProtectorCellInvalid();
+  TNode<BoolT> IsMegaDOMProtectorCellInvalid();
   TNode<BoolT> IsArrayIteratorProtectorCellInvalid();
   TNode<BoolT> IsBigIntInstanceType(TNode<Int32T> instance_type);
   TNode<BoolT> IsBigInt(TNode<HeapObject> object);
@@ -2395,6 +2422,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<BoolT> IsJSObjectInstanceType(TNode<Int32T> instance_type);
   TNode<BoolT> IsJSObjectMap(TNode<Map> map);
   TNode<BoolT> IsJSObject(TNode<HeapObject> object);
+  TNode<BoolT> IsJSApiObjectInstanceType(TNode<Int32T> instance_type);
+  TNode<BoolT> IsJSApiObjectMap(TNode<Map> map);
+  TNode<BoolT> IsJSApiObject(TNode<HeapObject> object);
   TNode<BoolT> IsJSFinalizationRegistryMap(TNode<Map> map);
   TNode<BoolT> IsJSFinalizationRegistry(TNode<HeapObject> object);
   TNode<BoolT> IsJSPromiseMap(TNode<Map> map);
@@ -3451,6 +3481,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // Debug helpers
   TNode<BoolT> IsDebugActive();
+  TNode<BoolT> IsSideEffectFreeDebuggingActive();
 
   // JSArrayBuffer helpers
   TNode<RawPtrT> LoadJSArrayBufferBackingStorePtr(
@@ -3464,6 +3495,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       TNode<JSArrayBufferView> array_buffer_view);
   TNode<UintPtrT> LoadJSArrayBufferViewByteLength(
       TNode<JSArrayBufferView> array_buffer_view);
+  TNode<UintPtrT> LoadJSArrayBufferViewByteOffset(
+      TNode<JSArrayBufferView> array_buffer_view);
 
   void ThrowIfArrayBufferViewBufferIsDetached(
@@ -3472,6 +3504,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
 
   // JSTypedArray helpers
   TNode<UintPtrT> LoadJSTypedArrayLength(TNode<JSTypedArray> typed_array);
+  // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
+  // ResizableArrayBuffer.
+  TNode<UintPtrT> LoadVariableLengthJSTypedArrayLength(
+      TNode<JSTypedArray> array, TNode<JSArrayBuffer> buffer, Label* miss);
+  // Helper for length tracking JSTypedArrays and JSTypedArrays backed by
+  // ResizableArrayBuffer.
+  TNode<UintPtrT> LoadVariableLengthJSTypedArrayByteLength(
+      TNode<Context> context, TNode<JSTypedArray> array,
+      TNode<JSArrayBuffer> buffer);
+  TNode<IntPtrT> RabGsabElementsKindToElementByteSize(
+      TNode<Int32T> elementsKind);
   TNode<RawPtrT> LoadJSTypedArrayDataPtr(TNode<JSTypedArray> typed_array);
   TNode<JSArrayBuffer> GetTypedArrayBuffer(TNode<Context> context,
                                            TNode<JSTypedArray> array);
@@ -3528,13 +3571,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     return IsIsolatePromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
         PromiseHookFlags());
   }
-  TNode<BoolT> IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
-      TNode<Uint32T> flags);
-  TNode<BoolT>
-  IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() {
-    return IsAnyPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate(
-        PromiseHookFlags());
-  }
   TNode<BoolT> NeedsAnyPromiseHooks(TNode<Uint32T> flags);
   TNode<BoolT> NeedsAnyPromiseHooks() {
@@ -3600,6 +3636,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   }
   int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; }
+  uint32_t ConstexprWord32Shl(uint32_t a, int32_t b) { return a << b; }
 
   bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; }
 
@@ -3712,12 +3749,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                 const ForEachKeyValueFunction& body,
                                 Label* bailout);
 
-  TNode<Object> CallGetterIfAccessor(TNode<Object> value,
-                                     TNode<HeapObject> holder,
-                                     TNode<Uint32T> details,
-                                     TNode<Context> context,
-                                     TNode<Object> receiver, Label* if_bailout,
-                                     GetOwnPropertyMode mode = kCallJSGetter);
+  TNode<Object> CallGetterIfAccessor(
+      TNode<Object> value, TNode<HeapObject> holder, TNode<Uint32T> details,
+      TNode<Context> context, TNode<Object> receiver, TNode<Name> name,
+      Label* if_bailout, GetOwnPropertyMode mode = kCallJSGetter);
 
   TNode<IntPtrT> TryToIntptr(TNode<Object> key, Label* if_not_intptr,
                              TVariable<Int32T>* var_instance_type = nullptr);
@@ -3916,6 +3951,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
     return CodeAssembler::LoadRoot(root_index);
   }
 
+  TNode<AnyTaggedT> LoadRootMapWord(RootIndex root_index) {
+    return CodeAssembler::LoadRootMapWord(root_index);
+  }
+
   template <typename TIndex>
   void StoreFixedArrayOrPropertyArrayElement(
       TNode<UnionT<FixedArray, PropertyArray>> array, TNode<TIndex> index,
@@ -3955,6 +3994,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                              TVariable* var_result, Label* if_bailout);
 
+  void AssertHasValidMap(TNode<HeapObject> object);
+
   template <typename TValue>
   void EmitElementStoreTypedArray(TNode<JSTypedArray> typed_array,
                                   TNode<IntPtrT> key, TNode<TValue> value,
diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc
index 3941e56e6a62db..ee50f8b0153a51 100644
--- a/deps/v8/src/codegen/compilation-cache.cc
+++ b/deps/v8/src/codegen/compilation-cache.cc
@@ -29,10 +29,9 @@ CompilationCache::CompilationCache(Isolate* isolate)
       eval_global_(isolate),
       eval_contextual_(isolate),
       reg_exp_(isolate, kRegExpGenerations),
-      code_(isolate),
       enabled_script_and_eval_(true) {
   CompilationSubCache* subcaches[kSubCacheCount] = {
-      &script_, &eval_global_, &eval_contextual_, &reg_exp_, &code_};
+      &script_, &eval_global_, &eval_contextual_, &reg_exp_};
   for (int i = 0; i < kSubCacheCount; ++i) {
     subcaches_[i] = subcaches[i];
   }
@@ -77,10 +76,6 @@ void CompilationCacheScript::Age() {
 }
 void CompilationCacheEval::Age() { AgeCustom(this); }
 void CompilationCacheRegExp::Age() { AgeByGeneration(this); }
-void CompilationCacheCode::Age() {
-  if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing();
-  AgeByGeneration(this);
-}
 
 void CompilationSubCache::Iterate(RootVisitor* v) {
   v->VisitRootPointers(Root::kCompilationCache, nullptr,
@@ -267,58 +262,6 @@ void CompilationCacheRegExp::Put(Handle<String> source, JSRegExp::Flags flags,
       CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data));
 }
 
-MaybeHandle<Code> CompilationCacheCode::Lookup(Handle<SharedFunctionInfo> key) {
-  // Make sure not to leak the table into the surrounding handle
-  // scope. Otherwise, we risk keeping old tables around even after
-  // having cleared the cache.
-  HandleScope scope(isolate());
-  MaybeHandle<Code> maybe_value;
-  int generation = 0;
-  for (; generation < generations(); generation++) {
-    Handle<CompilationCacheTable> table = GetTable(generation);
-    maybe_value = table->LookupCode(key);
-    if (!maybe_value.is_null()) break;
-  }
-
-  if (maybe_value.is_null()) {
-    isolate()->counters()->compilation_cache_misses()->Increment();
-    return MaybeHandle<Code>();
-  }
-
-  Handle<Code> value = maybe_value.ToHandleChecked();
-  if (generation != 0) Put(key, value);  // Add to the first generation.
-  isolate()->counters()->compilation_cache_hits()->Increment();
-  return scope.CloseAndEscape(value);
-}
-
-void CompilationCacheCode::Put(Handle<SharedFunctionInfo> key,
-                               Handle<Code> value) {
-  HandleScope scope(isolate());
-  Handle<CompilationCacheTable> table = GetFirstTable();
-  SetFirstTable(CompilationCacheTable::PutCode(isolate(), table, key, value));
-}
-
-void CompilationCacheCode::TraceAgeing() {
-  DCHECK(FLAG_trace_turbo_nci);
-  StdoutStream os;
-  os << "NCI cache ageing: Removing oldest generation" << std::endl;
-}
-
-void CompilationCacheCode::TraceInsertion(Handle<SharedFunctionInfo> key,
-                                          Handle<Code> value) {
-  DCHECK(FLAG_trace_turbo_nci);
-  StdoutStream os;
-  os << "NCI cache insertion: " << Brief(*key) << ", " << Brief(*value)
-     << std::endl;
-}
-
-void CompilationCacheCode::TraceHit(Handle<SharedFunctionInfo> key,
-                                    Handle<Code> value) {
-  DCHECK(FLAG_trace_turbo_nci);
-  StdoutStream os;
-  os << "NCI cache hit: " << Brief(*key) << ", " << Brief(*value) << std::endl;
-}
-
 void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabledScriptAndEval()) return;
 
@@ -372,10 +315,6 @@ MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
   return reg_exp_.Lookup(source, flags);
 }
 
-MaybeHandle<Code> CompilationCache::LookupCode(Handle<SharedFunctionInfo> sfi) {
-  return code_.Lookup(sfi);
-}
-
 void CompilationCache::PutScript(Handle<String> source,
                                  LanguageMode language_mode,
                                  Handle<SharedFunctionInfo> function_info) {
@@ -414,11 +353,6 @@ void CompilationCache::PutRegExp(Handle<String> source, JSRegExp::Flags flags,
   reg_exp_.Put(source, flags, data);
 }
 
-void CompilationCache::PutCode(Handle<SharedFunctionInfo> shared,
-                               Handle<Code> code) {
-  code_.Put(shared, code);
-}
-
 void CompilationCache::Clear() {
   for (int i = 0; i < kSubCacheCount; i++) {
     subcaches_[i]->Clear();
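The removed CompilationCacheCode above follows the generational sub-cache pattern shared by the remaining caches: Lookup() scans the generations, re-inserts older hits into the newest table, and Age() drops the oldest generation. A minimal generic C++ sketch of that pattern (the container choice, Key/Value placeholders, and method names are illustrative, not V8's types):

    #include <array>
    #include <optional>
    #include <unordered_map>
    #include <utility>

    // Key must be hashable; Key/Value stand in for e.g. a source string and a
    // SharedFunctionInfo handle.
    template <typename Key, typename Value, int kGenerations = 2>
    class GenerationalCache {
     public:
      std::optional<Value> Lookup(const Key& key) {
        for (int generation = 0; generation < kGenerations; ++generation) {
          auto it = tables_[generation].find(key);
          if (it == tables_[generation].end()) continue;
          Value value = it->second;
          // Hits in older generations are re-inserted into the first one.
          if (generation != 0) Put(key, value);
          return value;
        }
        return std::nullopt;  // cache miss
      }

      void Put(const Key& key, const Value& value) { tables_[0][key] = value; }

      // Ageing drops the oldest generation and shifts the others down.
      void Age() {
        for (int generation = kGenerations - 1; generation > 0; --generation) {
          tables_[generation] = std::move(tables_[generation - 1]);
        }
        tables_[0].clear();
      }

     private:
      std::array<std::unordered_map<Key, Value>, kGenerations> tables_;
    };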
diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h
index 0ed13e53b6d7e1..d4f4ae52dcaa75 100644
--- a/deps/v8/src/codegen/compilation-cache.h
+++ b/deps/v8/src/codegen/compilation-cache.h
@@ -150,32 +150,6 @@ class CompilationCacheRegExp : public CompilationSubCache {
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
 };
 
-// Sub-cache for Code objects. All code inserted into this cache must
-// be usable across different native contexts.
-class CompilationCacheCode : public CompilationSubCache {
- public:
-  explicit CompilationCacheCode(Isolate* isolate)
-      : CompilationSubCache(isolate, kGenerations) {}
-
-  MaybeHandle<Code> Lookup(Handle<SharedFunctionInfo> key);
-  void Put(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
-  void Age() override;
-
-  // TODO(jgruber,v8:8888): For simplicity we use the generational
-  // approach here, but could consider something else (or more
-  // generations) in the future.
-  static constexpr int kGenerations = 2;
-
-  static void TraceAgeing();
-  static void TraceInsertion(Handle<SharedFunctionInfo> key,
-                             Handle<Code> value);
-  static void TraceHit(Handle<SharedFunctionInfo> key, Handle<Code> value);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode);
-};
-
 // The compilation cache keeps shared function infos for compiled
 // scripts and evals. The shared function infos are looked up using
 // the source string as the key. For regular expressions the
@@ -206,8 +180,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
   MaybeHandle<FixedArray> LookupRegExp(Handle<String> source,
                                        JSRegExp::Flags flags);
 
-  MaybeHandle<Code> LookupCode(Handle<SharedFunctionInfo> sfi);
-
   // Associate the (source, kind) pair to the shared function
   // info. This may overwrite an existing mapping.
   void PutScript(Handle<String> source, LanguageMode language_mode,
@@ -225,8 +197,6 @@ class V8_EXPORT_PRIVATE CompilationCache {
   void PutRegExp(Handle<String> source, JSRegExp::Flags flags,
                  Handle<FixedArray> data);
 
-  void PutCode(Handle<SharedFunctionInfo> shared, Handle<Code> code);
-
   // Clear the cache - also used to initialize the cache at startup.
   void Clear();
 
@@ -269,9 +239,8 @@ class V8_EXPORT_PRIVATE CompilationCache {
   CompilationCacheEval eval_global_;
   CompilationCacheEval eval_contextual_;
   CompilationCacheRegExp reg_exp_;
-  CompilationCacheCode code_;
 
-  static constexpr int kSubCacheCount = 5;
+  static constexpr int kSubCacheCount = 4;
   CompilationSubCache* subcaches_[kSubCacheCount];
 
   // Current enable state of the compilation cache for scripts and eval.
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index e46639d90a49ad..9de4ae24a3aa9c 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -559,10 +559,10 @@ void InstallInterpreterTrampolineCopy(
                                  script_name, line_num, column_num));
 }
 
-template <typename LocalIsolate>
+template <typename IsolateT>
 void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
                             Handle<SharedFunctionInfo> shared_info,
-                            LocalIsolate* isolate) {
+                            IsolateT* isolate) {
   if (compilation_info->has_bytecode_array()) {
     DCHECK(!shared_info->HasBytecodeArray());  // Only compiled once.
     DCHECK(!compilation_info->has_asm_wasm_data());
@@ -585,7 +585,7 @@ void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info,
 #if V8_ENABLE_WEBASSEMBLY
     DCHECK(compilation_info->has_asm_wasm_data());
     // We should only have asm/wasm data when finalizing on the main thread.
-    DCHECK((std::is_same<Isolate, LocalIsolate>::value));
+    DCHECK((std::is_same<Isolate, IsolateT>::value));
     shared_info->set_asm_wasm_data(*compilation_info->asm_wasm_data());
     shared_info->set_feedback_metadata(
         ReadOnlyRoots(isolate).empty_feedback_metadata());
@@ -606,13 +606,15 @@ void LogUnoptimizedCompilation(Isolate* isolate,
   RecordUnoptimizedCompilationStats(isolate, shared_info);
 }
 
-template <typename LocalIsolate>
+template <typename IsolateT>
 void EnsureSharedFunctionInfosArrayOnScript(Handle