diff --git a/common.gypi b/common.gypi index 4745bb5ac77639..f8440fef992252 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.17', + 'v8_embedder_string': '-node.15', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 47a83c5ff1c905..32268da1cb9115 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -42,6 +42,7 @@ Cloudflare, Inc. <*@cloudflare.com> Julia Computing, Inc. <*@juliacomputing.com> Aaron Bieber +Aaron O'Mullan Abdulla Kamar Adam Kallai Akinori MUSHA @@ -93,6 +94,7 @@ Filipe David Manana Franziska Hinkelmann Geoffrey Garside Gergely Nagy +Gilang Mentari Hamidy Gus Caplan Gwang Yoon Hwang Hannu Trey @@ -118,6 +120,7 @@ Joel Stanley Johan Bergström Jonathan Liu Julien Brianceau +Junha Park JunHo Seo Junming Huang Kang-Hao (Kenny) Lu @@ -131,6 +134,7 @@ Luke Zarko Maciej Małecki Marcin Cieślak Marcin Wiącek +Martin Bidlingmaier Mateusz Czeladka Matheus Marchini Matheus Marchini @@ -196,6 +200,7 @@ Tobias Burnus Tobias Nießen Ujjwal Sharma Vadim Gorbachev +Varun Varada Victor Costan Vlad Burlik Vladimir Krivosheev @@ -204,13 +209,13 @@ Wenlu Wang Wiktor Garbacz Wouter Vermeiren Xiaoyin Liu +Yanbo Li Yannic Bonenberger Yong Wang Youfeng Hao Yu Yin Zac Hansen +Zeynep Cankara Zhao Jiazhong Zhongping Wang 柳荣一 -Yanbo Li -Gilang Mentari Hamidy \ No newline at end of file diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 167e63503c5535..bda33c185fe683 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -109,6 +109,9 @@ declare_args() { v8_enable_pointer_compression = "" v8_enable_31bit_smis_on_64bit_arch = false + # Reverse JS arguments order in the stack (sets -dV8_REVERSE_JSARGS). + v8_enable_reverse_jsargs = false + # Sets -dOBJECT_PRINT. 
v8_enable_object_print = "" @@ -127,8 +130,32 @@ declare_args() { # Sets -dV8_CONCURRENT_MARKING v8_enable_concurrent_marking = true - # Sets -dV8_ARRAY_BUFFER_EXTENSION - v8_enable_array_buffer_extension = true + # Runs mksnapshot with --turbo-profiling. After building in this + # configuration, any subsequent run of d8 will output information about usage + # of basic blocks in builtins. + v8_enable_builtins_profiling = false + + # Runs mksnapshot with --turbo-profiling-verbose. After building in this + # configuration, any subsequent run of d8 will output information about usage + # of basic blocks in builtins, including the schedule and disassembly of all + # used builtins. + v8_enable_builtins_profiling_verbose = false + + # Provides the given V8 log file as an input to mksnapshot, where it can be + # used for profile-guided optimization of builtins. + # + # To do profile-guided optimizations of builtins: + # 1. Build with v8_enable_builtins_profiling = true + # 2. Run your chosen workload with the --turbo-profiling-log-builtins flag. + # For Chrome, the invocation might look like this: + # chrome --no-sandbox --disable-extensions + # --js-flags="--turbo-profiling-log-builtins --logfile=path/to/v8.log" + # "http://localhost/test-suite" + # 3. Optionally repeat step 2 for additional workloads, and concatenate all of + # the resulting log files into a single file. + # 4. Build again with v8_builtins_profiling_log_file set to the file created + # in steps 2-3. + v8_builtins_profiling_log_file = "" # Enables various testing features. v8_enable_test_features = "" @@ -200,6 +227,10 @@ declare_args() { # heap has single generation. v8_disable_write_barriers = false + # Ensure that write barriers are always used. + # Useful for debugging purposes. + v8_enable_unconditional_write_barriers = false + # Redirect allocation in young generation so that there will be # only one single generation. 
v8_enable_single_generation = "" @@ -222,13 +253,23 @@ declare_args() { # Enable object names in cppgc for debug purposes. cppgc_enable_object_names = false + # Enable heap reservation of size 4GB. Only possible for 64bit archs. + cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64" + + # Enable young generation in cppgc. + cppgc_enable_young_generation = false + + # Enable V8 zone compression experimental feature. + # Sets -DV8_COMPRESS_ZONES. + v8_enable_zone_compression = "" + # Enable V8 heap sandbox experimental feature. # Sets -DV8_HEAP_SANDBOX. v8_enable_heap_sandbox = "" - # Experimental support for native context independent code. - # https://crbug.com/v8/8888 - v8_enable_nci_code = false + # Experimental feature for collecting per-class zone memory stats. + # Requires use_rtti = true + v8_enable_precise_zone_stats = false } # Derived defaults. @@ -258,13 +299,15 @@ if (v8_enable_snapshot_native_code_counters == "") { v8_enable_snapshot_native_code_counters = v8_enable_debugging_features } if (v8_enable_pointer_compression == "") { - # TODO(v8:v7703): temporarily enable pointer compression on arm64 and on x64 v8_enable_pointer_compression = v8_current_cpu == "arm64" || v8_current_cpu == "x64" } if (v8_enable_fast_torque == "") { v8_enable_fast_torque = v8_enable_fast_mksnapshot } +if (v8_enable_zone_compression == "") { + v8_enable_zone_compression = false +} if (v8_enable_heap_sandbox == "") { v8_enable_heap_sandbox = false } @@ -293,13 +336,31 @@ assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, "Control-flow integrity is only supported on arm64") -assert( - !v8_enable_pointer_compression || !v8_enable_shared_ro_heap, - "Pointer compression is not supported with shared read-only heap enabled") +if (v8_enable_shared_ro_heap && v8_enable_pointer_compression) { + assert( + is_linux || is_android, + "Sharing read-only heap with pointer compression is 
only supported on Linux or Android") +} assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression, "V8 Heap Sandbox requires pointer compression") +assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers, + "Write barriers can't be both enabled and disabled") + +assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" || + v8_current_cpu == "arm64", + "CppGC caged heap requires 64bit platforms") + +assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap, + "Young generation in CppGC requires caged heap") + +if (v8_enable_single_generation == true) { + assert( + v8_enable_unconditional_write_barriers || v8_disable_write_barriers, + "Requires unconditional write barriers or none (which disables incremental marking)") +} + v8_random_seed = "314159265" v8_toolset_for_shell = "host" @@ -308,7 +369,8 @@ v8_toolset_for_shell = "host" # config("internal_config_base") { - visibility = [ ":*" ] # Only targets in this file can depend on this. + # Only targets in this file and its subdirs can depend on this. + visibility = [ "./*" ] configs = [ ":v8_tracing_config" ] @@ -321,7 +383,8 @@ config("internal_config_base") { config("internal_config") { defines = [] - visibility = [ ":*" ] # Only targets in this file can depend on this. + # Only targets in this file and its subdirs can depend on this. + visibility = [ "./*" ] configs = [ "//build/config/compiler:wexit_time_destructors", @@ -370,6 +433,12 @@ config("cppgc_base_config") { if (cppgc_enable_object_names) { defines += [ "CPPGC_SUPPORTS_OBJECT_NAMES" ] } + if (cppgc_enable_caged_heap) { + defines += [ "CPPGC_CAGED_HEAP" ] + } + if (cppgc_enable_young_generation) { + defines += [ "CPPGC_YOUNG_GENERATION" ] + } } # This config should be applied to code using the libsampler. 
@@ -415,6 +484,9 @@ config("v8_header_features") { if (v8_enable_pointer_compression || v8_enable_31bit_smis_on_64bit_arch) { defines += [ "V8_31BIT_SMIS_ON_64BIT_ARCH" ] } + if (v8_enable_zone_compression) { + defines += [ "V8_COMPRESS_ZONES" ] + } if (v8_enable_heap_sandbox) { defines += [ "V8_HEAP_SANDBOX" ] } @@ -424,12 +496,16 @@ config("v8_header_features") { if (v8_imminent_deprecation_warnings) { defines += [ "V8_IMMINENT_DEPRECATION_WARNINGS" ] } + if (v8_enable_reverse_jsargs) { + defines += [ "V8_REVERSE_JSARGS" ] + } } # Put defines here that are only used in our internal files and NEVER in # external headers that embedders (such as chromium and node) might include. config("features") { - visibility = [ ":*" ] # Only targets in this file can depend on this. + # Only targets in this file and its subdirs can depend on this. + visibility = [ "./*" ] defines = [] @@ -517,9 +593,6 @@ config("features") { if (v8_enable_concurrent_marking) { defines += [ "V8_CONCURRENT_MARKING" ] } - if (v8_enable_array_buffer_extension) { - defines += [ "V8_ARRAY_BUFFER_EXTENSION" ] - } if (v8_enable_lazy_source_positions) { defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] } @@ -553,13 +626,17 @@ config("features") { if (v8_enable_wasm_gdb_remote_debugging) { defines += [ "V8_ENABLE_WASM_GDB_REMOTE_DEBUGGING" ] } - if (v8_enable_nci_code) { - defines += [ "V8_ENABLE_NCI_CODE" ] + if (v8_enable_precise_zone_stats) { + defines += [ "V8_ENABLE_PRECISE_ZONE_STATS" ] + } + if (v8_fuzzilli) { + defines += [ "V8_FUZZILLI" ] } } config("toolchain") { - visibility = [ ":*" ] # Only targets in this file can depend on this. + # Only targets in this file and its subdirs can depend on this. 
+ visibility = [ "./*" ] defines = [] cflags = [] @@ -937,63 +1014,66 @@ action("postmortem-metadata") { # NOSORT sources = [ - "src/objects/objects.h", - "src/objects/objects-inl.h", - "src/objects/allocation-site-inl.h", + "$target_gen_dir/torque-generated/instance-types-tq.h", "src/objects/allocation-site.h", - "src/objects/cell-inl.h", + "src/objects/allocation-site-inl.h", "src/objects/cell.h", - "src/objects/code-inl.h", + "src/objects/cell-inl.h", "src/objects/code.h", + "src/objects/code-inl.h", "src/objects/data-handler.h", "src/objects/data-handler-inl.h", "src/objects/descriptor-array.h", "src/objects/descriptor-array-inl.h", "src/objects/feedback-cell.h", "src/objects/feedback-cell-inl.h", - "src/objects/fixed-array-inl.h", "src/objects/fixed-array.h", - "src/objects/heap-number-inl.h", + "src/objects/fixed-array-inl.h", "src/objects/heap-number.h", - "src/objects/heap-object-inl.h", + "src/objects/heap-number-inl.h", "src/objects/heap-object.h", + "src/objects/heap-object-inl.h", "src/objects/instance-type.h", - "src/objects/js-array-inl.h", - "src/objects/js-array.h", - "src/objects/js-array-buffer-inl.h", "src/objects/js-array-buffer.h", - "src/objects/js-objects-inl.h", + "src/objects/js-array-buffer-inl.h", + "src/objects/js-array.h", + "src/objects/js-array-inl.h", + "src/objects/js-function.cc", + "src/objects/js-function.h", + "src/objects/js-objects.cc", "src/objects/js-objects.h", - "src/objects/js-promise-inl.h", + "src/objects/js-objects-inl.h", "src/objects/js-promise.h", - "src/objects/js-regexp-inl.h", + "src/objects/js-promise-inl.h", "src/objects/js-regexp.cc", "src/objects/js-regexp.h", - "src/objects/js-regexp-string-iterator-inl.h", + "src/objects/js-regexp-inl.h", "src/objects/js-regexp-string-iterator.h", - "src/objects/map.h", + "src/objects/js-regexp-string-iterator-inl.h", "src/objects/map.cc", + "src/objects/map.h", "src/objects/map-inl.h", - "src/objects/js-objects.cc", "src/objects/name.h", "src/objects/name-inl.h", - 
"src/objects/oddball-inl.h", + "src/objects/objects.h", + "src/objects/objects-inl.h", "src/objects/oddball.h", + "src/objects/oddball-inl.h", "src/objects/primitive-heap-object.h", "src/objects/primitive-heap-object-inl.h", "src/objects/scope-info.h", "src/objects/script.h", "src/objects/script-inl.h", + "src/objects/shared-function-info.cc", "src/objects/shared-function-info.h", "src/objects/shared-function-info-inl.h", "src/objects/string.cc", - "src/objects/string.h", "src/objects/string-comparator.cc", "src/objects/string-comparator.h", + "src/objects/string.h", "src/objects/string-inl.h", "src/objects/struct.h", "src/objects/struct-inl.h", - "$target_gen_dir/torque-generated/instance-types-tq.h", ] outputs = [ "$target_gen_dir/debug-support.cc" ] @@ -1005,6 +1085,7 @@ action("postmortem-metadata") { } torque_files = [ + "src/builtins/aggregate-error.tq", "src/builtins/array-copywithin.tq", "src/builtins/array-every.tq", "src/builtins/array-filter.tq", @@ -1030,18 +1111,22 @@ torque_files = [ "src/builtins/bigint.tq", "src/builtins/boolean.tq", "src/builtins/builtins-string.tq", - "src/builtins/collections.tq", "src/builtins/cast.tq", + "src/builtins/collections.tq", + "src/builtins/constructor.tq", + "src/builtins/conversion.tq", "src/builtins/convert.tq", "src/builtins/console.tq", "src/builtins/data-view.tq", "src/builtins/finalization-registry.tq", "src/builtins/frames.tq", "src/builtins/frame-arguments.tq", + "src/builtins/function.tq", "src/builtins/growable-fixed-array.tq", "src/builtins/ic-callable.tq", "src/builtins/ic.tq", "src/builtins/internal-coverage.tq", + "src/builtins/internal.tq", "src/builtins/iterator.tq", "src/builtins/math.tq", "src/builtins/number.tq", @@ -1095,11 +1180,13 @@ torque_files = [ "src/builtins/torque-internal.tq", "src/builtins/typed-array-createtypedarray.tq", "src/builtins/typed-array-every.tq", + "src/builtins/typed-array-entries.tq", "src/builtins/typed-array-filter.tq", "src/builtins/typed-array-find.tq", 
"src/builtins/typed-array-findindex.tq", "src/builtins/typed-array-foreach.tq", "src/builtins/typed-array-from.tq", + "src/builtins/typed-array-keys.tq", "src/builtins/typed-array-of.tq", "src/builtins/typed-array-reduce.tq", "src/builtins/typed-array-reduceright.tq", @@ -1108,8 +1195,10 @@ torque_files = [ "src/builtins/typed-array-some.tq", "src/builtins/typed-array-sort.tq", "src/builtins/typed-array-subarray.tq", + "src/builtins/typed-array-values.tq", "src/builtins/typed-array.tq", "src/builtins/wasm.tq", + "src/builtins/weak-ref.tq", "src/ic/handler-configuration.tq", "src/objects/allocation-site.tq", "src/objects/api-callbacks.tq", @@ -1129,7 +1218,6 @@ torque_files = [ "src/objects/heap-number.tq", "src/objects/heap-object.tq", "src/objects/intl-objects.tq", - "src/objects/js-aggregate-error.tq", "src/objects/js-array-buffer.tq", "src/objects/js-array.tq", "src/objects/js-collection-iterator.tq", @@ -1408,6 +1496,19 @@ template("run_mksnapshot") { rebase_path("$target_gen_dir/embedded${suffix}.S", root_build_dir), ] + if (v8_enable_builtins_profiling) { + args += [ "--turbo-profiling" ] + } + if (v8_enable_builtins_profiling_verbose) { + args += [ "--turbo-profiling-verbose" ] + } + if (v8_builtins_profiling_log_file != "") { + args += [ + "--turbo-profiling-log-file", + v8_builtins_profiling_log_file, + ] + } + # This is needed to distinguish between generating code for the simulator # and cross-compiling. The latter may need to run code on the host with the # simulator but cannot use simulator-specific instructions. 
@@ -1632,7 +1733,6 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-data-view-gen.h", "src/builtins/builtins-date-gen.cc", "src/builtins/builtins-debug-gen.cc", - "src/builtins/builtins-function-gen.cc", "src/builtins/builtins-generator-gen.cc", "src/builtins/builtins-global-gen.cc", "src/builtins/builtins-handler-gen.cc", @@ -1663,7 +1763,10 @@ v8_source_set("v8_initializers") { "src/builtins/builtins-wasm-gen.h", "src/builtins/growable-fixed-array-gen.cc", "src/builtins/growable-fixed-array-gen.h", + "src/builtins/profile-data-reader.cc", + "src/builtins/profile-data-reader.h", "src/builtins/setup-builtins-internal.cc", + "src/builtins/torque-csa-header-includes.h", "src/codegen/code-stub-assembler.cc", "src/codegen/code-stub-assembler.h", "src/heap/setup-heap-internal.cc", @@ -1673,6 +1776,8 @@ v8_source_set("v8_initializers") { "src/ic/binary-op-assembler.h", "src/ic/keyed-store-generic.cc", "src/ic/keyed-store-generic.h", + "src/ic/unary-op-assembler.cc", + "src/ic/unary-op-assembler.h", "src/interpreter/interpreter-assembler.cc", "src/interpreter/interpreter-assembler.h", "src/interpreter/interpreter-generator.cc", @@ -1772,6 +1877,7 @@ v8_header_set("v8_headers") { public_configs = [ ":v8_header_features" ] sources = [ + "include/v8-cppgc.h", "include/v8-fast-api-calls.h", "include/v8-internal.h", "include/v8.h", @@ -1801,6 +1907,7 @@ v8_header_set("v8_shared_internal_headers") { v8_compiler_sources = [ ### gcmole(all) ### + "src/builtins/profile-data-reader.h", "src/compiler/access-builder.cc", "src/compiler/access-builder.h", "src/compiler/access-info.cc", @@ -1830,12 +1937,17 @@ v8_compiler_sources = [ "src/compiler/backend/jump-threading.h", "src/compiler/backend/live-range-separator.cc", "src/compiler/backend/live-range-separator.h", + "src/compiler/backend/mid-tier-register-allocator.cc", + "src/compiler/backend/mid-tier-register-allocator.h", "src/compiler/backend/move-optimizer.cc", "src/compiler/backend/move-optimizer.h", + 
"src/compiler/backend/register-allocation.h", "src/compiler/backend/register-allocator-verifier.cc", "src/compiler/backend/register-allocator-verifier.h", "src/compiler/backend/register-allocator.cc", "src/compiler/backend/register-allocator.h", + "src/compiler/backend/spill-placer.cc", + "src/compiler/backend/spill-placer.h", "src/compiler/backend/unwinding-info-writer.h", "src/compiler/basic-block-instrumentor.cc", "src/compiler/basic-block-instrumentor.h", @@ -1897,6 +2009,7 @@ v8_compiler_sources = [ "src/compiler/graph-trimmer.h", "src/compiler/graph-visualizer.cc", "src/compiler/graph-visualizer.h", + "src/compiler/graph-zone-traits.h", "src/compiler/graph.cc", "src/compiler/graph.h", "src/compiler/int64-lowering.cc", @@ -2108,10 +2221,12 @@ v8_source_set("v8_base_without_compiler") { ### gcmole(all) ### "$target_gen_dir/builtins-generated/bytecodes-builtins-list.h", "include/cppgc/common.h", + "include/v8-cppgc.h", "include/v8-fast-api-calls.h", "include/v8-inspector-protocol.h", "include/v8-inspector.h", "include/v8-internal.h", + "include/v8-metrics.h", "include/v8-platform.h", "include/v8-profiler.h", "include/v8-util.h", @@ -2191,6 +2306,7 @@ v8_source_set("v8_base_without_compiler") { "src/builtins/builtins.h", "src/builtins/constants-table-builder.cc", "src/builtins/constants-table-builder.h", + "src/builtins/profile-data-reader.h", "src/codegen/assembler-arch.h", "src/codegen/assembler-inl.h", "src/codegen/assembler.cc", @@ -2338,13 +2454,13 @@ v8_source_set("v8_base_without_compiler") { "src/execution/isolate-utils.h", "src/execution/isolate.cc", "src/execution/isolate.h", + "src/execution/local-isolate-inl.h", + "src/execution/local-isolate.cc", + "src/execution/local-isolate.h", "src/execution/messages.cc", "src/execution/messages.h", "src/execution/microtask-queue.cc", "src/execution/microtask-queue.h", - "src/execution/off-thread-isolate-inl.h", - "src/execution/off-thread-isolate.cc", - "src/execution/off-thread-isolate.h", 
"src/execution/pointer-authentication.h", "src/execution/protectors-inl.h", "src/execution/protectors.cc", @@ -2391,16 +2507,18 @@ v8_source_set("v8_base_without_compiler") { "src/handles/maybe-handles.h", "src/handles/persistent-handles.cc", "src/handles/persistent-handles.h", - "src/heap/array-buffer-collector.cc", - "src/heap/array-buffer-collector.h", + "src/heap/allocation-observer.cc", + "src/heap/allocation-observer.h", + "src/heap/allocation-stats.h", "src/heap/array-buffer-sweeper.cc", "src/heap/array-buffer-sweeper.h", - "src/heap/array-buffer-tracker-inl.h", - "src/heap/array-buffer-tracker.cc", - "src/heap/array-buffer-tracker.h", "src/heap/barrier.h", + "src/heap/base-space.cc", + "src/heap/base-space.h", "src/heap/basic-memory-chunk.cc", "src/heap/basic-memory-chunk.h", + "src/heap/code-object-registry.cc", + "src/heap/code-object-registry.h", "src/heap/code-stats.cc", "src/heap/code-stats.h", "src/heap/combined-heap.cc", @@ -2410,6 +2528,11 @@ v8_source_set("v8_base_without_compiler") { "src/heap/concurrent-allocator.h", "src/heap/concurrent-marking.cc", "src/heap/concurrent-marking.h", + "src/heap/cppgc-js/cpp-heap.cc", + "src/heap/cppgc-js/cpp-heap.h", + "src/heap/cppgc-js/unified-heap-marking-state.h", + "src/heap/cppgc-js/unified-heap-marking-visitor.cc", + "src/heap/cppgc-js/unified-heap-marking-visitor.h", "src/heap/embedder-tracing.cc", "src/heap/embedder-tracing.h", "src/heap/factory-base.cc", @@ -2419,6 +2542,9 @@ v8_source_set("v8_base_without_compiler") { "src/heap/factory.h", "src/heap/finalization-registry-cleanup-task.cc", "src/heap/finalization-registry-cleanup-task.h", + "src/heap/free-list-inl.h", + "src/heap/free-list.cc", + "src/heap/free-list.h", "src/heap/gc-idle-time-handler.cc", "src/heap/gc-idle-time-handler.h", "src/heap/gc-tracer.cc", @@ -2427,6 +2553,7 @@ v8_source_set("v8_base_without_compiler") { "src/heap/heap-controller.h", "src/heap/heap-inl.h", "src/heap/heap-write-barrier-inl.h", + "src/heap/heap-write-barrier.cc", 
"src/heap/heap-write-barrier.h", "src/heap/heap.cc", "src/heap/heap.h", @@ -2445,18 +2572,28 @@ v8_source_set("v8_base_without_compiler") { "src/heap/list.h", "src/heap/local-allocator-inl.h", "src/heap/local-allocator.h", + "src/heap/local-factory.cc", + "src/heap/local-factory.h", + "src/heap/local-heap-inl.h", "src/heap/local-heap.cc", "src/heap/local-heap.h", "src/heap/mark-compact-inl.h", "src/heap/mark-compact.cc", "src/heap/mark-compact.h", + "src/heap/marking-barrier.cc", + "src/heap/marking-barrier.h", "src/heap/marking-visitor-inl.h", "src/heap/marking-visitor.h", + "src/heap/marking-worklist-inl.h", "src/heap/marking-worklist.cc", "src/heap/marking-worklist.h", "src/heap/marking.cc", "src/heap/marking.h", + "src/heap/memory-allocator.cc", + "src/heap/memory-allocator.h", "src/heap/memory-chunk-inl.h", + "src/heap/memory-chunk-layout.cc", + "src/heap/memory-chunk-layout.h", "src/heap/memory-chunk.cc", "src/heap/memory-chunk.h", "src/heap/memory-measurement-inl.h", @@ -2464,20 +2601,23 @@ v8_source_set("v8_base_without_compiler") { "src/heap/memory-measurement.h", "src/heap/memory-reducer.cc", "src/heap/memory-reducer.h", + "src/heap/new-spaces-inl.h", + "src/heap/new-spaces.cc", + "src/heap/new-spaces.h", "src/heap/object-stats.cc", "src/heap/object-stats.h", "src/heap/objects-visiting-inl.h", "src/heap/objects-visiting.cc", "src/heap/objects-visiting.h", - "src/heap/off-thread-factory.cc", - "src/heap/off-thread-factory.h", - "src/heap/off-thread-heap.cc", - "src/heap/off-thread-heap.h", + "src/heap/paged-spaces-inl.h", + "src/heap/paged-spaces.cc", + "src/heap/paged-spaces.h", "src/heap/read-only-heap-inl.h", "src/heap/read-only-heap.cc", "src/heap/read-only-heap.h", "src/heap/read-only-spaces.cc", "src/heap/read-only-spaces.h", + "src/heap/remembered-set-inl.h", "src/heap/remembered-set.h", "src/heap/safepoint.cc", "src/heap/safepoint.h", @@ -2576,12 +2716,17 @@ v8_source_set("v8_base_without_compiler") { "src/logging/counters-inl.h", 
"src/logging/counters.cc", "src/logging/counters.h", + "src/logging/local-logger.cc", + "src/logging/local-logger.h", "src/logging/log-inl.h", "src/logging/log-utils.cc", "src/logging/log-utils.h", "src/logging/log.cc", "src/logging/log.h", - "src/logging/off-thread-logger.h", + "src/logging/metrics.cc", + "src/logging/metrics.h", + "src/logging/tracing-flags.cc", + "src/logging/tracing-flags.h", "src/numbers/bignum-dtoa.cc", "src/numbers/bignum-dtoa.h", "src/numbers/bignum.cc", @@ -2620,6 +2765,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/cell-inl.h", "src/objects/cell.h", "src/objects/code-inl.h", + "src/objects/code-kind.cc", + "src/objects/code-kind.h", "src/objects/code.cc", "src/objects/code.h", "src/objects/compilation-cache-inl.h", @@ -2672,8 +2819,6 @@ v8_source_set("v8_base_without_compiler") { "src/objects/internal-index.h", "src/objects/intl-objects.cc", "src/objects/intl-objects.h", - "src/objects/js-aggregate-error-inl.h", - "src/objects/js-aggregate-error.h", "src/objects/js-array-buffer-inl.h", "src/objects/js-array-buffer.cc", "src/objects/js-array-buffer.h", @@ -2694,6 +2839,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-display-names-inl.h", "src/objects/js-display-names.cc", "src/objects/js-display-names.h", + "src/objects/js-function.cc", + "src/objects/js-function.h", "src/objects/js-generator-inl.h", "src/objects/js-generator.h", "src/objects/js-list-format-inl.h", @@ -2729,6 +2876,9 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-segmenter-inl.h", "src/objects/js-segmenter.cc", "src/objects/js-segmenter.h", + "src/objects/js-segments-inl.h", + "src/objects/js-segments.cc", + "src/objects/js-segments.h", "src/objects/js-weak-refs-inl.h", "src/objects/js-weak-refs.h", "src/objects/keys.cc", @@ -2801,6 +2951,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/script-inl.h", "src/objects/script.h", "src/objects/shared-function-info-inl.h", + "src/objects/shared-function-info.cc", 
"src/objects/shared-function-info.h", "src/objects/slots-atomic-inl.h", "src/objects/slots-inl.h", @@ -2813,7 +2964,10 @@ v8_source_set("v8_base_without_compiler") { "src/objects/string-comparator.cc", "src/objects/string-comparator.h", "src/objects/string-inl.h", + "src/objects/string-set-inl.h", + "src/objects/string-set.h", "src/objects/string-table-inl.h", + "src/objects/string-table.cc", "src/objects/string-table.h", "src/objects/string.cc", "src/objects/string.h", @@ -2896,6 +3050,8 @@ v8_source_set("v8_base_without_compiler") { "src/profiler/tick-sample.h", "src/profiler/tracing-cpu-profiler.cc", "src/profiler/tracing-cpu-profiler.h", + "src/regexp/experimental/experimental.cc", + "src/regexp/experimental/experimental.h", "src/regexp/property-sequences.cc", "src/regexp/property-sequences.h", "src/regexp/regexp-ast.cc", @@ -3068,6 +3224,7 @@ v8_source_set("v8_base_without_compiler") { "src/utils/ostreams.cc", "src/utils/ostreams.h", "src/utils/pointer-with-payload.h", + "src/utils/scoped-list.h", "src/utils/utils-inl.h", "src/utils/utils.cc", "src/utils/utils.h", @@ -3105,9 +3262,12 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/object-access.h", "src/wasm/signature-map.cc", "src/wasm/signature-map.h", + "src/wasm/simd-shuffle.cc", + "src/wasm/simd-shuffle.h", "src/wasm/streaming-decoder.cc", "src/wasm/streaming-decoder.h", "src/wasm/struct-types.h", + "src/wasm/sync-streaming-decoder.cc", "src/wasm/value-type.h", "src/wasm/wasm-arguments.h", "src/wasm/wasm-code-manager.cc", @@ -3125,8 +3285,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-features.h", "src/wasm/wasm-import-wrapper-cache.cc", "src/wasm/wasm-import-wrapper-cache.h", - "src/wasm/wasm-interpreter.cc", - "src/wasm/wasm-interpreter.h", "src/wasm/wasm-js.cc", "src/wasm/wasm-js.h", "src/wasm/wasm-limits.h", @@ -3146,17 +3304,27 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-result.h", "src/wasm/wasm-serialization.cc", "src/wasm/wasm-serialization.h", + 
"src/wasm/wasm-subtyping.cc", + "src/wasm/wasm-subtyping.h", "src/wasm/wasm-tier.h", "src/wasm/wasm-value.h", "src/zone/accounting-allocator.cc", "src/zone/accounting-allocator.h", + "src/zone/compressed-zone-ptr.h", + "src/zone/type-stats.cc", + "src/zone/type-stats.h", "src/zone/zone-allocator.h", "src/zone/zone-chunk-list.h", + "src/zone/zone-compression.h", "src/zone/zone-containers.h", "src/zone/zone-handle-set.h", + "src/zone/zone-hashmap.h", "src/zone/zone-list-inl.h", + "src/zone/zone-list.h", "src/zone/zone-segment.cc", "src/zone/zone-segment.h", + "src/zone/zone-type-traits.h", + "src/zone/zone-utils.h", "src/zone/zone.cc", "src/zone/zone.h", ] @@ -3500,10 +3668,14 @@ v8_source_set("v8_base_without_compiler") { ] } - configs = [ ":internal_config" ] + configs = [ + ":internal_config", + ":cppgc_base_config", + ] defines = [] deps = [ + ":cppgc_base", ":torque_generated_definitions", ":v8_headers", ":v8_libbase", @@ -3564,6 +3736,9 @@ v8_source_set("v8_base_without_compiler") { "src/objects/js-segmenter-inl.h", "src/objects/js-segmenter.cc", "src/objects/js-segmenter.h", + "src/objects/js-segments-inl.h", + "src/objects/js-segments.cc", + "src/objects/js-segments.h", "src/runtime/runtime-intl.cc", "src/strings/char-predicates.cc", ] @@ -4026,6 +4201,45 @@ v8_source_set("fuzzer_support") { ] } +v8_source_set("v8_cppgc_shared") { + sources = [ + "src/heap/base/stack.cc", + "src/heap/base/stack.h", + ] + + if (is_clang || !is_win) { + if (current_cpu == "x64") { + sources += [ "src/heap/base/asm/x64/push_registers_asm.cc" ] + } else if (current_cpu == "x86") { + sources += [ "src/heap/base/asm/ia32/push_registers_asm.cc" ] + } else if (current_cpu == "arm") { + sources += [ "src/heap/base/asm/arm/push_registers_asm.cc" ] + } else if (current_cpu == "arm64") { + sources += [ "src/heap/base/asm/arm64/push_registers_asm.cc" ] + } else if (current_cpu == "ppc64") { + sources += [ "src/heap/base/asm/ppc/push_registers_asm.cc" ] + } else if (current_cpu == "s390x") 
{ + sources += [ "src/heap/base/asm/s390/push_registers_asm.cc" ] + } else if (current_cpu == "mipsel") { + sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ] + } else if (current_cpu == "mips64el") { + sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ] + } + } else if (is_win) { + if (current_cpu == "x64") { + sources += [ "src/heap/base/asm/x64/push_registers_masm.S" ] + } else if (current_cpu == "x86") { + sources += [ "src/heap/base/asm/ia32/push_registers_masm.S" ] + } else if (current_cpu == "arm64") { + sources += [ "src/heap/base/asm/arm64/push_registers_masm.S" ] + } + } + + configs = [ ":internal_config" ] + + public_deps = [ ":v8_libbase" ] +} + v8_source_set("cppgc_base") { visibility = [ ":*" ] @@ -4033,17 +4247,19 @@ v8_source_set("cppgc_base") { "include/cppgc/allocation.h", "include/cppgc/common.h", "include/cppgc/custom-space.h", + "include/cppgc/default-platform.h", "include/cppgc/garbage-collected.h", "include/cppgc/heap.h", - "include/cppgc/internal/accessors.h", - "include/cppgc/internal/api-contants.h", + "include/cppgc/internal/api-constants.h", + "include/cppgc/internal/atomic-entry-flag.h", "include/cppgc/internal/compiler-specific.h", - "include/cppgc/internal/finalizer-traits.h", + "include/cppgc/internal/finalizer-trait.h", "include/cppgc/internal/gc-info.h", "include/cppgc/internal/persistent-node.h", "include/cppgc/internal/pointer-policies.h", "include/cppgc/internal/prefinalizer-handler.h", - "include/cppgc/liveness-broker.h", + "include/cppgc/internal/process-heap.h", + "include/cppgc/internal/write-barrier.h", "include/cppgc/liveness-broker.h", "include/cppgc/macros.h", "include/cppgc/member.h", @@ -4056,13 +4272,19 @@ v8_source_set("cppgc_base") { "include/cppgc/visitor.h", "include/v8config.h", "src/heap/cppgc/allocation.cc", + "src/heap/cppgc/default-platform.cc", "src/heap/cppgc/free-list.cc", "src/heap/cppgc/free-list.h", + "src/heap/cppgc/garbage-collector.h", "src/heap/cppgc/gc-info-table.cc", 
"src/heap/cppgc/gc-info-table.h", "src/heap/cppgc/gc-info.cc", - "src/heap/cppgc/heap-inl.h", - "src/heap/cppgc/heap-object-header-inl.h", + "src/heap/cppgc/gc-invoker.cc", + "src/heap/cppgc/gc-invoker.h", + "src/heap/cppgc/heap-base.cc", + "src/heap/cppgc/heap-base.h", + "src/heap/cppgc/heap-growing.cc", + "src/heap/cppgc/heap-growing.h", "src/heap/cppgc/heap-object-header.cc", "src/heap/cppgc/heap-object-header.h", "src/heap/cppgc/heap-page.cc", @@ -4073,17 +4295,20 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/heap.cc", "src/heap/cppgc/heap.h", "src/heap/cppgc/liveness-broker.cc", + "src/heap/cppgc/liveness-broker.h", "src/heap/cppgc/logging.cc", "src/heap/cppgc/marker.cc", "src/heap/cppgc/marker.h", + "src/heap/cppgc/marking-state.h", + "src/heap/cppgc/marking-verifier.cc", + "src/heap/cppgc/marking-verifier.h", "src/heap/cppgc/marking-visitor.cc", "src/heap/cppgc/marking-visitor.h", - "src/heap/cppgc/object-allocator-inl.h", + "src/heap/cppgc/marking-worklists.cc", + "src/heap/cppgc/marking-worklists.h", "src/heap/cppgc/object-allocator.cc", "src/heap/cppgc/object-allocator.h", - "src/heap/cppgc/object-start-bitmap-inl.h", "src/heap/cppgc/object-start-bitmap.h", - "src/heap/cppgc/page-memory-inl.h", "src/heap/cppgc/page-memory.cc", "src/heap/cppgc/page-memory.h", "src/heap/cppgc/persistent-node.cc", @@ -4091,43 +4316,31 @@ v8_source_set("cppgc_base") { "src/heap/cppgc/pointer-policies.cc", "src/heap/cppgc/prefinalizer-handler.cc", "src/heap/cppgc/prefinalizer-handler.h", + "src/heap/cppgc/process-heap.cc", "src/heap/cppgc/raw-heap.cc", "src/heap/cppgc/raw-heap.h", "src/heap/cppgc/sanitizers.h", "src/heap/cppgc/source-location.cc", - "src/heap/cppgc/stack.cc", - "src/heap/cppgc/stack.h", + "src/heap/cppgc/stats-collector.cc", + "src/heap/cppgc/stats-collector.h", "src/heap/cppgc/sweeper.cc", "src/heap/cppgc/sweeper.h", + "src/heap/cppgc/task-handle.h", + "src/heap/cppgc/trace-trait.cc", + "src/heap/cppgc/virtual-memory.cc", + 
"src/heap/cppgc/virtual-memory.h", + "src/heap/cppgc/visitor.cc", "src/heap/cppgc/worklist.h", + "src/heap/cppgc/write-barrier.cc", ] - if (is_clang || !is_win) { - if (target_cpu == "x64") { - sources += [ "src/heap/cppgc/asm/x64/push_registers_asm.cc" ] - } else if (target_cpu == "x86") { - sources += [ "src/heap/cppgc/asm/ia32/push_registers_asm.cc" ] - } else if (target_cpu == "arm") { - sources += [ "src/heap/cppgc/asm/arm/push_registers_asm.cc" ] - } else if (target_cpu == "arm64") { - sources += [ "src/heap/cppgc/asm/arm64/push_registers_asm.cc" ] - } else if (target_cpu == "ppc64") { - sources += [ "src/heap/cppgc/asm/ppc/push_registers_asm.cc" ] - } else if (target_cpu == "s390x") { - sources += [ "src/heap/cppgc/asm/s390/push_registers_asm.cc" ] - } else if (target_cpu == "mipsel") { - sources += [ "src/heap/cppgc/asm/mips/push_registers_asm.cc" ] - } else if (target_cpu == "mips64el") { - sources += [ "src/heap/cppgc/asm/mips64/push_registers_asm.cc" ] - } - } else if (is_win) { - if (target_cpu == "x64") { - sources += [ "src/heap/cppgc/asm/x64/push_registers_masm.S" ] - } else if (target_cpu == "x86") { - sources += [ "src/heap/cppgc/asm/ia32/push_registers_masm.S" ] - } else if (target_cpu == "arm64") { - sources += [ "src/heap/cppgc/asm/arm64/push_registers_masm.S" ] - } + if (cppgc_enable_caged_heap) { + sources += [ + "include/cppgc/internal/caged-heap-local-data.h", + "src/heap/cppgc/caged-heap-local-data.cc", + "src/heap/cppgc/caged-heap.cc", + "src/heap/cppgc/caged-heap.h", + ] } configs = [ @@ -4135,7 +4348,10 @@ v8_source_set("cppgc_base") { ":cppgc_base_config", ] - public_deps = [ ":v8_libbase" ] + public_deps = [ + ":v8_cppgc_shared", + ":v8_libbase", + ] } ############################################################################### @@ -4545,6 +4761,13 @@ v8_executable("d8") { "src/d8/d8.h", ] + if (v8_fuzzilli) { + sources += [ + "src/d8/cov.cc", + "src/d8/cov.h", + ] + } + configs = [ # Note: don't use :internal_config here because 
this target will get # the :external_config applied to it by virtue of depending on :v8, and @@ -4634,6 +4857,36 @@ if (want_v8_shell) { } } +v8_executable("cppgc_for_v8_embedders") { + sources = [ "samples/cppgc/cppgc-for-v8-embedders.cc" ] + + configs = [ + # Note: don't use :internal_config here because this target will get + # the :external_config applied to it by virtue of depending on :cppgc, and + # you can't have both applied to the same target. + ":internal_config_base", + ] + + deps = [ + ":cppgc", + ":v8_libplatform", + "//build/win:default_exe_manifest", + ] +} + +v8_executable("cppgc_standalone") { + sources = [ "samples/cppgc/cppgc-standalone.cc" ] + + configs = [ + # Note: don't use :internal_config here because this target will get + # the :external_config applied to it by virtue of depending on :cppgc, and + # you can't have both applied to the same target. + ":internal_config_base", + ] + + deps = [ ":cppgc" ] +} + template("v8_fuzzer") { name = target_name forward_variables_from(invoker, "*") @@ -4722,8 +4975,10 @@ v8_source_set("regexp_fuzzer") { v8_fuzzer("regexp_fuzzer") { } -v8_source_set("wasm_module_runner") { +v8_source_set("wasm_test_common") { sources = [ + "test/common/wasm/wasm-interpreter.cc", + "test/common/wasm/wasm-interpreter.h", "test/common/wasm/wasm-module-runner.cc", "test/common/wasm/wasm-module-runner.h", ] @@ -4748,7 +5003,7 @@ v8_source_set("wasm_fuzzer") { deps = [ ":fuzzer_support", ":lib_wasm_fuzzer_common", - ":wasm_module_runner", + ":wasm_test_common", ] configs = [ @@ -4766,7 +5021,7 @@ v8_source_set("wasm_async_fuzzer") { deps = [ ":fuzzer_support", ":lib_wasm_fuzzer_common", - ":wasm_module_runner", + ":wasm_test_common", ] configs = [ @@ -4787,7 +5042,7 @@ v8_source_set("wasm_code_fuzzer") { deps = [ ":fuzzer_support", ":lib_wasm_fuzzer_common", - ":wasm_module_runner", + ":wasm_test_common", ] configs = [ @@ -4828,7 +5083,7 @@ v8_source_set("wasm_compile_fuzzer") { deps = [ ":fuzzer_support", 
":lib_wasm_fuzzer_common", - ":wasm_module_runner", + ":wasm_test_common", ] configs = [ diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS index 1319a579173c3c..bfd539fb42076e 100644 --- a/deps/v8/COMMON_OWNERS +++ b/deps/v8/COMMON_OWNERS @@ -9,6 +9,7 @@ clemensb@chromium.org danno@chromium.org delphick@chromium.org dinfuehr@chromium.org +ecmziegler@chromium.org gdeepti@chromium.org gsathya@chromium.org hablich@chromium.org @@ -34,6 +35,7 @@ solanes@chromium.org syg@chromium.org szuend@chromium.org tebbi@chromium.org +thibaudm@chromium.org ulan@chromium.org verwaest@chromium.org victorgomes@chromium.org diff --git a/deps/v8/DEPS b/deps/v8/DEPS index 7b38c3dcd07b17..7726a6973ec1df 100644 --- a/deps/v8/DEPS +++ b/deps/v8/DEPS @@ -2,8 +2,12 @@ # directory and assume that the root of the checkout is in ./v8/, so # all paths in here must match this assumption. -gclient_gn_args_file = 'v8/build/config/gclient_args.gni' +use_relative_paths = True + +gclient_gn_args_file = 'build/config/gclient_args.gni' gclient_gn_args = [ + 'checkout_google_benchmark', + 'mac_xcode_version', ] vars = { @@ -24,26 +28,32 @@ vars = { 'checkout_instrumented_libraries': False, 'checkout_ittapi': False, + # Fetch clang-tidy into the same bin/ directory as our clang binary. + 'checkout_clang_tidy': False, 'chromium_url': 'https://chromium.googlesource.com', 'android_url': 'https://android.googlesource.com', 'download_gcmole': False, 'download_jsfunfuzz': False, 'check_v8_header_includes': False, + 'checkout_google_benchmark' : False, + + 'mac_xcode_version': 'default', + # GN CIPD package version. - 'gn_version': 'git_revision:5ed3c9cc67b090d5e311e4bd2aba072173e82db9', + 'gn_version': 'git_revision:e327ffdc503815916db2543ec000226a8df45163', # luci-go CIPD package version. 
- 'luci_go': 'git_revision:56ae79476e3caf14da59d75118408aa778637936', + 'luci_go': 'git_revision:b022173f8069cf8001d4cf2a87ce7c5f0eae220f', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_build-tools_version # and whatever else without interference from each other. - 'android_sdk_build-tools_version': 'n-b1Qd7iFb8qzHlr1C_jIeu070UDgO_BwePtH42UqGcC', + 'android_sdk_build-tools_version': '8LZujEmLjSh0g3JciDA3cslSptxKs9HOa_iUPXkOeYQC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_emulator_version # and whatever else without interference from each other. - 'android_sdk_emulator_version': 'f4WdgkPvDdVCE8zBWPzcSIj4N9WFhKp3CSKDWylXuLEC', + 'android_sdk_emulator_version': 'A4EvXZUIuQho0QRDJopMUpgyp6NA3aiDQjGKPUKbowMC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_extras_version # and whatever else without interference from each other. @@ -55,11 +65,11 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platform-tools_version # and whatever else without interference from each other. - 'android_sdk_platform-tools_version': 'zMVtBEihXp2Z0NYFNjLLmNrwy6252b_YWG6sh2l0QAcC', + 'android_sdk_platform-tools_version': '8tF0AOj7Dwlv4j7_nfkhxWB0jzrvWWYjEIpirt8FIWYC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_platforms_version # and whatever else without interference from each other. - 'android_sdk_platforms_version': 'yb33klKQV9UzzB-lDSsq36vzhTXOUZ2aRONBvPGwvdcC', + 'android_sdk_platforms_version': 'YMUu9EHNZ__2Xcxl-KsaSf-dI5TMt_P62IseUVsxktMC', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_sources_version # and whatever else without interference from each other. 
@@ -67,23 +77,23 @@ vars = { # Three lines of non-changing comments so that # the commit queue can handle CLs rolling android_sdk_tools-lint_version # and whatever else without interference from each other. - 'android_sdk_cmdline-tools_version': 'CR25ixsRhwuRnhdgDpGFyl9S0C_0HO9SUgFrwX46zq8C', + 'android_sdk_cmdline-tools_version': 'ijpIFSitwBfaEdO9VXBGPqDHUVzPimXy_whw3aHTN9oC', } deps = { - 'v8/build': - Var('chromium_url') + '/chromium/src/build.git' + '@' + '1b904cc30093c25d5fd48389bd58e3f7409bcf80', - 'v8/third_party/depot_tools': - Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '454f4ba4b3a69feb03c73f93d789062033433b4c', - 'v8/third_party/icu': - Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'f2223961702f00a8833874b0560d615a2cc42738', - 'v8/third_party/instrumented_libraries': - Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + 'bb3f1802c237dd19105dd0f7919f99e536a39d10', - 'v8/buildtools': - Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '204a35a2a64f7179f8b76d7a0385653690839e21', - 'v8/buildtools/clang_format/script': + 'build': + Var('chromium_url') + '/chromium/src/build.git' + '@' + '78b2991b0494c775e437770def455fe40061038f', + 'third_party/depot_tools': + Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '5cff4e3b5cf3116071761cbca363d416b413a064', + 'third_party/icu': + Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '79326efe26e5440f530963704c3c0ff965b3a4ac', + 'third_party/instrumented_libraries': + Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '3c52ccdd3b9edf8fb7b3bd8ba945cce47d887ea8', + 'buildtools': + Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + 'b00ad0af636401e5eb4b5d0ab01b65164dca1914', + 'buildtools/clang_format/script': Var('chromium_url') + '/chromium/llvm-project/cfe/tools/clang-format.git' + '@' + '96636aa0e9f047f17447f2d45a094d0b59ed7917', - 'v8/buildtools/linux64': { + 'buildtools/linux64': { 
'packages': [ { 'package': 'gn/gn/linux-amd64', @@ -93,7 +103,7 @@ deps = { 'dep_type': 'cipd', 'condition': 'host_os == "linux"', }, - 'v8/buildtools/mac': { + 'buildtools/mac': { 'packages': [ { 'package': 'gn/gn/mac-amd64', @@ -103,13 +113,13 @@ deps = { 'dep_type': 'cipd', 'condition': 'host_os == "mac"', }, - 'v8/buildtools/third_party/libc++/trunk': + 'buildtools/third_party/libc++/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + 'd9040c75cfea5928c804ab7c235fed06a63f743a', - 'v8/buildtools/third_party/libc++abi/trunk': + 'buildtools/third_party/libc++abi/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '196ba1aaa8ac285d94f4ea8d9836390a45360533', - 'v8/buildtools/third_party/libunwind/trunk': + 'buildtools/third_party/libunwind/trunk': Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'd999d54f4bca789543a2eb6c995af2d9b5a1f3ed', - 'v8/buildtools/win': { + 'buildtools/win': { 'packages': [ { 'package': 'gn/gn/windows-amd64', @@ -119,20 +129,20 @@ deps = { 'dep_type': 'cipd', 'condition': 'host_os == "win"', }, - 'v8/base/trace_event/common': - Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + 'dab187b372fc17e51f5b9fad8201813d0aed5129', - 'v8/third_party/android_ndk': { + 'base/trace_event/common': + Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '23ef5333a357fc7314630ef88b44c3a545881dee', + 'third_party/android_ndk': { 'url': Var('chromium_url') + '/android_ndk.git' + '@' + '27c0a8d090c666a50e40fceb4ee5b40b1a2d3f87', 'condition': 'checkout_android', }, - 'v8/third_party/android_platform': { - 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '716366f5685ad8aaf1208c64941e440e8e117441', + 'third_party/android_platform': { + 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'fc6c6840eeb254ac4fd199c548c54178ce3545bb', 
'condition': 'checkout_android', }, - 'v8/third_party/android_sdk/public': { + 'third_party/android_sdk/public': { 'packages': [ { - 'package': 'chromium/third_party/android_sdk/public/build-tools/29.0.2', + 'package': 'chromium/third_party/android_sdk/public/build-tools/30.0.1', 'version': Var('android_sdk_build-tools_version'), }, { @@ -152,7 +162,7 @@ deps = { 'version': Var('android_sdk_platform-tools_version'), }, { - 'package': 'chromium/third_party/android_sdk/public/platforms/android-29', + 'package': 'chromium/third_party/android_sdk/public/platforms/android-30', 'version': Var('android_sdk_platforms_version'), }, { @@ -167,35 +177,39 @@ deps = { 'condition': 'checkout_android', 'dep_type': 'cipd', }, - 'v8/third_party/catapult': { - 'url': Var('chromium_url') + '/catapult.git' + '@' + 'e9a8d378c950ee44beec5dd5207e151f48e5b5be', + 'third_party/catapult': { + 'url': Var('chromium_url') + '/catapult.git' + '@' + 'abfdfbb6683802d3a46ed515246573729ea147ff', 'condition': 'checkout_android', }, - 'v8/third_party/colorama/src': { + 'third_party/colorama/src': { 'url': Var('chromium_url') + '/external/colorama.git' + '@' + '799604a1041e9b3bc5d2789ecbd7e8db2e18e6b8', 'condition': 'checkout_android', }, - 'v8/third_party/fuchsia-sdk': { - 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '277fe9120cce5f7a42d43554646fa447f88a1598', + 'third_party/fuchsia-sdk': { + 'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '6a38b0e1f1f4a6255959b259a681e46ee72dee58', 'condition': 'checkout_fuchsia', }, - 'v8/third_party/googletest/src': - Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + 'a09ea700d32bab83325aff9ff34d0582e50e3997', - 'v8/third_party/jinja2': - Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '3f90fa05c85718505e28c9c3426c1ba52843b9b7', - 'v8/third_party/markupsafe': - Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + 
'8f45f5cfa0009d2a70589bcda0349b8cb2b72783', - 'v8/tools/swarming_client': - Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '160b445a44e0daacf6f3f8570ca2707ec451f374', - 'v8/test/benchmarks/data': + 'third_party/googletest/src': + Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '4fe018038f87675c083d0cfb6a6b57c274fb1753', + 'third_party/google_benchmark/src': { + 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '7f27afe83b82f3a98baf58ef595814b9d42a5b2b', + 'condition': 'checkout_google_benchmark', + }, + 'third_party/jinja2': + Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '61cfe2ac6c9108534c43b4039a95a0980251f266', + 'third_party/markupsafe': + Var('chromium_url') + '/chromium/src/third_party/markupsafe.git' + '@' + 'f2fb0f21ef1e1d4ffd43be8c63fc3d4928dea7ab', + 'tools/swarming_client': + Var('chromium_url') + '/infra/luci/client-py.git' + '@' + '4c095d04179dc725a300085ae21fe3b79900d072', + 'test/benchmarks/data': Var('chromium_url') + '/v8/deps/third_party/benchmarks.git' + '@' + '05d7188267b4560491ff9155c5ee13e207ecd65f', - 'v8/test/mozilla/data': + 'test/mozilla/data': Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be', - 'v8/test/test262/data': - Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'd2f7d4285c4a5267f5be37a9c823a397daadad1b', - 'v8/test/test262/harness': + 'test/test262/data': + Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'e73054f75e08e329e73e0f77bf92503ad5b83d0f', + 'test/test262/harness': Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '4555345a943d0c99a9461182705543fb171dda4b', - 'v8/third_party/qemu-linux-x64': { + 'third_party/qemu-linux-x64': { 'packages': [ { 'package': 'fuchsia/qemu/linux-amd64', @@ -205,7 +219,7 @@ deps = { 'condition': 'host_os == "linux" and checkout_fuchsia', 'dep_type': 'cipd', }, 
- 'v8/third_party/qemu-mac-x64': { + 'third_party/qemu-mac-x64': { 'packages': [ { 'package': 'fuchsia/qemu/mac-amd64', @@ -215,17 +229,17 @@ deps = { 'condition': 'host_os == "mac" and checkout_fuchsia', 'dep_type': 'cipd', }, - 'v8/third_party/aemu-linux-x64': { + 'third_party/aemu-linux-x64': { 'packages': [ { 'package': 'fuchsia/third_party/aemu/linux-amd64', - 'version': '5LzaFiFYMxwWXcgus5JjF74yr90M5oz9IMo29pTdoLgC' + 'version': 'cG1zzefbD24rFmPDujqP0rrEG0uXUhH8axBOrD619hoC' }, ], 'condition': 'host_os == "linux" and checkout_fuchsia', 'dep_type': 'cipd', }, - 'v8/third_party/aemu-mac-x64': { + 'third_party/aemu-mac-x64': { 'packages': [ { 'package': 'fuchsia/third_party/aemu/mac-amd64', @@ -235,9 +249,9 @@ deps = { 'condition': 'host_os == "mac" and checkout_fuchsia', 'dep_type': 'cipd', }, - 'v8/tools/clang': - Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + 'de3e20662b84f0ee361a5ae11c99a9513df7c8e8', - 'v8/tools/luci-go': { + 'tools/clang': + Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '299e8a233942b1978a4c0dbc301f363a6729c4e2', + 'tools/luci-go': { 'packages': [ { 'package': 'infra/tools/luci/isolate/${{platform}}', @@ -252,10 +266,10 @@ deps = { 'version': Var('luci_go'), }, ], - 'condition': 'host_cpu != "s390"', + 'condition': 'host_cpu != "s390" and host_os != "aix"', 'dep_type': 'cipd', }, - 'v8/tools/clang/dsymutil': { + 'tools/clang/dsymutil': { 'packages': [ { 'package': 'chromium/llvm-build-tools/dsymutil', @@ -265,15 +279,15 @@ deps = { 'condition': 'checkout_mac', 'dep_type': 'cipd', }, - 'v8/third_party/perfetto': + 'third_party/perfetto': Var('android_url') + '/platform/external/perfetto.git' + '@' + 'ff70e0d273ed10995866c803f23e11250eb3dc52', - 'v8/third_party/protobuf': + 'third_party/protobuf': Var('chromium_url') + '/external/github.com/google/protobuf'+ '@' + 'b68a347f56137b4b1a746e8c7438495a6ac1bd91', - 'v8/third_party/zlib': - Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 
'90fc47e6eed7bd1a59ad1603761303ef24705593', - 'v8/third_party/jsoncpp/source': + 'third_party/zlib': + Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + 'd53accfbd0382a98ad7378045631866449b5f92e', + 'third_party/jsoncpp/source': Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '645250b6690785be60ab6780ce4b58698d884d11', - 'v8/third_party/ittapi': { + 'third_party/ittapi': { # Force checkout ittapi libraries to pass v8 header includes check on # bots that has check_v8_header_includes enabled. 'url': Var('chromium_url') + '/external/github.com/intel/ittapi' + '@' + 'b4ae0122ba749163096058b4f1bb065bf4a7de94', @@ -303,7 +317,7 @@ hooks = [ 'pattern': '.', 'action': [ 'python', - 'v8/third_party/depot_tools/update_depot_tools_toggle.py', + 'third_party/depot_tools/update_depot_tools_toggle.py', '--disable', ], }, @@ -315,9 +329,9 @@ hooks = [ 'pattern': '.', 'action': [ 'python', - 'v8/build/landmines.py', + 'build/landmines.py', '--landmine-scripts', - 'v8/tools/get_landmines.py', + 'tools/get_landmines.py', ], }, # Pull clang-format binaries using checked-in hashes. 
@@ -330,7 +344,7 @@ hooks = [ '--platform=win32', '--no_auth', '--bucket', 'chromium-clang-format', - '-s', 'v8/buildtools/win/clang-format.exe.sha1', + '-s', 'buildtools/win/clang-format.exe.sha1', ], }, { @@ -342,7 +356,7 @@ hooks = [ '--platform=darwin', '--no_auth', '--bucket', 'chromium-clang-format', - '-s', 'v8/buildtools/mac/clang-format.sha1', + '-s', 'buildtools/mac/clang-format.sha1', ], }, { @@ -354,7 +368,7 @@ hooks = [ '--platform=linux*', '--no_auth', '--bucket', 'chromium-clang-format', - '-s', 'v8/buildtools/linux64/clang-format.sha1', + '-s', 'buildtools/linux64/clang-format.sha1', ], }, { @@ -364,7 +378,7 @@ hooks = [ 'action': [ 'download_from_google_storage', '--bucket', 'chrome-v8-gcmole', '-u', '--no_resume', - '-s', 'v8/tools/gcmole/gcmole-tools.tar.gz.sha1', + '-s', 'tools/gcmole/gcmole-tools.tar.gz.sha1', '--platform=linux*', ], }, @@ -375,7 +389,7 @@ hooks = [ 'action': [ 'download_from_google_storage', '--bucket', 'chrome-v8-jsfunfuzz', '-u', '--no_resume', - '-s', 'v8/tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1', + '-s', 'tools/jsfunfuzz/jsfunfuzz.tar.gz.sha1', '--platform=linux*', ], }, @@ -387,7 +401,7 @@ hooks = [ '--no_auth', '-u', '--bucket', 'v8-wasm-spec-tests', - '-s', 'v8/test/wasm-spec-tests/tests.tar.gz.sha1', + '-s', 'test/wasm-spec-tests/tests.tar.gz.sha1', ], }, { @@ -398,35 +412,35 @@ hooks = [ '--no_auth', '-u', '--bucket', 'v8-wasm-spec-tests', - '-s', 'v8/test/wasm-js/tests.tar.gz.sha1', + '-s', 'test/wasm-js/tests.tar.gz.sha1', ], }, { 'name': 'sysroot_arm', 'pattern': '.', 'condition': '(checkout_linux and checkout_arm)', - 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm'], }, { 'name': 'sysroot_arm64', 'pattern': '.', 'condition': '(checkout_linux and checkout_arm64)', - 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python', 
'build/linux/sysroot_scripts/install-sysroot.py', '--arch=arm64'], }, { 'name': 'sysroot_x86', 'pattern': '.', 'condition': '(checkout_linux and (checkout_x86 or checkout_x64))', - 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=x86'], }, { 'name': 'sysroot_x64', 'pattern': '.', 'condition': 'checkout_linux and checkout_x64', - 'action': ['python', 'v8/build/linux/sysroot_scripts/install-sysroot.py', + 'action': ['python', 'build/linux/sysroot_scripts/install-sysroot.py', '--arch=x64'], }, { @@ -437,7 +451,7 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'v8/third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-chained-origins-trusty.tgz.sha1', ], }, { @@ -448,7 +462,7 @@ hooks = [ '--no_resume', '--no_auth', '--bucket', 'chromium-instrumented-libraries', - '-s', 'v8/third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1', + '-s', 'third_party/instrumented_libraries/binaries/msan-no-origins-trusty.tgz.sha1', ], }, { @@ -456,25 +470,26 @@ hooks = [ 'name': 'win_toolchain', 'pattern': '.', 'condition': 'checkout_win', - 'action': ['python', 'v8/build/vs_toolchain.py', 'update'], + 'action': ['python', 'build/vs_toolchain.py', 'update'], }, { # Update the Mac toolchain if necessary. 'name': 'mac_toolchain', 'pattern': '.', 'condition': 'checkout_mac', - 'action': ['python', 'v8/build/mac_toolchain.py'], + 'action': ['python', 'build/mac_toolchain.py', + '--xcode-version', Var('mac_xcode_version')], }, # Pull binutils for linux, enabled debug fission for faster linking / # debugging when used with clang on Ubuntu Precise. 
# https://code.google.com/p/chromium/issues/detail?id=352046 { 'name': 'binutils', - 'pattern': 'v8/third_party/binutils', + 'pattern': 'third_party/binutils', 'condition': 'host_os == "linux"', 'action': [ 'python', - 'v8/third_party/binutils/download.py', + 'third_party/binutils/download.py', ], }, { @@ -483,14 +498,21 @@ hooks = [ 'pattern': '.', # clang not supported on aix 'condition': 'host_os != "aix"', - 'action': ['python', 'v8/tools/clang/scripts/update.py'], + 'action': ['python', 'tools/clang/scripts/update.py'], + }, + { + 'name': 'clang_tidy', + 'pattern': '.', + 'condition': 'checkout_clang_tidy', + 'action': ['python', 'tools/clang/scripts/update.py', + '--package=clang-tidy'], }, { # Update LASTCHANGE. 'name': 'lastchange', 'pattern': '.', - 'action': ['python', 'v8/build/util/lastchange.py', - '-o', 'v8/build/util/LASTCHANGE'], + 'action': ['python', 'build/util/lastchange.py', + '-o', 'build/util/LASTCHANGE'], }, { 'name': 'fuchsia_sdk', @@ -498,7 +520,7 @@ hooks = [ 'condition': 'checkout_fuchsia', 'action': [ 'python', - 'v8/build/fuchsia/update_sdk.py', + 'build/fuchsia/update_sdk.py', '--boot-images={checkout_fuchsia_boot_images}', ], }, @@ -510,7 +532,7 @@ hooks = [ 'name': 'lld/mac', 'pattern': '.', 'condition': 'host_os == "mac" and checkout_fuchsia', - 'action': ['python', 'v8/tools/clang/scripts/update.py', + 'action': ['python', 'tools/clang/scripts/update.py', '--package=lld_mac'], }, { @@ -518,7 +540,7 @@ hooks = [ 'name': 'llvm-objdump', 'pattern': '.', 'condition': 'host_os == "mac" and checkout_fuchsia', - 'action': ['python', 'v8/tools/clang/scripts/update.py', + 'action': ['python', 'tools/clang/scripts/update.py', '--package=objdump'], }, # Download and initialize "vpython" VirtualEnv environment packages. 
@@ -527,7 +549,7 @@ hooks = [ 'pattern': '.', 'condition': 'checkout_android', 'action': [ 'vpython', - '-vpython-spec', 'v8/.vpython', + '-vpython-spec', '.vpython', '-vpython-tool', 'install', ], }, @@ -537,7 +559,7 @@ hooks = [ 'condition': 'check_v8_header_includes', 'action': [ 'python', - 'v8/tools/generate-header-include-checks.py', + 'tools/generate-header-include-checks.py', ], }, ] diff --git a/deps/v8/INFRA_OWNERS b/deps/v8/INFRA_OWNERS index 253be4c96f8d97..b5b36aa26844cd 100644 --- a/deps/v8/INFRA_OWNERS +++ b/deps/v8/INFRA_OWNERS @@ -1,2 +1,4 @@ machenbach@chromium.org tmrts@chromium.org +almuthanna@chromium.org +liviurau@chromium.org diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py index 9b03f631c592c9..eba4158d81c20d 100644 --- a/deps/v8/PRESUBMIT.py +++ b/deps/v8/PRESUBMIT.py @@ -62,6 +62,8 @@ r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc', # Test extension. r'src[\\\/]extensions[\\\/]gc-extension\.cc', + # Runtime functions used for testing. + r'src[\\\/]runtime[\\\/]runtime-test\.cc', ) @@ -85,13 +87,13 @@ def _V8PresubmitChecks(input_api, output_api): def FilterFile(affected_file): return input_api.FilterSourceFile( affected_file, - white_list=None, - black_list=_NO_LINT_PATHS) + files_to_check=None, + files_to_skip=_NO_LINT_PATHS) def FilterTorqueFile(affected_file): return input_api.FilterSourceFile( affected_file, - white_list=(r'.+\.tq')) + files_to_check=(r'.+\.tq')) results = [] if not CppLintProcessor().RunOnFiles( @@ -110,7 +112,7 @@ def FilterTorqueFile(affected_file): input_api.AffectedFiles(include_deletes=True)): results.append(output_api.PresubmitError("Status file check failed")) results.extend(input_api.canned_checks.CheckAuthorizedAuthor( - input_api, output_api, bot_whitelist=[ + input_api, output_api, bot_allowlist=[ 'v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com' ])) return results @@ -234,12 +236,11 @@ def _CheckHeadersHaveIncludeGuards(input_api, output_api): file_inclusion_pattern = 
r'src/.+\.h' def FilterFile(affected_file): - black_list = (_EXCLUDED_PATHS + - input_api.DEFAULT_BLACK_LIST) + files_to_skip = _EXCLUDED_PATHS + input_api.DEFAULT_FILES_TO_SKIP return input_api.FilterSourceFile( affected_file, - white_list=(file_inclusion_pattern, ), - black_list=black_list) + files_to_check=(file_inclusion_pattern, ), + files_to_skip=files_to_skip) leading_src_pattern = input_api.re.compile(r'^src/') dash_dot_slash_pattern = input_api.re.compile(r'[-./]') @@ -296,12 +297,11 @@ def _CheckNoInlineHeaderIncludesInNormalHeaders(input_api, output_api): 'header (e.g. bar.h) file. This violates layering of dependencies.') def FilterFile(affected_file): - black_list = (_EXCLUDED_PATHS + - input_api.DEFAULT_BLACK_LIST) + files_to_skip = _EXCLUDED_PATHS + input_api.DEFAULT_FILES_TO_SKIP return input_api.FilterSourceFile( affected_file, - white_list=(file_inclusion_pattern, ), - black_list=black_list) + files_to_check=(file_inclusion_pattern, ), + files_to_skip=files_to_skip) problems = [] for f in input_api.AffectedSourceFiles(FilterFile): @@ -336,13 +336,13 @@ def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api): base_function_pattern, base_function_pattern)) def FilterFile(affected_file): - black_list = (_EXCLUDED_PATHS + - _TEST_CODE_EXCLUDED_PATHS + - input_api.DEFAULT_BLACK_LIST) + files_to_skip = (_EXCLUDED_PATHS + + _TEST_CODE_EXCLUDED_PATHS + + input_api.DEFAULT_FILES_TO_SKIP) return input_api.FilterSourceFile( affected_file, - white_list=(file_inclusion_pattern, ), - black_list=black_list) + files_to_check=(file_inclusion_pattern, ), + files_to_skip=files_to_skip) problems = [] for f in input_api.AffectedSourceFiles(FilterFile): @@ -363,7 +363,7 @@ def FilterFile(affected_file): def _CheckGenderNeutralInLicenses(input_api, output_api): # License files are taken as is, even if they include gendered pronouns. 
def LicenseFilter(path): - input_api.FilterSourceFile(path, black_list=_LICENSE_FILE) + input_api.FilterSourceFile(path, files_to_skip=_LICENSE_FILE) return input_api.canned_checks.CheckGenderNeutral( input_api, output_api, source_file_filter=LicenseFilter) @@ -438,7 +438,7 @@ def _CheckJSONFiles(input_api, output_api): def FilterFile(affected_file): return input_api.FilterSourceFile( affected_file, - white_list=(r'.+\.json',)) + files_to_check=(r'.+\.json',)) results = [] for f in input_api.AffectedFiles( @@ -470,7 +470,7 @@ def _CheckNoexceptAnnotations(input_api, output_api): def FilterFile(affected_file): return input_api.FilterSourceFile( affected_file, - white_list=(r'src/.*', r'test/.*')) + files_to_check=(r'src/.*', r'test/.*')) # matches any class name. diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h index a7bffbdbeb44c7..28b7275345c434 100644 --- a/deps/v8/base/trace_event/common/trace_event_common.h +++ b/deps/v8/base/trace_event/common/trace_event_common.h @@ -837,6 +837,14 @@ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT, category_group, name, id, \ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_NONE) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) +#define TRACE_EVENT_COPY_NESTABLE_ASYNC_END0(category_group, name, id) \ + INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_NESTABLE_ASYNC_END, \ + category_group, name, id, \ + TRACE_EVENT_FLAG_COPY) #define TRACE_EVENT_COPY_NESTABLE_ASYNC_BEGIN_WITH_TIMESTAMP0( \ category_group, name, id, timestamp) \ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \ diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index 9d286ebbfc0226..413b0d38107749 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -2,6 +2,7 
@@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. +import("//build/config/gclient_args.gni") import("//build/config/sanitizers/sanitizers.gni") import("//build/config/v8_target_cpu.gni") import("split_static_library.gni") @@ -57,11 +58,16 @@ declare_args() { # Implement tracing using Perfetto (https://perfetto.dev). v8_use_perfetto = false - # Override global symbol level setting for v8 + # Override global symbol level setting for v8. v8_symbol_level = symbol_level # Enable WebAssembly debugging via GDB-remote protocol. v8_enable_wasm_gdb_remote_debugging = false + + # Add fuzzilli fuzzer support. + v8_fuzzilli = false + + v8_enable_google_benchmark = checkout_google_benchmark } if (v8_use_external_startup_data == "") { diff --git a/deps/v8/include/DEPS b/deps/v8/include/DEPS index 7305ff51125503..7d60081fb480b7 100644 --- a/deps/v8/include/DEPS +++ b/deps/v8/include/DEPS @@ -2,4 +2,5 @@ include_rules = [ # v8-inspector-protocol.h depends on generated files under include/inspector. 
"+inspector", "+cppgc/common.h", + "+cppgc/visitor.h", ] diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS index 4f90a5c8c70692..9bb043db7c2a3e 100644 --- a/deps/v8/include/OWNERS +++ b/deps/v8/include/OWNERS @@ -1,4 +1,5 @@ adamk@chromium.org +cbruni@chromium.org danno@chromium.org mlippautz@chromium.org ulan@chromium.org @@ -16,4 +17,9 @@ per-file v8-inspector-protocol.h=kozyatinskiy@chromium.org per-file js_protocol.pdl=dgozman@chromium.org per-file js_protocol.pdl=pfeldman@chromium.org +# For branch updates: +per-file v8-version.h=file:../INFRA_OWNERS +per-file v8-version.h=hablich@chromium.org +per-file v8-version.h=vahl@chromium.org + # COMPONENT: Blink>JavaScript>API diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h index 49ad49c34d6bc9..ac5062ad01a55e 100644 --- a/deps/v8/include/cppgc/allocation.h +++ b/deps/v8/include/cppgc/allocation.h @@ -11,7 +11,6 @@ #include "cppgc/custom-space.h" #include "cppgc/garbage-collected.h" -#include "cppgc/heap.h" #include "cppgc/internal/api-constants.h" #include "cppgc/internal/gc-info.h" @@ -20,6 +19,15 @@ namespace cppgc { template class MakeGarbageCollectedTraitBase; +namespace internal { +class ObjectAllocator; +} // namespace internal + +/** + * AllocationHandle is used to allocate garbage-collected objects. 
+ */ +class AllocationHandle; + namespace internal { class V8_EXPORT MakeGarbageCollectedTraitInternal { @@ -36,9 +44,10 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal { atomic_mutable_bitfield->store(value, std::memory_order_release); } - static void* Allocate(cppgc::Heap* heap, size_t size, GCInfoIndex index); - static void* Allocate(cppgc::Heap* heapx, size_t size, GCInfoIndex index, - CustomSpaceIndex space_inde); + static void* Allocate(cppgc::AllocationHandle& handle, size_t size, + GCInfoIndex index); + static void* Allocate(cppgc::AllocationHandle& handle, size_t size, + GCInfoIndex index, CustomSpaceIndex space_index); friend class HeapObjectHeader; }; @@ -58,22 +67,22 @@ class MakeGarbageCollectedTraitBase private: template struct SpacePolicy { - static void* Allocate(Heap* heap, size_t size) { + static void* Allocate(AllocationHandle& handle, size_t size) { // Custom space. static_assert(std::is_base_of::value, "Custom space must inherit from CustomSpaceBase."); return internal::MakeGarbageCollectedTraitInternal::Allocate( - heap, size, internal::GCInfoTrait::Index(), + handle, size, internal::GCInfoTrait::Index(), CustomSpace::kSpaceIndex); } }; template struct SpacePolicy { - static void* Allocate(Heap* heap, size_t size) { + static void* Allocate(AllocationHandle& handle, size_t size) { // Default space. return internal::MakeGarbageCollectedTraitInternal::Allocate( - heap, size, internal::GCInfoTrait::Index()); + handle, size, internal::GCInfoTrait::Index()); } }; @@ -81,12 +90,14 @@ class MakeGarbageCollectedTraitBase /** * Allocates memory for an object of type T. * - * \param heap The heap to allocate this object on. + * \param handle AllocationHandle identifying the heap to allocate the object + * on. * \param size The size that should be reserved for the object. * \returns the memory to construct an object of type T on. 
*/ - static void* Allocate(Heap* heap, size_t size) { - return SpacePolicy::Space>::Allocate(heap, size); + static void* Allocate(AllocationHandle& handle, size_t size) { + return SpacePolicy::Space>::Allocate(handle, + size); } /** @@ -115,14 +126,15 @@ template class MakeGarbageCollectedTrait : public MakeGarbageCollectedTraitBase { public: template - static T* Call(Heap* heap, Args&&... args) { + static T* Call(AllocationHandle& handle, Args&&... args) { static_assert(internal::IsGarbageCollectedType::value, "T needs to be a garbage collected object"); static_assert( !internal::IsGarbageCollectedMixinType::value || sizeof(T) <= internal::api_constants::kLargeObjectSizeThreshold, "GarbageCollectedMixin may not be a large object"); - void* memory = MakeGarbageCollectedTraitBase::Allocate(heap, sizeof(T)); + void* memory = + MakeGarbageCollectedTraitBase::Allocate(handle, sizeof(T)); T* object = ::new (memory) T(std::forward(args)...); MakeGarbageCollectedTraitBase::MarkObjectAsFullyConstructed(object); return object; @@ -149,9 +161,9 @@ struct PostConstructionCallbackTrait { * \returns an instance of type T. */ template -T* MakeGarbageCollected(Heap* heap, Args&&... args) { +T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) { T* object = - MakeGarbageCollectedTrait::Call(heap, std::forward(args)...); + MakeGarbageCollectedTrait::Call(handle, std::forward(args)...); PostConstructionCallbackTrait::Call(object); return object; } diff --git a/deps/v8/include/cppgc/default-platform.h b/deps/v8/include/cppgc/default-platform.h new file mode 100644 index 00000000000000..24b1cd14982f9a --- /dev/null +++ b/deps/v8/include/cppgc/default-platform.h @@ -0,0 +1,76 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef INCLUDE_CPPGC_DEFAULT_PLATFORM_H_ +#define INCLUDE_CPPGC_DEFAULT_PLATFORM_H_ + +#include +#include // NOLINT(build/c++11) +#include + +#include "cppgc/platform.h" +#include "v8config.h" // NOLINT(build/include_directory) + +namespace cppgc { + +/** + * Default task runner implementation. Keep posted tasks in a list that can be + * processed by calling RunSingleTask() or RunUntilIdle(). + */ +class V8_EXPORT DefaultTaskRunner final : public cppgc::TaskRunner { + public: + DefaultTaskRunner() = default; + + DefaultTaskRunner(const DefaultTaskRunner&) = delete; + DefaultTaskRunner& operator=(const DefaultTaskRunner&) = delete; + + void PostTask(std::unique_ptr task) override; + void PostNonNestableTask(std::unique_ptr task) override; + void PostDelayedTask(std::unique_ptr task, double) override; + void PostNonNestableDelayedTask(std::unique_ptr task, + double) override; + + void PostIdleTask(std::unique_ptr task) override; + bool IdleTasksEnabled() override { return true; } + + bool RunSingleTask(); + bool RunSingleIdleTask(double duration_in_seconds); + + void RunUntilIdle(); + + private: + std::vector> tasks_; + std::vector> idle_tasks_; +}; + +/** + * Default platform implementation that uses std::thread for spawning job tasks. 
+ */ +class V8_EXPORT DefaultPlatform final : public Platform { + public: + DefaultPlatform(); + ~DefaultPlatform() noexcept override; + + cppgc::PageAllocator* GetPageAllocator() final; + + double MonotonicallyIncreasingTime() final; + + std::shared_ptr GetForegroundTaskRunner() final; + + std::unique_ptr PostJob( + cppgc::TaskPriority priority, + std::unique_ptr job_task) final; + + void WaitAllForegroundTasks(); + void WaitAllBackgroundTasks(); + + private: + std::unique_ptr page_allocator_; + std::shared_ptr foreground_task_runner_; + std::vector> job_threads_; +}; + +} // namespace cppgc + +#endif // INCLUDE_CPPGC_DEFAULT_PLATFORM_H_ diff --git a/deps/v8/include/cppgc/garbage-collected.h b/deps/v8/include/cppgc/garbage-collected.h index c263a9fecf0d96..3c800ef61b73e5 100644 --- a/deps/v8/include/cppgc/garbage-collected.h +++ b/deps/v8/include/cppgc/garbage-collected.h @@ -8,7 +8,6 @@ #include #include "cppgc/internal/api-constants.h" -#include "cppgc/macros.h" #include "cppgc/platform.h" #include "cppgc/trace-trait.h" #include "cppgc/type-traits.h" @@ -105,16 +104,6 @@ class GarbageCollectedMixin : public internal::GarbageCollectedBase { public: using IsGarbageCollectedMixinTypeMarker = void; - // Sentinel used to mark not-fully-constructed mixins. - static constexpr void* kNotFullyConstructedObject = nullptr; - - // Provide default implementation that indicate that the vtable is not yet - // set up properly. This is used to to get GCInfo objects for mixins so that - // these objects can be processed later on. - virtual TraceDescriptor GetTraceDescriptor() const { - return {kNotFullyConstructedObject, nullptr}; - } - /** * This Trace method must be overriden by objects inheriting from * GarbageCollectedMixin. @@ -122,71 +111,6 @@ class GarbageCollectedMixin : public internal::GarbageCollectedBase { virtual void Trace(cppgc::Visitor*) const {} }; -/** - * Macro defines all methods and markers needed for handling mixins. 
Must be - * used on the type that is inheriting from GarbageCollected *and* - * GarbageCollectedMixin. - * - * \code - * class Mixin : public GarbageCollectedMixin { - * public: - * void Trace(cppgc::Visitor* visitor) const override { - * // Dispatch using visitor->Trace(...); - * } - * }; - * - * class Foo : public GarbageCollected, public Mixin { - * USING_GARBAGE_COLLECTED_MIXIN(); - * public: - * void Trace(cppgc::Visitor* visitor) const override { - * // Dispatch using visitor->Trace(...); - * Mixin::Trace(visitor); - * } - * }; - * \endcode - */ -#define USING_GARBAGE_COLLECTED_MIXIN() \ - public: \ - /* Marker is used by clang to check for proper usages of the macro. */ \ - typedef int HasUsingGarbageCollectedMixinMacro; \ - \ - TraceDescriptor GetTraceDescriptor() const override { \ - static_assert( \ - internal::IsSubclassOfTemplate< \ - std::remove_const_t>, \ - cppgc::GarbageCollected>::value, \ - "Only garbage collected objects can have garbage collected mixins"); \ - return {this, TraceTrait>>::Trace}; \ - } \ - \ - private: \ - friend class internal::__thisIsHereToForceASemicolonAfterThisMacro - -/** - * Merge two or more Mixins into one. - * - * \code - * class A : public GarbageCollectedMixin {}; - * class B : public GarbageCollectedMixin {}; - * class C : public A, public B { - * MERGE_GARBAGE_COLLECTED_MIXINS(); - * public: - * }; - * \endcode - */ -#define MERGE_GARBAGE_COLLECTED_MIXINS() \ - public: \ - /* When using multiple mixins the methods become */ \ - /* ambigous. Providing additional implementations */ \ - /* disambiguate them again. 
*/ \ - TraceDescriptor GetTraceDescriptor() const override { \ - return {kNotFullyConstructedObject, nullptr}; \ - } \ - \ - private: \ - friend class internal::__thisIsHereToForceASemicolonAfterThisMacro - } // namespace cppgc #endif // INCLUDE_CPPGC_GARBAGE_COLLECTED_H_ diff --git a/deps/v8/include/cppgc/heap.h b/deps/v8/include/cppgc/heap.h index 90046c35055e2e..ca0dbeca7ffe01 100644 --- a/deps/v8/include/cppgc/heap.h +++ b/deps/v8/include/cppgc/heap.h @@ -10,9 +10,21 @@ #include "cppgc/common.h" #include "cppgc/custom-space.h" +#include "cppgc/platform.h" #include "v8config.h" // NOLINT(build/include_directory) +/** + * cppgc - A C++ garbage collection library. + */ namespace cppgc { + +class AllocationHandle; + +/** + * Implementation details of cppgc. Those details are considered internal and + * may change at any point in time without notice. Users should never rely on + * the contents of this namespace. + */ namespace internal { class Heap; } // namespace internal @@ -24,7 +36,44 @@ class V8_EXPORT Heap { */ using StackState = EmbedderStackState; + /** + * Specifies whether conservative stack scanning is supported. + */ + enum class StackSupport : uint8_t { + /** + * Conservative stack scan is supported. + */ + kSupportsConservativeStackScan, + /** + * Conservative stack scan is not supported. Embedders may use this option + * when using custom infrastructure that is unsupported by the library. + */ + kNoConservativeStackScan, + }; + + /** + * Constraints for a Heap setup. + */ + struct ResourceConstraints { + /** + * Allows the heap to grow to some initial size in bytes before triggering + * garbage collections. This is useful when it is known that applications + * need a certain minimum heap to run to avoid repeatedly invoking the + * garbage collector when growing the heap. + */ + size_t initial_heap_size_bytes = 0; + }; + + /** + * Options specifying Heap properties (e.g. custom spaces) when initializing a + * heap through Heap::Create(). 
+ */ struct HeapOptions { + /** + * Creates reasonable defaults for instantiating a Heap. + * + * \returns the HeapOptions that can be passed to Heap::Create(). + */ static HeapOptions Default() { return {}; } /** @@ -33,9 +82,34 @@ class V8_EXPORT Heap { * to the index they reside in the vector. */ std::vector> custom_spaces; + + /** + * Specifies whether conserative stack scan is supported. When conservative + * stack scan is not supported, the collector may try to invoke + * garbage collections using non-nestable task, which are guaranteed to have + * no interesting stack, through the provided Platform. If such tasks are + * not supported by the Platform, the embedder must take care of invoking + * the GC through ForceGarbageCollectionSlow(). + */ + StackSupport stack_support = StackSupport::kSupportsConservativeStackScan; + + /** + * Resource constraints specifying various properties that the internal + * GC scheduler follows. + */ + ResourceConstraints resource_constraints; }; - static std::unique_ptr Create(HeapOptions = HeapOptions::Default()); + /** + * Creates a new heap that can be used for object allocation. + * + * \param platform implemented and provided by the embedder. + * \param options HeapOptions specifying various properties for the Heap. + * \returns a new Heap instance. + */ + static std::unique_ptr Create( + std::shared_ptr platform, + HeapOptions options = HeapOptions::Default()); virtual ~Heap() = default; @@ -52,6 +126,8 @@ class V8_EXPORT Heap { const char* source, const char* reason, StackState stack_state = StackState::kMayContainHeapPointers); + AllocationHandle& GetAllocationHandle(); + private: Heap() = default; diff --git a/deps/v8/include/cppgc/internal/accessors.h b/deps/v8/include/cppgc/internal/accessors.h deleted file mode 100644 index ee0a0042fe07b7..00000000000000 --- a/deps/v8/include/cppgc/internal/accessors.h +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 the V8 project authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ -#define INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ - -#include "cppgc/internal/api-constants.h" - -namespace cppgc { - -class Heap; - -namespace internal { - -inline cppgc::Heap* GetHeapFromPayload(const void* payload) { - return *reinterpret_cast( - ((reinterpret_cast(payload) & api_constants::kPageBaseMask) + - api_constants::kGuardPageSize) + - api_constants::kHeapOffset); -} - -} // namespace internal -} // namespace cppgc - -#endif // INCLUDE_CPPGC_INTERNAL_ACCESSORS_H_ diff --git a/deps/v8/include/cppgc/internal/api-constants.h b/deps/v8/include/cppgc/internal/api-constants.h index ef910a48571f46..1303b8b861f648 100644 --- a/deps/v8/include/cppgc/internal/api-constants.h +++ b/deps/v8/include/cppgc/internal/api-constants.h @@ -17,6 +17,11 @@ namespace internal { // Internal constants to avoid exposing internal types on the API surface. namespace api_constants { + +constexpr size_t kKB = 1024; +constexpr size_t kMB = kKB * 1024; +constexpr size_t kGB = kMB * 1024; + // Offset of the uint16_t bitfield from the payload contaning the // in-construction bit. This is subtracted from the payload pointer to get // to the right bitfield. @@ -25,17 +30,15 @@ static constexpr size_t kFullyConstructedBitFieldOffsetFromPayload = // Mask for in-construction bit. static constexpr size_t kFullyConstructedBitMask = size_t{1}; -// Page constants used to align pointers to page begin. static constexpr size_t kPageSize = size_t{1} << 17; -static constexpr size_t kPageAlignment = kPageSize; -static constexpr size_t kPageBaseMask = ~(kPageAlignment - 1); -static constexpr size_t kGuardPageSize = 4096; - -// Offset of the Heap backref. 
-static constexpr size_t kHeapOffset = 0; static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2; +#if defined(CPPGC_CAGED_HEAP) +constexpr size_t kCagedHeapReservationSize = static_cast(4) * kGB; +constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize; +#endif + } // namespace api_constants } // namespace internal diff --git a/deps/v8/include/cppgc/internal/atomic-entry-flag.h b/deps/v8/include/cppgc/internal/atomic-entry-flag.h new file mode 100644 index 00000000000000..5a7d3b8f8ac4c1 --- /dev/null +++ b/deps/v8/include/cppgc/internal/atomic-entry-flag.h @@ -0,0 +1,48 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_ +#define INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_ + +#include + +namespace cppgc { +namespace internal { + +// A flag which provides a fast check whether a scope may be entered on the +// current thread, without needing to access thread-local storage or mutex. Can +// have false positives (i.e., spuriously report that it might be entered), so +// it is expected that this will be used in tandem with a precise check that the +// scope is in fact entered on that thread. +// +// Example: +// g_frobnicating_flag.MightBeEntered() && +// ThreadLocalFrobnicator().IsFrobnicating() +// +// Relaxed atomic operations are sufficient, since: +// - all accesses remain atomic +// - each thread must observe its own operations in order +// - no thread ever exits the flag more times than it enters (if used correctly) +// And so if a thread observes zero, it must be because it has observed an equal +// number of exits as entries. 
+class AtomicEntryFlag final { + public: + void Enter() { entries_.fetch_add(1, std::memory_order_relaxed); } + void Exit() { entries_.fetch_sub(1, std::memory_order_relaxed); } + + // Returns false only if the current thread is not between a call to Enter + // and a call to Exit. Returns true if this thread or another thread may + // currently be in the scope guarded by this flag. + bool MightBeEntered() const { + return entries_.load(std::memory_order_relaxed) != 0; + } + + private: + std::atomic_int entries_{0}; +}; + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_ATOMIC_ENTRY_FLAG_H_ diff --git a/deps/v8/include/cppgc/internal/caged-heap-local-data.h b/deps/v8/include/cppgc/internal/caged-heap-local-data.h new file mode 100644 index 00000000000000..8c421477384c2e --- /dev/null +++ b/deps/v8/include/cppgc/internal/caged-heap-local-data.h @@ -0,0 +1,67 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_ +#define INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_ + +#include + +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/logging.h" +#include "cppgc/platform.h" + +namespace cppgc { +namespace internal { + +class HeapBase; + +#if defined(CPPGC_YOUNG_GENERATION) + +// AgeTable contains entries that correspond to 4KB memory regions. Each entry +// can be in one of three states: kOld, kYoung or kUnknown. +class AgeTable final { + static constexpr size_t kGranularityBits = 12; // 4KiB per byte. 
+ + public: + enum class Age : uint8_t { kOld, kYoung, kUnknown }; + + static constexpr size_t kEntrySizeInBytes = 1 << kGranularityBits; + + Age& operator[](uintptr_t offset) { return table_[entry(offset)]; } + Age operator[](uintptr_t offset) const { return table_[entry(offset)]; } + + void Reset(PageAllocator* allocator); + + private: + static constexpr size_t kAgeTableSize = + api_constants::kCagedHeapReservationSize >> kGranularityBits; + + size_t entry(uintptr_t offset) const { + const size_t entry = offset >> kGranularityBits; + CPPGC_DCHECK(table_.size() > entry); + return entry; + } + + std::array table_; +}; + +static_assert(sizeof(AgeTable) == 1 * api_constants::kMB, + "Size of AgeTable is 1MB"); + +#endif // CPPGC_YOUNG_GENERATION + +struct CagedHeapLocalData final { + explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {} + + bool is_marking_in_progress = false; + HeapBase* heap_base = nullptr; +#if defined(CPPGC_YOUNG_GENERATION) + AgeTable age_table; +#endif +}; + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_CAGED_HEAP_LOCAL_DATA_H_ diff --git a/deps/v8/include/cppgc/internal/compiler-specific.h b/deps/v8/include/cppgc/internal/compiler-specific.h index e1f5c1d57fb850..c580894b35d0fe 100644 --- a/deps/v8/include/cppgc/internal/compiler-specific.h +++ b/deps/v8/include/cppgc/internal/compiler-specific.h @@ -7,6 +7,12 @@ namespace cppgc { +#if defined(__has_attribute) +#define CPPGC_HAS_ATTRIBUTE(FEATURE) __has_attribute(FEATURE) +#else +#define CPPGC_HAS_ATTRIBUTE(FEATURE) 0 +#endif + #if defined(__has_cpp_attribute) #define CPPGC_HAS_CPP_ATTRIBUTE(FEATURE) __has_cpp_attribute(FEATURE) #else @@ -21,6 +27,12 @@ namespace cppgc { #define CPPGC_NO_UNIQUE_ADDRESS #endif +#if CPPGC_HAS_ATTRIBUTE(unused) // NOLINTNEXTLINE +#define CPPGC_UNUSED __attribute__((unused)) +#else +#define CPPGC_UNUSED +#endif + } // namespace cppgc #endif // INCLUDE_CPPGC_INTERNAL_COMPILER_SPECIFIC_H_ diff --git 
a/deps/v8/include/cppgc/internal/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h index 9aac1361c61afd..3d361e6d71adb6 100644 --- a/deps/v8/include/cppgc/internal/gc-info.h +++ b/deps/v8/include/cppgc/internal/gc-info.h @@ -8,6 +8,7 @@ #include #include "cppgc/internal/finalizer-trait.h" +#include "cppgc/trace-trait.h" #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { @@ -18,7 +19,7 @@ using GCInfoIndex = uint16_t; class V8_EXPORT RegisteredGCInfoIndex final { public: RegisteredGCInfoIndex(FinalizationCallback finalization_callback, - bool has_v_table); + TraceCallback trace_callback, bool has_v_table); GCInfoIndex GetIndex() const { return index_; } private: @@ -32,7 +33,8 @@ struct GCInfoTrait { static GCInfoIndex Index() { static_assert(sizeof(T), "T must be fully defined"); static const RegisteredGCInfoIndex registered_index( - FinalizerTrait::kCallback, std::is_polymorphic::value); + FinalizerTrait::kCallback, TraceTrait::Trace, + std::is_polymorphic::value); return registered_index.GetIndex(); } }; diff --git a/deps/v8/include/cppgc/internal/persistent-node.h b/deps/v8/include/cppgc/internal/persistent-node.h index 11cf69623e8dad..e05efe362131d8 100644 --- a/deps/v8/include/cppgc/internal/persistent-node.h +++ b/deps/v8/include/cppgc/internal/persistent-node.h @@ -56,6 +56,11 @@ class PersistentNode final { bool IsUsed() const { return trace_; } + void* owner() const { + CPPGC_DCHECK(IsUsed()); + return owner_; + } + private: // PersistentNode acts as a designated union: // If trace_ != nullptr, owner_ points to the corresponding Persistent handle. @@ -67,11 +72,13 @@ class PersistentNode final { TraceCallback trace_ = nullptr; }; -class V8_EXPORT PersistentRegion { +class V8_EXPORT PersistentRegion final { using PersistentNodeSlots = std::array; public: PersistentRegion() = default; + // Clears Persistent fields to avoid stale pointers after heap teardown. 
+ ~PersistentRegion(); PersistentRegion(const PersistentRegion&) = delete; PersistentRegion& operator=(const PersistentRegion&) = delete; diff --git a/deps/v8/include/cppgc/internal/pointer-policies.h b/deps/v8/include/cppgc/internal/pointer-policies.h index fe8d94b57a68bb..a6cd4e8586d9cf 100644 --- a/deps/v8/include/cppgc/internal/pointer-policies.h +++ b/deps/v8/include/cppgc/internal/pointer-policies.h @@ -8,6 +8,7 @@ #include #include +#include "cppgc/internal/write-barrier.h" #include "cppgc/source-location.h" #include "v8config.h" // NOLINT(build/include_directory) @@ -26,8 +27,8 @@ struct DijkstraWriteBarrierPolicy { // Since in initializing writes the source object is always white, having no // barrier doesn't break the tri-color invariant. } - static void AssigningBarrier(const void*, const void*) { - // TODO(chromium:1056170): Add actual implementation. + static void AssigningBarrier(const void* slot, const void* value) { + WriteBarrier::MarkingBarrier(slot, value); } }; @@ -116,7 +117,7 @@ class BasicMember; struct SentinelPointer { template operator T*() const { // NOLINT - static constexpr intptr_t kSentinelValue = -1; + static constexpr intptr_t kSentinelValue = 1; return reinterpret_cast(kSentinelValue); } // Hidden friends. 
diff --git a/deps/v8/include/cppgc/internal/prefinalizer-handler.h b/deps/v8/include/cppgc/internal/prefinalizer-handler.h index 939a9b8ff0a8fd..ea0eca02a0e492 100644 --- a/deps/v8/include/cppgc/internal/prefinalizer-handler.h +++ b/deps/v8/include/cppgc/internal/prefinalizer-handler.h @@ -15,14 +15,13 @@ class V8_EXPORT PreFinalizerRegistrationDispatcher final { public: using PreFinalizerCallback = bool (*)(const LivenessBroker&, void*); struct PreFinalizer { - void* object_; - PreFinalizerCallback callback_; + void* object; + PreFinalizerCallback callback; bool operator==(const PreFinalizer& other); }; - static void RegisterPrefinalizer(cppgc::Heap* heap, - PreFinalizer prefinalzier); + static void RegisterPrefinalizer(PreFinalizer pre_finalizer); }; } // namespace internal diff --git a/deps/v8/include/cppgc/internal/process-heap.h b/deps/v8/include/cppgc/internal/process-heap.h new file mode 100644 index 00000000000000..0f742a50a9cb97 --- /dev/null +++ b/deps/v8/include/cppgc/internal/process-heap.h @@ -0,0 +1,34 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_ +#define INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_ + +#include "cppgc/internal/atomic-entry-flag.h" +#include "v8config.h" // NOLINT(build/include_directory) + +namespace cppgc { +namespace internal { + +class V8_EXPORT ProcessHeap final { + public: + static void EnterIncrementalOrConcurrentMarking() { + concurrent_marking_flag_.Enter(); + } + static void ExitIncrementalOrConcurrentMarking() { + concurrent_marking_flag_.Exit(); + } + + static bool IsAnyIncrementalOrConcurrentMarking() { + return concurrent_marking_flag_.MightBeEntered(); + } + + private: + static AtomicEntryFlag concurrent_marking_flag_; +}; + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_PROCESS_HEAP_H_ diff --git a/deps/v8/include/cppgc/internal/write-barrier.h b/deps/v8/include/cppgc/internal/write-barrier.h new file mode 100644 index 00000000000000..5bf550b02610dd --- /dev/null +++ b/deps/v8/include/cppgc/internal/write-barrier.h @@ -0,0 +1,78 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_ +#define INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_ + +#include "cppgc/internal/api-constants.h" +#include "cppgc/internal/process-heap.h" +#include "v8config.h" // NOLINT(build/include_directory) + +#if defined(CPPGC_CAGED_HEAP) +#include "cppgc/internal/caged-heap-local-data.h" +#endif + +namespace cppgc { +namespace internal { + +class V8_EXPORT WriteBarrier final { + public: + static V8_INLINE void MarkingBarrier(const void* slot, const void* value) { +#if defined(CPPGC_CAGED_HEAP) + const uintptr_t start = + reinterpret_cast(value) & + ~(api_constants::kCagedHeapReservationAlignment - 1); + const uintptr_t slot_offset = reinterpret_cast(slot) - start; + if (slot_offset > api_constants::kCagedHeapReservationSize) { + // Check if slot is on stack or value is sentinel or nullptr. This relies + // on the fact that kSentinelPointer is encoded as 0x1. + return; + } + + CagedHeapLocalData* local_data = + reinterpret_cast(start); + if (V8_UNLIKELY(local_data->is_marking_in_progress)) { + MarkingBarrierSlow(value); + return; + } +#if defined(CPPGC_YOUNG_GENERATION) + GenerationalBarrier(local_data, slot, slot_offset, + reinterpret_cast(value) - start); +#endif +#else + if (V8_LIKELY(!ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) return; + + MarkingBarrierSlowWithSentinelCheck(value); +#endif // CPPGC_CAGED_HEAP + } + + private: + WriteBarrier() = delete; + + static void MarkingBarrierSlow(const void* value); + static void MarkingBarrierSlowWithSentinelCheck(const void* value); + +#if defined(CPPGC_YOUNG_GENERATION) + static V8_INLINE void GenerationalBarrier(CagedHeapLocalData* local_data, + const void* slot, + uintptr_t slot_offset, + uintptr_t value_offset) { + const AgeTable& age_table = local_data->age_table; + + // Bail out if the slot is in young generation. 
+ if (V8_LIKELY(age_table[slot_offset] == AgeTable::Age::kYoung)) return; + + GenerationalBarrierSlow(local_data, age_table, slot, value_offset); + } + + static void GenerationalBarrierSlow(CagedHeapLocalData* local_data, + const AgeTable& ageTable, + const void* slot, uintptr_t value_offset); +#endif +}; + +} // namespace internal +} // namespace cppgc + +#endif // INCLUDE_CPPGC_INTERNAL_WRITE_BARRIER_H_ diff --git a/deps/v8/include/cppgc/liveness-broker.h b/deps/v8/include/cppgc/liveness-broker.h index 69dbc11f1f4a95..883be46240ff86 100644 --- a/deps/v8/include/cppgc/liveness-broker.h +++ b/deps/v8/include/cppgc/liveness-broker.h @@ -16,6 +16,30 @@ namespace internal { class LivenessBrokerFactory; } // namespace internal +/** + * The broker is passed to weak callbacks to allow (temporarily) querying + * the liveness state of an object. References to non-live objects must be + * cleared when IsHeapObjectAlive() returns false. + * + * \code + * class GCedWithCustomWeakCallback final + * : public GarbageCollected { + * public: + * UntracedMember bar; + * + * void CustomWeakCallbackMethod(const LivenessBroker& broker) { + * if (!broker.IsHeapObjectAlive(bar)) + * bar = nullptr; + * } + * + * void Trace(cppgc::Visitor* visitor) const { + * visitor->RegisterWeakCallbackMethod< + * GCedWithCustomWeakCallback, + * &GCedWithCustomWeakCallback::CustomWeakCallbackMethod>(this); + * } + * }; + * \endcode + */ class V8_EXPORT LivenessBroker final { public: template @@ -25,12 +49,6 @@ class V8_EXPORT LivenessBroker final { TraceTrait::GetTraceDescriptor(object).base_object_payload); } - template - bool IsHeapObjectAlive(const WeakMember& weak_member) const { - return (weak_member != kSentinelPointer) && - IsHeapObjectAlive(weak_member.Get()); - } - template bool IsHeapObjectAlive(const UntracedMember& untraced_member) const { return (untraced_member != kSentinelPointer) && diff --git a/deps/v8/include/cppgc/macros.h b/deps/v8/include/cppgc/macros.h index 
7c7a10e433a894..c0b1814e294cfa 100644 --- a/deps/v8/include/cppgc/macros.h +++ b/deps/v8/include/cppgc/macros.h @@ -5,21 +5,19 @@ #ifndef INCLUDE_CPPGC_MACROS_H_ #define INCLUDE_CPPGC_MACROS_H_ -namespace cppgc { +#include "cppgc/internal/compiler-specific.h" -namespace internal { -class __thisIsHereToForceASemicolonAfterThisMacro {}; -} // namespace internal +namespace cppgc { // Use if the object is only stack allocated. -#define CPPGC_STACK_ALLOCATED() \ - public: \ - using IsStackAllocatedTypeMarker = int; \ - \ - private: \ - void* operator new(size_t) = delete; \ - void* operator new(size_t, void*) = delete; \ - friend class internal::__thisIsHereToForceASemicolonAfterThisMacro +#define CPPGC_STACK_ALLOCATED() \ + public: \ + using IsStackAllocatedTypeMarker CPPGC_UNUSED = int; \ + \ + private: \ + void* operator new(size_t) = delete; \ + void* operator new(size_t, void*) = delete; \ + static_assert(true, "Force semicolon.") } // namespace cppgc diff --git a/deps/v8/include/cppgc/member.h b/deps/v8/include/cppgc/member.h index a183edb96fd030..22c1adc0af7c3c 100644 --- a/deps/v8/include/cppgc/member.h +++ b/deps/v8/include/cppgc/member.h @@ -19,19 +19,43 @@ class Visitor; namespace internal { +class MemberBase { + protected: + MemberBase() = default; + explicit MemberBase(void* value) : raw_(value) {} + + void* const* GetRawSlot() const { return &raw_; } + void* GetRaw() const { return raw_; } + void SetRaw(void* value) { raw_ = value; } + + void* GetRawAtomic() const { + return reinterpret_cast*>(&raw_)->load( + std::memory_order_relaxed); + } + void SetRawAtomic(void* value) { + reinterpret_cast*>(&raw_)->store( + value, std::memory_order_relaxed); + } + + void ClearFromGC() const { raw_ = nullptr; } + + private: + mutable void* raw_ = nullptr; +}; + // The basic class from which all Member classes are 'generated'. 
template -class BasicMember : private CheckingPolicy { +class BasicMember final : private MemberBase, private CheckingPolicy { public: using PointeeType = T; constexpr BasicMember() = default; constexpr BasicMember(std::nullptr_t) {} // NOLINT - BasicMember(SentinelPointer s) : raw_(s) {} // NOLINT - BasicMember(T* raw) : raw_(raw) { // NOLINT + BasicMember(SentinelPointer s) : MemberBase(s) {} // NOLINT + BasicMember(T* raw) : MemberBase(raw) { // NOLINT InitializingWriteBarrier(); - this->CheckPointer(raw_); + this->CheckPointer(Get()); } BasicMember(T& raw) : BasicMember(&raw) {} // NOLINT BasicMember(const BasicMember& other) : BasicMember(other.Get()) {} @@ -106,9 +130,12 @@ class BasicMember : private CheckingPolicy { T* operator->() const { return Get(); } T& operator*() const { return *Get(); } - T* Get() const { + // CFI cast exemption to allow passing SentinelPointer through T* and support + // heterogeneous assignments between different Member and Persistent handles + // based on their actual types. + V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const { // Executed by the mutator, hence non atomic load. 
- return raw_; + return static_cast(MemberBase::GetRaw()); } void Clear() { SetRawAtomic(nullptr); } @@ -120,25 +147,18 @@ class BasicMember : private CheckingPolicy { } private: - void SetRawAtomic(T* raw) { - reinterpret_cast*>(&raw_)->store(raw, - std::memory_order_relaxed); - } T* GetRawAtomic() const { - return reinterpret_cast*>(&raw_)->load( - std::memory_order_relaxed); + return static_cast(MemberBase::GetRawAtomic()); } void InitializingWriteBarrier() const { - WriteBarrierPolicy::InitializingBarrier( - reinterpret_cast(&raw_), static_cast(raw_)); + WriteBarrierPolicy::InitializingBarrier(GetRawSlot(), GetRaw()); } void AssigningWriteBarrier() const { - WriteBarrierPolicy::AssigningBarrier(reinterpret_cast(&raw_), - static_cast(raw_)); + WriteBarrierPolicy::AssigningBarrier(GetRawSlot(), GetRaw()); } - T* raw_ = nullptr; + void ClearFromGC() const { MemberBase::ClearFromGC(); } friend class cppgc::Visitor; }; diff --git a/deps/v8/include/cppgc/persistent.h b/deps/v8/include/cppgc/persistent.h index fc6b0b9d92efa1..c2d8a7a5a642c6 100644 --- a/deps/v8/include/cppgc/persistent.h +++ b/deps/v8/include/cppgc/persistent.h @@ -15,14 +15,43 @@ #include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { + +class Visitor; + namespace internal { +class PersistentBase { + protected: + PersistentBase() = default; + explicit PersistentBase(void* raw) : raw_(raw) {} + + void* GetValue() const { return raw_; } + void SetValue(void* value) { raw_ = value; } + + PersistentNode* GetNode() const { return node_; } + void SetNode(PersistentNode* node) { node_ = node; } + + // Performs a shallow clear which assumes that internal persistent nodes are + // destroyed elsewhere. + void ClearFromGC() const { + raw_ = nullptr; + node_ = nullptr; + } + + private: + mutable void* raw_ = nullptr; + mutable PersistentNode* node_ = nullptr; + + friend class PersistentRegion; +}; + // The basic class from which all Persistent classes are generated. 
template -class BasicPersistent : public LocationPolicy, - private WeaknessPolicy, - private CheckingPolicy { +class BasicPersistent final : public PersistentBase, + public LocationPolicy, + private WeaknessPolicy, + private CheckingPolicy { public: using typename WeaknessPolicy::IsStrongPersistent; using PointeeType = T; @@ -38,15 +67,15 @@ class BasicPersistent : public LocationPolicy, BasicPersistent( // NOLINT SentinelPointer s, const SourceLocation& loc = SourceLocation::Current()) - : LocationPolicy(loc), raw_(s) {} + : PersistentBase(s), LocationPolicy(loc) {} - // Raw value contstructors. + // Raw value constructors. BasicPersistent(T* raw, // NOLINT const SourceLocation& loc = SourceLocation::Current()) - : LocationPolicy(loc), raw_(raw) { + : PersistentBase(raw), LocationPolicy(loc) { if (!IsValid()) return; - node_ = WeaknessPolicy::GetPersistentRegion(raw_).AllocateNode( - this, &BasicPersistent::Trace); + SetNode(WeaknessPolicy::GetPersistentRegion(GetValue()) + .AllocateNode(this, &BasicPersistent::Trace)); this->CheckPointer(Get()); } @@ -74,13 +103,11 @@ class BasicPersistent : public LocationPolicy, BasicPersistent( BasicPersistent&& other, const SourceLocation& loc = SourceLocation::Current()) noexcept - : LocationPolicy(std::move(other)), - raw_(std::move(other.raw_)), - node_(std::move(other.node_)) { + : PersistentBase(std::move(other)), LocationPolicy(std::move(other)) { if (!IsValid()) return; - node_->UpdateOwner(this); - other.raw_ = nullptr; - other.node_ = nullptr; + GetNode()->UpdateOwner(this); + other.SetValue(nullptr); + other.SetNode(nullptr); this->CheckPointer(Get()); } @@ -114,13 +141,12 @@ class BasicPersistent : public LocationPolicy, BasicPersistent& operator=(BasicPersistent&& other) { if (this == &other) return *this; Clear(); + PersistentBase::operator=(std::move(other)); LocationPolicy::operator=(std::move(other)); - raw_ = std::move(other.raw_); - node_ = std::move(other.node_); if (!IsValid()) return *this; - 
node_->UpdateOwner(this); - other.raw_ = nullptr; - other.node_ = nullptr; + GetNode()->UpdateOwner(this); + other.SetValue(nullptr); + other.SetNode(nullptr); this->CheckPointer(Get()); return *this; } @@ -156,7 +182,12 @@ class BasicPersistent : public LocationPolicy, T* operator->() const { return Get(); } T& operator*() const { return *Get(); } - T* Get() const { return raw_; } + // CFI cast exemption to allow passing SentinelPointer through T* and support + // heterogeneous assignments between different Member and Persistent handles + // based on their actual types. + V8_CLANG_NO_SANITIZE("cfi-unrelated-cast") T* Get() const { + return static_cast(GetValue()); + } void Clear() { Assign(nullptr); } @@ -176,29 +207,35 @@ class BasicPersistent : public LocationPolicy, // Ideally, handling kSentinelPointer would be done by the embedder. On the // other hand, having Persistent aware of it is beneficial since no node // gets wasted. - return raw_ != nullptr && raw_ != kSentinelPointer; + return GetValue() != nullptr && GetValue() != kSentinelPointer; } void Assign(T* ptr) { if (IsValid()) { if (ptr && ptr != kSentinelPointer) { // Simply assign the pointer reusing the existing node. 
- raw_ = ptr; + SetValue(ptr); this->CheckPointer(ptr); return; } - WeaknessPolicy::GetPersistentRegion(raw_).FreeNode(node_); - node_ = nullptr; + WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode()); + SetNode(nullptr); } - raw_ = ptr; + SetValue(ptr); if (!IsValid()) return; - node_ = WeaknessPolicy::GetPersistentRegion(raw_).AllocateNode( - this, &BasicPersistent::Trace); + SetNode(WeaknessPolicy::GetPersistentRegion(GetValue()) + .AllocateNode(this, &BasicPersistent::Trace)); this->CheckPointer(Get()); } - T* raw_ = nullptr; - PersistentNode* node_ = nullptr; + void ClearFromGC() const { + if (IsValid()) { + WeaknessPolicy::GetPersistentRegion(GetValue()).FreeNode(GetNode()); + PersistentBase::ClearFromGC(); + } + } + + friend class cppgc::Visitor; }; template GetForegroundTaskRunner() { + return nullptr; + } + + /** + * Posts |job_task| to run in parallel. Returns a JobHandle associated with + * the Job, which can be joined or canceled. + * This avoids degenerate cases: + * - Calling CallOnWorkerThread() for each work item, causing significant + * overhead. + * - Fixed number of CallOnWorkerThread() calls that split the work and might + * run for a long time. This is problematic when many components post + * "num cores" tasks and all expect to use all the cores. In these cases, + * the scheduler lacks context to be fair to multiple same-priority requests + * and/or ability to request lower priority work to yield when high priority + * work comes in. + * A canonical implementation of |job_task| looks like: + * class MyJobTask : public JobTask { + * public: + * MyJobTask(...) : worker_queue_(...) {} + * // JobTask: + * void Run(JobDelegate* delegate) override { + * while (!delegate->ShouldYield()) { + * // Smallest unit of work. + * auto work_item = worker_queue_.TakeWorkItem(); // Thread safe. 
+ * if (!work_item) return; + * ProcessWork(work_item); + * } + * } + * + * size_t GetMaxConcurrency() const override { + * return worker_queue_.GetSize(); // Thread safe. + * } + * }; + * auto handle = PostJob(TaskPriority::kUserVisible, + * std::make_unique(...)); + * handle->Join(); + * + * PostJob() and methods of the returned JobHandle/JobDelegate, must never be + * called while holding a lock that could be acquired by JobTask::Run or + * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is + * because [1] JobTask::GetMaxConcurrency may be invoked while holding + * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B) + * if that lock is *never* held while calling back into JobHandle from any + * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or + * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle + * (B=>JobHandle::foo=>B deadlock). + * + * A sufficient PostJob() implementation that uses the default Job provided in + * libplatform looks like: + * std::unique_ptr PostJob( + * TaskPriority priority, std::unique_ptr job_task) override { + * return std::make_unique( + * std::make_shared( + * this, std::move(job_task), kNumThreads)); + * } + */ + virtual std::unique_ptr PostJob( + TaskPriority priority, std::unique_ptr job_task) { + return nullptr; + } +}; + +/** + * Process-global initialization of the garbage collector. Must be called before + * creating a Heap. + */ +V8_EXPORT void InitializeProcess(PageAllocator*); + +/** + * Must be called after destroying the last used heap. 
+ */ +V8_EXPORT void ShutdownProcess(); namespace internal { diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h index 2f6d68a1dac808..bde76429ec95c4 100644 --- a/deps/v8/include/cppgc/prefinalizer.h +++ b/deps/v8/include/cppgc/prefinalizer.h @@ -5,11 +5,9 @@ #ifndef INCLUDE_CPPGC_PREFINALIZER_H_ #define INCLUDE_CPPGC_PREFINALIZER_H_ -#include "cppgc/internal/accessors.h" #include "cppgc/internal/compiler-specific.h" #include "cppgc/internal/prefinalizer-handler.h" #include "cppgc/liveness-broker.h" -#include "cppgc/macros.h" namespace cppgc { @@ -23,7 +21,7 @@ class PrefinalizerRegistration final { "USING_PRE_FINALIZER(T) must be defined."); cppgc::internal::PreFinalizerRegistrationDispatcher::RegisterPrefinalizer( - internal::GetHeapFromPayload(self), {self, T::InvokePreFinalizer}); + {self, T::InvokePreFinalizer}); } void* operator new(size_t, void* location) = delete; @@ -32,22 +30,22 @@ class PrefinalizerRegistration final { } // namespace internal -#define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \ - public: \ - static bool InvokePreFinalizer(const LivenessBroker& liveness_broker, \ - void* object) { \ - static_assert(internal::IsGarbageCollectedTypeV, \ - "Only garbage collected objects can have prefinalizers"); \ - Class* self = static_cast(object); \ - if (liveness_broker.IsHeapObjectAlive(self)) return false; \ - self->Class::PreFinalizer(); \ - return true; \ - } \ - \ - private: \ - CPPGC_NO_UNIQUE_ADDRESS internal::PrefinalizerRegistration \ - prefinalizer_dummy_{this}; \ - friend class internal::__thisIsHereToForceASemicolonAfterThisMacro +#define CPPGC_USING_PRE_FINALIZER(Class, PreFinalizer) \ + public: \ + static bool InvokePreFinalizer(const cppgc::LivenessBroker& liveness_broker, \ + void* object) { \ + static_assert(cppgc::internal::IsGarbageCollectedTypeV, \ + "Only garbage collected objects can have prefinalizers"); \ + Class* self = static_cast(object); \ + if 
(liveness_broker.IsHeapObjectAlive(self)) return false; \ + self->Class::PreFinalizer(); \ + return true; \ + } \ + \ + private: \ + CPPGC_NO_UNIQUE_ADDRESS cppgc::internal::PrefinalizerRegistration \ + prefinalizer_dummy_{this}; \ + static_assert(true, "Force semicolon.") } // namespace cppgc diff --git a/deps/v8/include/cppgc/source-location.h b/deps/v8/include/cppgc/source-location.h index 8cc52d6a539c2f..139c9d86c08140 100644 --- a/deps/v8/include/cppgc/source-location.h +++ b/deps/v8/include/cppgc/source-location.h @@ -23,10 +23,16 @@ namespace cppgc { -// Encapsulates source location information. Mimics C++20's -// std::source_location. +/** + * Encapsulates source location information. Mimics C++20's + * std::source_location. + */ class V8_EXPORT SourceLocation final { public: + /** + * Construct source location information corresponding to the location of the + * call site. + */ #if CPPGC_SUPPORTS_SOURCE_LOCATION static constexpr SourceLocation Current( const char* function = __builtin_FUNCTION(), @@ -37,12 +43,38 @@ class V8_EXPORT SourceLocation final { static constexpr SourceLocation Current() { return SourceLocation(); } #endif // CPPGC_SUPPORTS_SOURCE_LOCATION + /** + * Constructs unspecified source location information. + */ constexpr SourceLocation() = default; + /** + * Returns the name of the function associated with the position represented + * by this object, if any. + * + * \returns the function name as cstring. + */ constexpr const char* Function() const { return function_; } + + /** + * Returns the name of the current source file represented by this object. + * + * \returns the file name as cstring. + */ constexpr const char* FileName() const { return file_; } + + /** + * Returns the line number represented by this object. + * + * \returns the line number. + */ constexpr size_t Line() const { return line_; } + /** + * Returns a human-readable string representing this object. 
+ * + * \returns a human-readable string representing source location information. + */ std::string ToString() const; private: diff --git a/deps/v8/include/cppgc/trace-trait.h b/deps/v8/include/cppgc/trace-trait.h index e246bc53b7d9aa..b0a7c7235c1ba2 100644 --- a/deps/v8/include/cppgc/trace-trait.h +++ b/deps/v8/include/cppgc/trace-trait.h @@ -6,7 +6,9 @@ #define INCLUDE_CPPGC_TRACE_TRAIT_H_ #include + #include "cppgc/type-traits.h" +#include "v8config.h" // NOLINT(build/include_directory) namespace cppgc { @@ -14,6 +16,8 @@ class Visitor; namespace internal { +// Implementation of the default TraceTrait handling GarbageCollected and +// GarbageCollectedMixin. template ::type>> @@ -21,25 +25,65 @@ struct TraceTraitImpl; } // namespace internal -using TraceCallback = void (*)(Visitor*, const void*); - -// TraceDescriptor is used to describe how to trace an object. +/** + * Callback for invoking tracing on a given object. + * + * \param visitor The visitor to dispatch to. + * \param object The object to invoke tracing on. + */ +using TraceCallback = void (*)(Visitor* visitor, const void* object); + +/** + * Describes how to trace an object, i.e., how to visit all Oilpan-relevant + * fields of an object. + */ struct TraceDescriptor { - // The adjusted base pointer of the object that should be traced. + /** + * Adjusted base pointer, i.e., the pointer to the class inheriting directly + * from GarbageCollected, of the object that is being traced. + */ const void* base_object_payload; - // A callback for tracing the object. + /** + * Callback for tracing the object. + */ TraceCallback callback; }; +namespace internal { + +struct V8_EXPORT TraceTraitFromInnerAddressImpl { + static TraceDescriptor GetTraceDescriptor(const void* address); +}; + +} // namespace internal + +/** + * Trait specifying how the garbage collector processes an object of type T. + * + * Advanced users may override handling by creating a specialization for their + * type. 
+ */ template struct TraceTrait { static_assert(internal::IsTraceableV, "T must have a Trace() method"); + /** + * Accessor for retrieving a TraceDescriptor to process an object of type T. + * + * \param self The object to be processed. + * \returns a TraceDescriptor to process the object. + */ static TraceDescriptor GetTraceDescriptor(const void* self) { return internal::TraceTraitImpl::GetTraceDescriptor( static_cast(self)); } + /** + * Function invoking the tracing for an object of type T. + * + * \param visitor The visitor to dispatch to. + * \param self The object to invoke tracing on. + */ static void Trace(Visitor* visitor, const void* self) { static_cast(self)->Trace(visitor); } @@ -57,7 +101,7 @@ struct TraceTraitImpl { template struct TraceTraitImpl { static TraceDescriptor GetTraceDescriptor(const void* self) { - return static_cast(self)->GetTraceDescriptor(); + return internal::TraceTraitFromInnerAddressImpl::GetTraceDescriptor(self); } }; diff --git a/deps/v8/include/cppgc/visitor.h b/deps/v8/include/cppgc/visitor.h index a73a4abb2bdb7c..c671c55e058300 100644 --- a/deps/v8/include/cppgc/visitor.h +++ b/deps/v8/include/cppgc/visitor.h @@ -14,18 +14,53 @@ #include "cppgc/trace-trait.h" namespace cppgc { + namespace internal { +template +class BasicPersistent; +class ConservativeTracingVisitor; class VisitorBase; +class VisitorFactory; + } // namespace internal using WeakCallback = void (*)(const LivenessBroker&, const void*); /** * Visitor passed to trace methods. All managed pointers must have called the - * visitor's trace method on them. + * Visitor's trace method on them. 
+ * + * \code + * class Foo final : public GarbageCollected { + * public: + * void Trace(Visitor* visitor) const { + * visitor->Trace(foo_); + * visitor->Trace(weak_foo_); + * } + * private: + * Member foo_; + * WeakMember weak_foo_; + * }; + * \endcode */ class Visitor { public: + class Key { + private: + Key() = default; + friend class internal::VisitorFactory; + }; + + explicit Visitor(Key) {} + + virtual ~Visitor() = default; + + /** + * Trace method for Member. + * + * \param member Member reference retaining an object. + */ template void Trace(const Member& member) { const T* value = member.GetRawAtomic(); @@ -33,11 +68,16 @@ class Visitor { Trace(value); } + /** + * Trace method for WeakMember. + * + * \param weak_member WeakMember reference weakly retaining an object. + */ template void Trace(const WeakMember& weak_member) { - static_assert(sizeof(T), "T must be fully defined"); + static_assert(sizeof(T), "Pointee type must be fully defined."); static_assert(internal::IsGarbageCollectedType::value, - "T must be GarabgeCollected or GarbageCollectedMixin type"); + "T must be GarbageCollected or GarbageCollectedMixin type"); const T* value = weak_member.GetRawAtomic(); @@ -52,41 +92,42 @@ class Visitor { &HandleWeak>, &weak_member); } - template * = nullptr> - void TraceRoot(const Persistent& p, const SourceLocation& loc) { - using PointeeType = typename Persistent::PointeeType; - static_assert(sizeof(PointeeType), - "Persistent's pointee type must be fully defined"); - static_assert(internal::IsGarbageCollectedType::value, - "Persisent's pointee type must be GarabgeCollected or " - "GarbageCollectedMixin"); - if (!p.Get()) { - return; - } - VisitRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get())); - } - - template < - typename WeakPersistent, - std::enable_if_t* = nullptr> - void TraceRoot(const WeakPersistent& p, const SourceLocation& loc) { - using PointeeType = typename WeakPersistent::PointeeType; - static_assert(sizeof(PointeeType), - "Persistent's 
pointee type must be fully defined"); - static_assert(internal::IsGarbageCollectedType::value, - "Persisent's pointee type must be GarabgeCollected or " - "GarbageCollectedMixin"); - VisitWeakRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get()), - &HandleWeak, &p); + /** + * Trace method for inlined objects that are not allocated themselves but + * otherwise follow managed heap layout and have a Trace() method. + * + * \param object reference of the inlined object. + */ + template + void Trace(const T& object) { +#if V8_ENABLE_CHECKS + // This object is embedded in potentially multiple nested objects. The + // outermost object must not be in construction as such objects are (a) not + // processed immediately, and (b) only processed conservatively if not + // otherwise possible. + CheckObjectNotInConstruction(&object); +#endif // V8_ENABLE_CHECKS + TraceTrait::Trace(this, &object); } + /** + * Registers a weak callback method on the object of type T. See + * LivenessBroker for an usage example. + * + * \param object of type T specifying a weak callback method. + */ template - void RegisterWeakCallbackMethod(const T* obj) { - RegisterWeakCallback(&WeakCallbackMethodDelegate, obj); + void RegisterWeakCallbackMethod(const T* object) { + RegisterWeakCallback(&WeakCallbackMethodDelegate, object); } - virtual void RegisterWeakCallback(WeakCallback, const void*) {} + /** + * Registers a weak callback that is invoked during garbage collection. + * + * \param callback to be invoked. + * \param data custom data that is passed to the callback. + */ + virtual void RegisterWeakCallback(WeakCallback callback, const void* data) {} protected: virtual void Visit(const void* self, TraceDescriptor) {} @@ -108,28 +149,62 @@ class Visitor { template static void HandleWeak(const LivenessBroker& info, const void* object) { const PointerType* weak = static_cast(object); + // Sentinel values are preserved for weak pointers. 
+ if (*weak == kSentinelPointer) return; const auto* raw = weak->Get(); - if (raw && !info.IsHeapObjectAlive(raw)) { - // Object is passed down through the marker as const. Alternatives are - // - non-const Trace method; - // - mutable pointer in MemberBase; - const_cast(weak)->Clear(); + if (!info.IsHeapObjectAlive(raw)) { + weak->ClearFromGC(); } } - Visitor() = default; + template * = nullptr> + void TraceRoot(const Persistent& p, const SourceLocation& loc) { + using PointeeType = typename Persistent::PointeeType; + static_assert(sizeof(PointeeType), + "Persistent's pointee type must be fully defined"); + static_assert(internal::IsGarbageCollectedType::value, + "Persistent's pointee type must be GarbageCollected or " + "GarbageCollectedMixin"); + if (!p.Get()) { + return; + } + VisitRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get())); + } + + template < + typename WeakPersistent, + std::enable_if_t* = nullptr> + void TraceRoot(const WeakPersistent& p, const SourceLocation& loc) { + using PointeeType = typename WeakPersistent::PointeeType; + static_assert(sizeof(PointeeType), + "Persistent's pointee type must be fully defined"); + static_assert(internal::IsGarbageCollectedType::value, + "Persistent's pointee type must be GarbageCollected or " + "GarbageCollectedMixin"); + VisitWeakRoot(p.Get(), TraceTrait::GetTraceDescriptor(p.Get()), + &HandleWeak, &p); + } template void Trace(const T* t) { - static_assert(sizeof(T), "T must be fully defined"); + static_assert(sizeof(T), "Pointee type must be fully defined."); static_assert(internal::IsGarbageCollectedType::value, - "T must be GarabgeCollected or GarbageCollectedMixin type"); + "T must be GarbageCollected or GarbageCollectedMixin type"); if (!t) { return; } Visit(t, TraceTrait::GetTraceDescriptor(t)); } +#if V8_ENABLE_CHECKS + V8_EXPORT void CheckObjectNotInConstruction(const void* address); +#endif // V8_ENABLE_CHECKS + + template + friend class internal::BasicPersistent; + friend class 
internal::ConservativeTracingVisitor; friend class internal::VisitorBase; }; diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl index 706c37f958e4a0..24a957f7fb7cd4 100644 --- a/deps/v8/include/js_protocol.pdl +++ b/deps/v8/include/js_protocol.pdl @@ -85,6 +85,13 @@ domain Debugger integer lineNumber integer columnNumber + # Location range within one script. + experimental type LocationRange extends object + properties + Runtime.ScriptId scriptId + ScriptPosition start + ScriptPosition end + # JavaScript call frame. Array of call frames form the call stack. type CallFrame extends object properties @@ -472,12 +479,17 @@ domain Debugger # Debugger will pause on the execution of the first async task which was scheduled # before next pause. experimental optional boolean breakOnAsyncCall + # The skipList specifies location ranges that should be skipped on step into. + experimental optional array of LocationRange skipList # Steps out of the function call. command stepOut # Steps over the statement. command stepOver + parameters + # The skipList specifies location ranges that should be skipped on step over. + experimental optional array of LocationRange skipList # Fired when breakpoint is resolved to an actual script and location. event breakpointResolved @@ -572,6 +584,8 @@ domain Debugger experimental optional integer codeOffset # The language of the script. experimental optional Debugger.ScriptLanguage scriptLanguage + # The name the embedder supplied for this script. + experimental optional string embedderName # Fired when virtual machine parses script. This event is also fired for all known and uncollected # scripts upon enabling debugger. @@ -613,6 +627,8 @@ domain Debugger experimental optional Debugger.ScriptLanguage scriptLanguage # If the scriptLanguage is WebASsembly, the source of debug symbols for the module. experimental optional Debugger.DebugSymbols debugSymbols + # The name the embedder supplied for this script. 
+ experimental optional string embedderName experimental domain HeapProfiler depends on Runtime @@ -1009,7 +1025,7 @@ domain Runtime f32 f64 v128 - anyref + externref # Object class (constructor) name. Specified for `object` type values only. optional string className # Remote object value in case of primitive values or JSON values (if it was requested). @@ -1370,6 +1386,11 @@ domain Runtime # Note that `let` variables can only be re-declared if they originate from # `replMode` themselves. experimental optional boolean replMode + # The Content Security Policy (CSP) for the target might block 'unsafe-eval' + # which includes eval(), Function(), setTimeout() and setInterval() + # when called with non-callable arguments. This flag bypasses CSP for this + # evaluation and allows unsafe-eval. Defaults to true. + experimental optional boolean allowUnsafeEvalBlockedByCSP returns # Evaluation result. RemoteObject result diff --git a/deps/v8/include/libplatform/libplatform.h b/deps/v8/include/libplatform/libplatform.h index c7ea4c2bd38575..1c874ba29b2e94 100644 --- a/deps/v8/include/libplatform/libplatform.h +++ b/deps/v8/include/libplatform/libplatform.h @@ -43,6 +43,16 @@ V8_PLATFORM_EXPORT std::unique_ptr NewDefaultPlatform( InProcessStackDumping::kDisabled, std::unique_ptr tracing_controller = {}); +/** + * Returns a new instance of the default v8::JobHandle implementation. + * + * The job will be executed by spawning up to |num_worker_threads| many worker + * threads on the provided |platform| with the given |priority|. + */ +V8_PLATFORM_EXPORT std::unique_ptr NewDefaultJobHandle( + v8::Platform* platform, v8::TaskPriority priority, + std::unique_ptr job_task, size_t num_worker_threads); + /** * Pumps the message loop for the given isolate. * @@ -79,6 +89,17 @@ V8_PLATFORM_EXPORT void SetTracingController( v8::Platform* platform, v8::platform::tracing::TracingController* tracing_controller); +/** + * Notifies the given platform about the Isolate getting deleted soon. 
Has to be + * called for all Isolates which are deleted - unless we're shutting down the + * platform. + * + * The |platform| has to be created using |NewDefaultPlatform|. + * + */ +V8_PLATFORM_EXPORT void NotifyIsolateShutdown(v8::Platform* platform, + Isolate* isolate); + } // namespace platform } // namespace v8 diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h new file mode 100644 index 00000000000000..e202293bcf6061 --- /dev/null +++ b/deps/v8/include/v8-cppgc.h @@ -0,0 +1,226 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef INCLUDE_V8_CPPGC_H_ +#define INCLUDE_V8_CPPGC_H_ + +#include "cppgc/visitor.h" +#include "v8-internal.h" // NOLINT(build/include_directory) +#include "v8.h" // NOLINT(build/include_directory) + +namespace v8 { + +class Isolate; +template +class JSMember; + +namespace internal { + +class JSMemberBaseExtractor; + +class V8_EXPORT JSMemberBase { + public: + /** + * Returns true if the reference is empty, i.e., has not been assigned + * object. + */ + bool IsEmpty() const { return val_ == nullptr; } + + /** + * Clears the reference. IsEmpty() will return true after this call. + */ + inline void Reset(); + + private: + static internal::Address* New(v8::Isolate* isolate, + internal::Address* object_slot, + internal::Address** this_slot); + static void Delete(internal::Address* object); + static void Copy(const internal::Address* const* from_slot, + internal::Address** to_slot); + static void Move(internal::Address** from_slot, internal::Address** to_slot); + + JSMemberBase() = default; + + JSMemberBase(v8::Isolate* isolate, internal::Address* object_slot) + : val_(New(isolate, object_slot, &val_)) {} + + inline JSMemberBase& CopyImpl(const JSMemberBase& other); + inline JSMemberBase& MoveImpl(JSMemberBase&& other); + + // val_ points to a GlobalHandles node. 
+ internal::Address* val_ = nullptr; + + template + friend class v8::JSMember; + friend class v8::internal::JSMemberBaseExtractor; +}; + +JSMemberBase& JSMemberBase::CopyImpl(const JSMemberBase& other) { + if (this != &other) { + Reset(); + if (!other.IsEmpty()) { + Copy(&other.val_, &val_); + } + } + return *this; +} + +JSMemberBase& JSMemberBase::MoveImpl(JSMemberBase&& other) { + if (this != &other) { + // No call to Reset() as Move() will conditionally reset itself when needed, + // and otherwise reuse the internal meta data. + Move(&other.val_, &val_); + } + return *this; +} + +void JSMemberBase::Reset() { + if (IsEmpty()) return; + Delete(val_); + val_ = nullptr; +} + +} // namespace internal + +/** + * A traced handle without destructor that clears the handle. The handle may + * only be used in GarbageCollected objects and must be processed in a Trace() + * method. + */ +template +class V8_EXPORT JSMember : public internal::JSMemberBase { + static_assert(std::is_base_of::value, + "JSMember only supports references to v8::Value"); + + public: + JSMember() = default; + + template ::value>> + JSMember(Isolate* isolate, Local that) + : internal::JSMemberBase(isolate, + reinterpret_cast(*that)) {} + + JSMember(const JSMember& other) { CopyImpl(other); } + + template ::value>> + JSMember(const JSMember& other) { // NOLINT + CopyImpl(other); + } + + JSMember(JSMember&& other) { MoveImpl(std::move(other)); } + + template ::value>> + JSMember(JSMember&& other) { // NOLINT + MoveImpl(std::move(other)); + } + + JSMember& operator=(const JSMember& other) { return CopyImpl(other); } + + template ::value>> + JSMember& operator=(const JSMember& other) { + return CopyImpl(other); + } + + JSMember& operator=(JSMember&& other) { return MoveImpl(other); } + + template ::value>> + JSMember& operator=(JSMember&& other) { + return MoveImpl(other); + } + + T* operator->() const { return reinterpret_cast(val_); } + T* operator*() const { return reinterpret_cast(val_); } + + using 
internal::JSMemberBase::Reset; + + template ::value>> + void Set(v8::Isolate* isolate, Local that) { + Reset(); + val_ = New(isolate, reinterpret_cast(*that), &val_); + } +}; + +template ::value || + std::is_base_of::value>> +inline bool operator==(const JSMember& lhs, const JSMember& rhs) { + v8::internal::Address* a = reinterpret_cast(*lhs); + v8::internal::Address* b = reinterpret_cast(*rhs); + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; + return *a == *b; +} + +template ::value || + std::is_base_of::value>> +inline bool operator!=(const JSMember& lhs, const JSMember& rhs) { + return !(lhs == rhs); +} + +template ::value || + std::is_base_of::value>> +inline bool operator==(const JSMember& lhs, const Local& rhs) { + v8::internal::Address* a = reinterpret_cast(*lhs); + v8::internal::Address* b = reinterpret_cast(*rhs); + if (a == nullptr) return b == nullptr; + if (b == nullptr) return false; + return *a == *b; +} + +template ::value || + std::is_base_of::value>> +inline bool operator==(const Local& lhs, const JSMember rhs) { + return rhs == lhs; +} + +template +inline bool operator!=(const JSMember& lhs, const T2& rhs) { + return !(lhs == rhs); +} + +template +inline bool operator!=(const T1& lhs, const JSMember& rhs) { + return !(lhs == rhs); +} + +class JSVisitor : public cppgc::Visitor { + public: + explicit JSVisitor(cppgc::Visitor::Key key) : cppgc::Visitor(key) {} + + template + void Trace(const JSMember& ref) { + if (ref.IsEmpty()) return; + Visit(ref); + } + + protected: + using cppgc::Visitor::Visit; + + virtual void Visit(const internal::JSMemberBase& ref) {} +}; + +} // namespace v8 + +namespace cppgc { + +template +struct TraceTrait> { + static void Trace(Visitor* visitor, const v8::JSMember* self) { + static_cast(visitor)->Trace(*self); + } +}; + +} // namespace cppgc + +#endif // INCLUDE_V8_CPPGC_H_ diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h index 
f74406493bcf2a..1cac9a6be39eed 100644 --- a/deps/v8/include/v8-fast-api-calls.h +++ b/deps/v8/include/v8-fast-api-calls.h @@ -23,13 +23,7 @@ * * \code * - * // Represents the way this type system maps C++ and JS values. - * struct WrapperTypeInfo { - * // Store e.g. a method to map from exposed C++ types to the already - * // created v8::FunctionTemplate's for instantiating them. - * }; - * - * // Helper method with a sanity check. + * // Helper method with a check for field count. * template * inline T* GetInternalField(v8::Local wrapper) { * assert(offset < wrapper->InternalFieldCount()); @@ -37,25 +31,19 @@ * wrapper->GetAlignedPointerFromInternalField(offset)); * } * - * // Returns the type info from a wrapper JS object. - * inline const WrapperTypeInfo* ToWrapperTypeInfo( - * v8::Local wrapper) { - * return GetInternalField(wrapper); - * } - * * class CustomEmbedderType { * public: - * static constexpr const WrapperTypeInfo* GetWrapperTypeInfo() { - * return &custom_type_wrapper_type_info; - * } * // Returns the raw C object from a wrapper JS object. * static CustomEmbedderType* Unwrap(v8::Local wrapper) { * return GetInternalField(wrapper); * } - * static void FastMethod(CustomEmbedderType* receiver, int param) { - * assert(receiver != nullptr); + * static void FastMethod(v8::ApiObject receiver_obj, int param) { + * v8::Object* v8_object = reinterpret_cast(&api_object); + * CustomEmbedderType* receiver = static_cast( + * receiver_obj->GetAlignedPointerFromInternalField( + * kV8EmbedderWrapperObjectIndex)); + * * // Type checks are already done by the optimized code. * // Then call some performance-critical method like: * // receiver->Method(param); @@ -67,31 +55,16 @@ * v8::Local::Cast(info.Holder()); * CustomEmbedderType* receiver = Unwrap(instance); * // TODO: Do type checks and extract {param}. 
- * FastMethod(receiver, param); + * receiver->Method(param); * } - * - * private: - * static const WrapperTypeInfo custom_type_wrapper_type_info; * }; * - * // Support for custom embedder types via specialization of WrapperTraits. - * namespace v8 { - * template <> - * class WrapperTraits { - * public: - * static const void* GetTypeInfo() { - * // We use the already defined machinery for the custom type. - * return CustomEmbedderType::GetWrapperTypeInfo(); - * } - * }; - * } // namespace v8 - * + * // TODO(mslekova): Clean-up these constants * // The constants kV8EmbedderWrapperTypeIndex and * // kV8EmbedderWrapperObjectIndex describe the offsets for the type info - * // struct (the one returned by WrapperTraits::GetTypeInfo) and the - * // native object, when expressed as internal field indices within a - * // JSObject. The existance of this helper function assumes that all - * // embedder objects have their JSObject-side type info at the same + * // struct and the native object, when expressed as internal field indices + * // within a JSObject. The existance of this helper function assumes that + * // all embedder objects have their JSObject-side type info at the same * // offset, but this is not a limitation of the API itself. For a detailed * // use case, see the third example. * static constexpr int kV8EmbedderWrapperTypeIndex = 0; @@ -120,8 +93,7 @@ * v8::ObjectTemplate::New(isolate); * object_template->SetInternalFieldCount( * kV8EmbedderWrapperObjectIndex + 1); - * object_template->Set( - v8::String::NewFromUtf8Literal(isolate, "method"), method_template); + * object_template->Set(isolate, "method", method_template); * * // Instantiate the wrapper JS object. 
* v8::Local object = @@ -150,9 +122,13 @@ * - bool * - int32_t * - uint32_t - * To be supported types: * - int64_t * - uint64_t + * The 64-bit integer types currently have the IDL (unsigned) long long + * semantics: https://heycam.github.io/webidl/#abstract-opdef-converttoint + * In the future we'll extend the API to also provide conversions from/to + * BigInt to preserve full precision. + * To be supported types: * - float32_t * - float64_t * - arrays of C types @@ -180,7 +156,7 @@ class CTypeInfo { kUint64, kFloat32, kFloat64, - kUnwrappedApiObject, + kV8Value, }; enum class ArgFlags : uint8_t { @@ -188,24 +164,15 @@ class CTypeInfo { kIsArrayBit = 1 << 0, // This argument is first in an array of values. }; - static CTypeInfo FromWrapperType(const void* wrapper_type_info, - ArgFlags flags = ArgFlags::kNone) { - uintptr_t wrapper_type_info_ptr = - reinterpret_cast(wrapper_type_info); - // Check that the lower kIsWrapperTypeBit bits are 0's. - CHECK_EQ( - wrapper_type_info_ptr & ~(static_cast(~0) - << static_cast(kIsWrapperTypeBit)), - 0u); - // TODO(mslekova): Refactor the manual bit manipulations to use - // PointerWithPayload instead. - return CTypeInfo(wrapper_type_info_ptr | static_cast(flags) | - kIsWrapperTypeBit); + static CTypeInfo FromWrapperType(ArgFlags flags = ArgFlags::kNone) { + return CTypeInfo(static_cast(flags) | kIsWrapperTypeBit); } static constexpr CTypeInfo FromCType(Type ctype, ArgFlags flags = ArgFlags::kNone) { - // ctype cannot be Type::kUnwrappedApiObject. + // TODO(mslekova): Refactor the manual bit manipulations to use + // PointerWithPayload instead. + // ctype cannot be Type::kV8Value. 
return CTypeInfo( ((static_cast(ctype) << kTypeOffset) & kTypeMask) | static_cast(flags)); @@ -215,7 +182,7 @@ class CTypeInfo { constexpr Type GetType() const { if (payload_ & kIsWrapperTypeBit) { - return Type::kUnwrappedApiObject; + return Type::kV8Value; } return static_cast((payload_ & kTypeMask) >> kTypeOffset); } @@ -224,6 +191,11 @@ class CTypeInfo { return payload_ & static_cast(ArgFlags::kIsArrayBit); } + static const CTypeInfo& Invalid() { + static CTypeInfo invalid = CTypeInfo(0); + return invalid; + } + private: explicit constexpr CTypeInfo(uintptr_t payload) : payload_(payload) {} @@ -247,21 +219,17 @@ class CFunctionInfo { virtual const CTypeInfo& ArgumentInfo(unsigned int index) const = 0; }; -template -class WrapperTraits { - public: - static const void* GetTypeInfo() { - static_assert(sizeof(T) != sizeof(T), - "WrapperTraits must be specialized for this type."); - return nullptr; - } +struct ApiObject { + uintptr_t address; }; namespace internal { template struct GetCType { - static_assert(sizeof(T) != sizeof(T), "Unsupported CType"); + static constexpr CTypeInfo Get() { + return CTypeInfo::FromCType(CTypeInfo::Type::kV8Value); + } }; #define SPECIALIZE_GET_C_TYPE_FOR(ctype, ctypeinfo) \ @@ -280,19 +248,11 @@ struct GetCType { V(int64_t, kInt64) \ V(uint64_t, kUint64) \ V(float, kFloat32) \ - V(double, kFloat64) + V(double, kFloat64) \ + V(ApiObject, kV8Value) SUPPORTED_C_TYPES(SPECIALIZE_GET_C_TYPE_FOR) -template -struct EnableIfHasWrapperTypeInfo {}; - -template -struct EnableIfHasWrapperTypeInfo::GetTypeInfo(), - void())> { - typedef void type; -}; - // T* where T is a primitive (array of primitives). template struct GetCTypePointerImpl { @@ -304,10 +264,8 @@ struct GetCTypePointerImpl { // T* where T is an API object. 
template -struct GetCTypePointerImpl::type> { - static constexpr CTypeInfo Get() { - return CTypeInfo::FromWrapperType(WrapperTraits::GetTypeInfo()); - } +struct GetCTypePointerImpl { + static constexpr CTypeInfo Get() { return CTypeInfo::FromWrapperType(); } }; // T** where T is a primitive. Not allowed. @@ -318,11 +276,9 @@ struct GetCTypePointerPointerImpl { // T** where T is an API object (array of API objects). template -struct GetCTypePointerPointerImpl< - T, typename EnableIfHasWrapperTypeInfo::type> { +struct GetCTypePointerPointerImpl { static constexpr CTypeInfo Get() { - return CTypeInfo::FromWrapperType(WrapperTraits::GetTypeInfo(), - CTypeInfo::ArgFlags::kIsArrayBit); + return CTypeInfo::FromWrapperType(CTypeInfo::ArgFlags::kIsArrayBit); } }; @@ -332,13 +288,17 @@ struct GetCType : public GetCTypePointerPointerImpl {}; template struct GetCType : public GetCTypePointerImpl {}; -template +template class CFunctionInfoImpl : public CFunctionInfo { public: + static constexpr int kHasErrorArgCount = (RaisesException ? 
1 : 0); + static constexpr int kReceiverCount = 1; CFunctionInfoImpl() : return_info_(internal::GetCType::Get()), - arg_count_(sizeof...(Args)), + arg_count_(sizeof...(Args) - kHasErrorArgCount), arg_info_{internal::GetCType::Get()...} { + static_assert(sizeof...(Args) >= kHasErrorArgCount + kReceiverCount, + "The receiver or the has_error argument is missing."); static_assert( internal::GetCType::Get().GetType() == CTypeInfo::Type::kVoid, "Only void return types are currently supported."); @@ -347,14 +307,16 @@ class CFunctionInfoImpl : public CFunctionInfo { const CTypeInfo& ReturnInfo() const override { return return_info_; } unsigned int ArgumentCount() const override { return arg_count_; } const CTypeInfo& ArgumentInfo(unsigned int index) const override { - CHECK_LT(index, ArgumentCount()); + if (index >= ArgumentCount()) { + return CTypeInfo::Invalid(); + } return arg_info_[index]; } private: - CTypeInfo return_info_; + const CTypeInfo return_info_; const unsigned int arg_count_; - CTypeInfo arg_info_[sizeof...(Args)]; + const CTypeInfo arg_info_[sizeof...(Args)]; }; } // namespace internal @@ -379,15 +341,25 @@ class V8_EXPORT CFunction { return ArgUnwrap::Make(func); } + template + static CFunction MakeWithErrorSupport(F* func) { + return ArgUnwrap::MakeWithErrorSupport(func); + } + + template + static CFunction Make(F* func, const CFunctionInfo* type_info) { + return CFunction(reinterpret_cast(func), type_info); + } + private: const void* address_; const CFunctionInfo* type_info_; CFunction(const void* address, const CFunctionInfo* type_info); - template + template static CFunctionInfo* GetCFunctionInfo() { - static internal::CFunctionInfoImpl instance; + static internal::CFunctionInfoImpl instance; return &instance; } @@ -402,7 +374,11 @@ class V8_EXPORT CFunction { public: static CFunction Make(R (*func)(Args...)) { return CFunction(reinterpret_cast(func), - GetCFunctionInfo()); + GetCFunctionInfo()); + } + static CFunction MakeWithErrorSupport(R 
(*func)(Args...)) { + return CFunction(reinterpret_cast(func), + GetCFunctionInfo()); } }; }; diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h index 127a77dbfca070..fcc36595ba57b0 100644 --- a/deps/v8/include/v8-internal.h +++ b/deps/v8/include/v8-internal.h @@ -178,14 +178,8 @@ class Internals { // IsolateData layout guarantees. static const int kIsolateEmbedderDataOffset = 0; - static const int kExternalMemoryOffset = - kNumIsolateDataSlots * kApiSystemPointerSize; - static const int kExternalMemoryLimitOffset = - kExternalMemoryOffset + kApiInt64Size; - static const int kExternalMemoryLowSinceMarkCompactOffset = - kExternalMemoryLimitOffset + kApiInt64Size; static const int kIsolateFastCCallCallerFpOffset = - kExternalMemoryLowSinceMarkCompactOffset + kApiInt64Size; + kNumIsolateDataSlots * kApiSystemPointerSize; static const int kIsolateFastCCallCallerPcOffset = kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize; static const int kIsolateStackGuardOffset = diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h new file mode 100644 index 00000000000000..a511c01e5afe41 --- /dev/null +++ b/deps/v8/include/v8-metrics.h @@ -0,0 +1,133 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_METRICS_H_ +#define V8_METRICS_H_ + +#include "v8.h" // NOLINT(build/include_directory) + +namespace v8 { +namespace metrics { + +struct WasmModuleDecoded { + bool async = false; + bool streamed = false; + bool success = false; + size_t module_size_in_bytes = 0; + size_t function_count = 0; + int64_t wall_clock_time_in_us = 0; +}; + +struct WasmModuleCompiled { + bool async = false; + bool streamed = false; + bool cached = false; + bool deserialized = false; + bool lazy = false; + bool success = false; + size_t code_size_in_bytes = 0; + size_t liftoff_bailout_count = 0; + int64_t wall_clock_time_in_us = 0; +}; + +struct WasmModuleInstantiated { + bool async = false; + bool success = false; + size_t imported_function_count = 0; + int64_t wall_clock_time_in_us = 0; +}; + +struct WasmModuleTieredUp { + bool lazy = false; + size_t code_size_in_bytes = 0; + int64_t wall_clock_time_in_us = 0; +}; + +struct WasmModulesPerIsolate { + size_t count = 0; +}; + +#define V8_MAIN_THREAD_METRICS_EVENTS(V) \ + V(WasmModuleDecoded) \ + V(WasmModuleCompiled) \ + V(WasmModuleInstantiated) \ + V(WasmModuleTieredUp) + +#define V8_THREAD_SAFE_METRICS_EVENTS(V) V(WasmModulesPerIsolate) + +/** + * This class serves as a base class for recording event-based metrics in V8. + * There a two kinds of metrics, those which are expected to be thread-safe and + * whose implementation is required to fulfill this requirement and those whose + * implementation does not have that requirement and only needs to be + * executable on the main thread. If such an event is triggered from a + * background thread, it will be delayed and executed by the foreground task + * runner. + * + * The thread-safe events are listed in the V8_THREAD_SAFE_METRICS_EVENTS + * macro above while the main thread event are listed in + * V8_MAIN_THREAD_METRICS_EVENTS above. 
For the former, a virtual method + * AddMainThreadEvent(const E& event, v8::Context::Token token) will be + * generated and for the latter AddThreadSafeEvent(const E& event). + * + * Thread-safe events are not allowed to access the context and therefore do + * not carry a context ID with them. These IDs can be generated using + * Recorder::GetContextId() and the ID will be valid throughout the lifetime + * of the isolate. It is not guaranteed that the ID will still resolve to + * a valid context using Recorder::GetContext() at the time the metric is + * recorded. In this case, an empty handle will be returned. + * + * The embedder is expected to call v8::Isolate::SetMetricsRecorder() + * providing its implementation and have the virtual methods overwritten + * for the events it cares about. + */ +class V8_EXPORT Recorder { + public: + // A unique identifier for a context in this Isolate. + // It is guaranteed to not be reused throughout the lifetime of the Isolate. + class ContextId { + public: + ContextId() : id_(kEmptyId) {} + + bool IsEmpty() const { return id_ == kEmptyId; } + static const ContextId Empty() { return ContextId{kEmptyId}; } + + bool operator==(const ContextId& other) const { return id_ == other.id_; } + bool operator!=(const ContextId& other) const { return id_ != other.id_; } + + private: + friend class ::v8::Context; + friend class ::v8::internal::Isolate; + + explicit ContextId(uintptr_t id) : id_(id) {} + + static constexpr uintptr_t kEmptyId = 0; + uintptr_t id_; + }; + + virtual ~Recorder() = default; + +#define ADD_MAIN_THREAD_EVENT(E) \ + virtual void AddMainThreadEvent(const E& event, ContextId context_id) {} + V8_MAIN_THREAD_METRICS_EVENTS(ADD_MAIN_THREAD_EVENT) +#undef ADD_MAIN_THREAD_EVENT + +#define ADD_THREAD_SAFE_EVENT(E) \ + virtual void AddThreadSafeEvent(const E& event) {} + V8_THREAD_SAFE_METRICS_EVENTS(ADD_THREAD_SAFE_EVENT) +#undef ADD_THREAD_SAFE_EVENT + + virtual void NotifyIsolateDisposal() {} + + // Return the context 
with the given id or an empty handle if the context + // was already garbage collected. + static MaybeLocal GetContext(Isolate* isolate, ContextId id); + // Return the unique id corresponding to the given context. + static ContextId GetContextId(Local context); +}; + +} // namespace metrics +} // namespace v8 + +#endif // V8_METRICS_H_ diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h index 7cfd18b5708d57..aae381b080617f 100644 --- a/deps/v8/include/v8-platform.h +++ b/deps/v8/include/v8-platform.h @@ -80,6 +80,14 @@ class TaskRunner { * implementation takes ownership of |task|. The |task| cannot be nested * within other task executions. * + * Tasks which shouldn't be interleaved with JS execution must be posted with + * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the + * embedder may process tasks in a callback which is called during JS + * execution. + * + * In particular, tasks which execute JS must be non-nestable, since JS + * execution is not allowed to nest. + * * Requires that |TaskRunner::NonNestableTasksEnabled()| is true. */ virtual void PostNonNestableTask(std::unique_ptr task) {} @@ -98,6 +106,14 @@ class TaskRunner { * implementation takes ownership of |task|. The |task| cannot be nested * within other task executions. * + * Tasks which shouldn't be interleaved with JS execution must be posted with + * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the + * embedder may process tasks in a callback which is called during JS + * execution. + * + * In particular, tasks which execute JS must be non-nestable, since JS + * execution is not allowed to nest. + * * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true. */ virtual void PostNonNestableDelayedTask(std::unique_ptr task, @@ -154,6 +170,14 @@ class JobDelegate { * details. 
*/ virtual void NotifyConcurrencyIncrease() = 0; + + /** + * Returns a task_id unique among threads currently running this job, such + * that GetTaskId() < worker count. To achieve this, the same task_id may be + * reused by a different thread after a worker_task returns. + * TODO(etiennep): Make pure virtual once custom embedders implement it. + */ + virtual uint8_t GetTaskId() { return 0; } }; /** @@ -186,6 +210,12 @@ class JobHandle { */ virtual void Cancel() = 0; + /** + * Returns true if there's no work pending and no worker running. + * TODO(etiennep): Make pure virtual once custom embedders implement it. + */ + virtual bool IsCompleted() { return true; } + /** * Returns true if associated with a Job and other methods may be called. * Returns false after Join() or Cancel() was called. @@ -209,6 +239,17 @@ class JobTask { * must not call back any JobHandle methods. */ virtual size_t GetMaxConcurrency() const = 0; + + /* + * Meant to replace the version above, given the number of threads currently + * assigned to this job and executing Run(). This is useful when the result + * must include local work items not visible globaly by other workers. + * TODO(etiennep): Replace the version above by this once custom embedders are + * migrated. + */ + size_t GetMaxConcurrency(size_t worker_count) const { + return GetMaxConcurrency(); + } }; /** @@ -375,6 +416,69 @@ class PageAllocator { * memory area brings the memory transparently back. */ virtual bool DiscardSystemPages(void* address, size_t size) { return true; } + + /** + * INTERNAL ONLY: This interface has not been stabilised and may change + * without notice from one release to another without being deprecated first. + */ + class SharedMemoryMapping { + public: + // Implementations are expected to free the shared memory mapping in the + // destructor. 
+ virtual ~SharedMemoryMapping() = default; + virtual void* GetMemory() const = 0; + }; + + /** + * INTERNAL ONLY: This interface has not been stabilised and may change + * without notice from one release to another without being deprecated first. + */ + class SharedMemory { + public: + // Implementations are expected to free the shared memory in the destructor. + virtual ~SharedMemory() = default; + virtual std::unique_ptr RemapTo( + void* new_address) const = 0; + virtual void* GetMemory() const = 0; + virtual size_t GetSize() const = 0; + }; + + /** + * INTERNAL ONLY: This interface has not been stabilised and may change + * without notice from one release to another without being deprecated first. + * + * Reserve pages at a fixed address returning whether the reservation is + * possible. The reserved memory is detached from the PageAllocator and so + * should not be freed by it. It's intended for use with + * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory. + */ + virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) { + return false; + } + + /** + * INTERNAL ONLY: This interface has not been stabilised and may change + * without notice from one release to another without being deprecated first. + * + * Allocates shared memory pages. Not all PageAllocators need support this and + * so this method need not be overridden. + * Allocates a new read-only shared memory region of size |length| and copies + * the memory at |original_address| into it. + */ + virtual std::unique_ptr AllocateSharedPages( + size_t length, const void* original_address) { + return {}; + } + + /** + * INTERNAL ONLY: This interface has not been stabilised and may change + * without notice from one release to another without being deprecated first. + * + * If not overridden and changed to return true, V8 will not attempt to call + * AllocateSharedPages or RemapSharedPages. 
If overridden, AllocateSharedPages + * and RemapSharedPages must also be overridden. + */ + virtual bool CanAllocateSharedPages() { return false; } }; /** @@ -519,15 +623,12 @@ class Platform { * libplatform looks like: * std::unique_ptr PostJob( * TaskPriority priority, std::unique_ptr job_task) override { - * return std::make_unique( - * std::make_shared( - * this, std::move(job_task), kNumThreads)); + * return v8::platform::NewDefaultJobHandle( + * this, priority, std::move(job_task), NumberOfWorkerThreads()); * } */ virtual std::unique_ptr PostJob( - TaskPriority priority, std::unique_ptr job_task) { - return nullptr; - } + TaskPriority priority, std::unique_ptr job_task) = 0; /** * Monotonically increasing time in seconds from an arbitrary fixed point in diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h index cee7990e4bc193..5d4e5bc8920a77 100644 --- a/deps/v8/include/v8-version.h +++ b/deps/v8/include/v8-version.h @@ -9,9 +9,9 @@ // NOTE these macros are used by some of the tool scripts and the build // system so their names cannot be changed without changing the scripts. #define V8_MAJOR_VERSION 8 -#define V8_MINOR_VERSION 4 -#define V8_BUILD_NUMBER 371 -#define V8_PATCH_LEVEL 19 +#define V8_MINOR_VERSION 6 +#define V8_BUILD_NUMBER 395 +#define V8_PATCH_LEVEL 16 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h index 43420e60d90a15..0991d0ab6e4132 100644 --- a/deps/v8/include/v8.h +++ b/deps/v8/include/v8.h @@ -45,6 +45,7 @@ class BigInt; class BigIntObject; class Boolean; class BooleanObject; +class CFunction; class Context; class Data; class Date; @@ -127,7 +128,6 @@ template class Arguments; template class CustomArguments; -class DeferredHandles; class FunctionCallbackArguments; class GlobalHandles; class Heap; @@ -149,6 +149,10 @@ class StreamingDecoder; } // namespace internal +namespace metrics { +class Recorder; +} // namespace metrics + namespace debug { class ConsoleCallArguments; } // namespace debug @@ -900,6 +904,23 @@ class TracedReferenceBase { const_cast&>(*this)); } + protected: + /** + * Update this reference in a thread-safe way + */ + void SetSlotThreadSafe(T* new_val) { + reinterpret_cast*>(&val_)->store(new_val, + std::memory_order_relaxed); + } + + /** + * Get this reference in a thread-safe way + */ + const T* GetSlotThreadSafe() const { + return reinterpret_cast const*>(&val_)->load( + std::memory_order_relaxed); + } + private: enum DestructionMode { kWithDestructor, kWithoutDestructor }; @@ -1152,6 +1173,14 @@ class TracedReference : public TracedReferenceBase { return reinterpret_cast&>( const_cast&>(*this)); } + + /** + * Returns true if this TracedReference is empty, i.e., has not been + * assigned an object. This version of IsEmpty is thread-safe. + */ + bool IsEmptyThreadSafe() const { + return this->GetSlotThreadSafe() == nullptr; + } }; /** @@ -1545,6 +1574,23 @@ class V8_EXPORT Module : public Data { */ Local GetUnboundModuleScript(); + /** + * Returns the underlying script's id. + * + * The module must be a SourceTextModule and must not have a kErrored status. + */ + int ScriptId(); + + /** + * Returns whether the module is a SourceTextModule. + */ + bool IsSourceTextModule() const; + + /** + * Returns whether the module is a SyntheticModule. 
+ */ + bool IsSyntheticModule() const; + /* * Callback defined in the embedder. This is responsible for setting * the module's exported values with calls to SetSyntheticModuleExport(). @@ -3618,11 +3664,11 @@ enum PropertyFilter { /** * Options for marking whether callbacks may trigger JS-observable side effects. - * Side-effect-free callbacks are whitelisted during debug evaluation with + * Side-effect-free callbacks are allowlisted during debug evaluation with * throwOnSideEffect. It applies when calling a Function, FunctionTemplate, * or an Accessor callback. For Interceptors, please see * PropertyHandlerFlags's kHasNoSideEffect. - * Callbacks that only cause side effects to the receiver are whitelisted if + * Callbacks that only cause side effects to the receiver are allowlisted if * invoked on receiver objects that are created within the same debug-evaluate * call, as these objects are temporary and the side effect does not escape. */ @@ -5350,9 +5396,10 @@ class V8_EXPORT TypedArray : public ArrayBufferView { /* * The largest typed array size that can be constructed using New. */ - static constexpr size_t kMaxLength = internal::kApiSystemPointerSize == 4 - ? internal::kSmiMaxValue - : 0xFFFFFFFF; + static constexpr size_t kMaxLength = + internal::kApiSystemPointerSize == 4 + ? 
internal::kSmiMaxValue + : static_cast(uint64_t{1} << 32); /** * Number of elements in this typed array @@ -5958,13 +6005,14 @@ class V8_EXPORT External : public Value { static void CheckCast(v8::Value* obj); }; -#define V8_INTRINSICS_LIST(F) \ - F(ArrayProto_entries, array_entries_iterator) \ - F(ArrayProto_forEach, array_for_each_iterator) \ - F(ArrayProto_keys, array_keys_iterator) \ - F(ArrayProto_values, array_values_iterator) \ - F(ErrorPrototype, initial_error_prototype) \ - F(IteratorPrototype, initial_iterator_prototype) \ +#define V8_INTRINSICS_LIST(F) \ + F(ArrayProto_entries, array_entries_iterator) \ + F(ArrayProto_forEach, array_for_each_iterator) \ + F(ArrayProto_keys, array_keys_iterator) \ + F(ArrayProto_values, array_values_iterator) \ + F(AsyncIteratorPrototype, initial_async_iterator_prototype) \ + F(ErrorPrototype, initial_error_prototype) \ + F(IteratorPrototype, initial_iterator_prototype) \ F(ObjProto_valueOf, object_value_of_function) enum Intrinsic { @@ -6311,7 +6359,6 @@ typedef bool (*AccessCheckCallback)(Local accessing_context, Local accessed_object, Local data); -class CFunction; /** * A FunctionTemplate is used to create functions at runtime. There * can only be one function created from a FunctionTemplate in a @@ -7223,8 +7270,7 @@ typedef MaybeLocal (*HostImportModuleDynamicallyCallback)( /** * HostInitializeImportMetaObjectCallback is called the first time import.meta - * is accessed for a module. Subsequent access will reuse the same value. The - * callback must not throw. + * is accessed for a module. Subsequent access will reuse the same value. * * The method combines two implementation-defined abstract operations into one: * HostGetImportMetaProperties and HostFinalizeImportMeta. @@ -8069,10 +8115,11 @@ enum class MeasureMemoryMode { kSummary, kDetailed }; /** * Controls how promptly a memory measurement request is executed. * By default the measurement is folded with the next scheduled GC which may - * happen after a while. 
The kEager starts increment GC right away and - * is useful for testing. + * happen after a while and is forced after some timeout. + * The kEager mode starts incremental GC right away and is useful for testing. + * The kLazy mode does not force GC. */ -enum class MeasureMemoryExecution { kDefault, kEager }; +enum class MeasureMemoryExecution { kDefault, kEager, kLazy }; /** * The delegate is used in Isolate::MeasureMemory API. @@ -8309,7 +8356,7 @@ class V8_EXPORT Isolate { /** * This scope allows terminations inside direct V8 API calls and forbid them - * inside any recursice API calls without explicit SafeForTerminationScope. + * inside any recursive API calls without explicit SafeForTerminationScope. */ class V8_EXPORT SafeForTerminationScope { public: @@ -8392,8 +8439,8 @@ class V8_EXPORT Isolate { kFunctionTokenOffsetTooLongForToString = 49, kWasmSharedMemory = 50, kWasmThreadOpcodes = 51, - kAtomicsNotify = 52, - kAtomicsWake = 53, + kAtomicsNotify = 52, // Unused. + kAtomicsWake = 53, // Unused. kCollator = 54, kNumberFormat = 55, kDateTimeFormat = 56, @@ -8448,6 +8495,9 @@ class V8_EXPORT Isolate { kInvalidatedTypedArraySpeciesLookupChainProtector = 105, kWasmSimdOpcodes = 106, kVarRedeclaredCatchBinding = 107, + kWasmRefTypes = 108, + kWasmBulkMemory = 109, + kWasmMultiValue = 110, // If you add new values here, you'll also need to update Chromium's: // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to @@ -8544,7 +8594,7 @@ class V8_EXPORT Isolate { HostImportModuleDynamicallyCallback callback); /** - * This specifies the callback called by the upcoming importa.meta + * This specifies the callback called by the upcoming import.meta * language feature to retrieve host-defined meta data for a module. */ void SetHostInitializeImportMetaObjectCallback( @@ -8728,8 +8778,7 @@ class V8_EXPORT Isolate { * kept alive by JavaScript objects. * \returns the adjusted value. 
*/ - V8_INLINE int64_t - AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes); + int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes); /** * Returns the number of phantom handles without callbacks that were reset @@ -8979,6 +9028,13 @@ class V8_EXPORT Isolate { */ void RequestInterrupt(InterruptCallback callback, void* data); + /** + * Returns true if there is ongoing background work within V8 that will + * eventually post a foreground task, like asynchronous WebAssembly + * compilation. + */ + bool HasPendingBackgroundTasks(); + /** * Request garbage collection in this Isolate. It is only valid to call this * function if --expose_gc was specified. @@ -9115,6 +9171,18 @@ class V8_EXPORT Isolate { void SetCreateHistogramFunction(CreateHistogramCallback); void SetAddHistogramSampleFunction(AddHistogramSampleCallback); + /** + * Enables the host application to provide a mechanism for recording + * event based metrics. In order to use this interface + * include/v8-metrics.h + * needs to be included and the recorder needs to be derived from the + * Recorder base class defined there. + * This method can only be called once per isolate and must happen during + * isolate initialization before background threads are spawned. + */ + void SetMetricsRecorder( + const std::shared_ptr& metrics_recorder); + /** * Enables the host application to provide a mechanism for recording a * predefined set of data as crash keys to be used in postmortem debugging in @@ -9500,12 +9568,16 @@ class V8_EXPORT StartupData { * Only valid for StartupData returned by SnapshotCreator::CreateBlob(). */ bool CanBeRehashed() const; + /** + * Allows embedders to verify whether the data is valid for the current + * V8 instance. + */ + bool IsValid() const; const char* data; int raw_size; }; - /** * EntropySource is used as a callback function when v8 needs a source * of entropy. 
@@ -10064,8 +10136,6 @@ class V8_EXPORT TryCatch { /** * Returns the exception caught by this try/catch block. If no exception has * been caught an empty handle is returned. - * - * The returned handle is valid until this TryCatch block has been destroyed. */ Local Exception() const; @@ -10087,9 +10157,6 @@ class V8_EXPORT TryCatch { /** * Returns the message associated with this exception. If there is * no message associated an empty handle is returned. - * - * The returned handle is valid until this TryCatch block has been - * destroyed. */ Local Message() const; @@ -10805,8 +10872,15 @@ V8_INLINE void PersistentBase::SetWeak( P* parameter, typename WeakCallbackInfo

::Callback callback, WeakCallbackType type) { typedef typename WeakCallbackInfo::Callback Callback; +#if (__GNUC__ >= 8) && !defined(__clang__) +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wcast-function-type" +#endif V8::MakeWeak(reinterpret_cast(this->val_), parameter, reinterpret_cast(callback), type); +#if (__GNUC__ >= 8) && !defined(__clang__) +#pragma GCC diagnostic pop +#endif } template @@ -10887,7 +10961,7 @@ template void TracedReferenceBase::Reset() { if (IsEmpty()) return; V8::DisposeTracedGlobal(reinterpret_cast(val_)); - val_ = nullptr; + SetSlotThreadSafe(nullptr); } template @@ -10943,10 +11017,11 @@ template template void TracedReference::Reset(Isolate* isolate, const Local& other) { static_assert(std::is_base_of::value, "type check"); - Reset(); + this->Reset(); if (other.IsEmpty()) return; - this->val_ = this->New(isolate, other.val_, &this->val_, - TracedReferenceBase::kWithoutDestructor); + this->SetSlotThreadSafe( + this->New(isolate, other.val_, &this->val_, + TracedReferenceBase::kWithoutDestructor)); } template @@ -11932,37 +12007,6 @@ MaybeLocal Isolate::GetDataFromSnapshotOnce(size_t index) { return Local(data); } -int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( - int64_t change_in_bytes) { - typedef internal::Internals I; - int64_t* external_memory = reinterpret_cast( - reinterpret_cast(this) + I::kExternalMemoryOffset); - int64_t* external_memory_limit = reinterpret_cast( - reinterpret_cast(this) + I::kExternalMemoryLimitOffset); - int64_t* external_memory_low_since_mc = - reinterpret_cast(reinterpret_cast(this) + - I::kExternalMemoryLowSinceMarkCompactOffset); - - // Embedders are weird: we see both over- and underflows here. Perform the - // addition with unsigned types to avoid undefined behavior. 
- const int64_t amount = - static_cast(static_cast(change_in_bytes) + - static_cast(*external_memory)); - *external_memory = amount; - - if (amount < *external_memory_low_since_mc) { - *external_memory_low_since_mc = amount; - *external_memory_limit = amount + I::kExternalAllocationSoftLimit; - } - - if (change_in_bytes <= 0) return *external_memory; - - if (amount > *external_memory_limit) { - ReportExternalAllocationLimitReached(); - } - return *external_memory; -} - Local Context::GetEmbedderData(int index) { #ifndef V8_ENABLE_CHECKS typedef internal::Address A; diff --git a/deps/v8/include/v8config.h b/deps/v8/include/v8config.h index 9825232d6a10ac..bbd1d6ce978e6c 100644 --- a/deps/v8/include/v8config.h +++ b/deps/v8/include/v8config.h @@ -433,6 +433,16 @@ #define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */ #endif +// Helper macro to define no_sanitize attributes only with clang. +#if defined(__clang__) && defined(__has_attribute) +#if __has_attribute(no_sanitize) +#define V8_CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what))) +#endif +#endif +#if !defined(V8_CLANG_NO_SANITIZE) +#define V8_CLANG_NO_SANITIZE(what) +#endif + #if defined(BUILDING_V8_SHARED) && defined(USING_V8_SHARED) #error Inconsistent build configuration: To build the V8 shared library \ set BUILDING_V8_SHARED, to include its headers for linking against the \ diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl index d4abcf89eb15e9..e385083aef8fcd 100644 --- a/deps/v8/infra/mb/mb_config.pyl +++ b/deps/v8/infra/mb/mb_config.pyl @@ -94,9 +94,11 @@ 'V8 Fuchsia - builder': 'release_x64_fuchsia', 'V8 Fuchsia - debug builder': 'debug_x64_fuchsia', 'V8 Linux64 - cfi': 'release_x64_cfi', + 'V8 Linux64 - reverse jsargs': 'debug_x64_reverse_jsargs', 'V8 Linux64 UBSan': 'release_x64_ubsan', 'V8 Linux - vtunejit': 'debug_x86_vtunejit', 'V8 Linux64 - gcov coverage': 'release_x64_gcc_coverage', + 'V8 Linux64 - Fuzzilli': 'release_x64_fuzzilli', 'V8 Linux - predictable': 
'release_x86_predictable', 'V8 Linux - full debug': 'full_debug_x86', 'V8 Mac64 - full debug': 'full_debug_x64', @@ -154,7 +156,7 @@ # Arm64. 'V8 Android Arm64 - builder': 'release_android_arm64', 'V8 Android Arm64 - debug builder': 'debug_android_arm64', - 'V8 Arm64 - builder': 'release_arm64', + 'V8 Arm64 - builder': 'release_arm64_hard_float', 'V8 Linux - arm64 - sim': 'release_simulate_arm64', 'V8 Linux - arm64 - sim - debug': 'debug_simulate_arm64', 'V8 Linux - arm64 - sim - gc stress': 'debug_simulate_arm64', @@ -211,13 +213,12 @@ 'v8_linux_dbg_ng': 'debug_x86_trybot', 'v8_linux_noi18n_compile_dbg': 'debug_x86_no_i18n', 'v8_linux_noi18n_rel_ng': 'release_x86_no_i18n_trybot', - 'v8_linux_gc_stress_dbg': 'debug_x86_trybot', + 'v8_linux_gc_stress_dbg_ng': 'debug_x86_trybot', 'v8_linux_gcc_compile_rel': 'release_x86_gcc_minimal_symbols', - 'v8_linux_gcc_rel': 'release_x86_gcc_minimal_symbols', + 'v8_linux_gcc_rel_ng': 'release_x86_gcc_minimal_symbols', 'v8_linux_shared_compile_rel': 'release_x86_shared_verify_heap', 'v8_linux64_arm64_pointer_compression_rel_ng': 'release_simulate_arm64_pointer_compression', - 'v8_linux64_compile_rel_xg': 'release_x64_test_features_trybot', 'v8_linux64_dbg_ng': 'debug_x64_trybot', 'v8_linux64_gc_stress_custom_snapshot_dbg_ng': 'debug_x64_trybot_custom', 'v8_linux64_gcc_compile_dbg': 'debug_x64_gcc', @@ -227,43 +228,46 @@ 'v8_linux64_perfetto_dbg_ng': 'debug_x64_perfetto', 'v8_linux64_pointer_compression_rel_ng': 'release_x64_pointer_compression', 'v8_linux64_rel_ng': 'release_x64_test_features_trybot', + 'v8_linux64_reverse_jsargs_dbg_ng': 'debug_x64_reverse_jsargs', 'v8_linux64_shared_compile_rel': 'release_x64_shared_verify_heap', 'v8_linux64_verify_csa_rel_ng': 'release_x64_verify_csa', 'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols', 'v8_linux64_cfi_rel_ng': 'release_x64_cfi', - 'v8_linux64_msan_rel': 'release_simulate_arm64_msan_minimal_symbols', - 'v8_linux64_sanitizer_coverage_rel': - 
'release_x64_asan_minimal_symbols_coverage', - 'v8_linux64_tsan_rel': 'release_x64_tsan_minimal_symbols', + 'v8_linux64_fuzzilli_ng': 'release_x64_fuzzilli', + 'v8_linux64_msan_rel_ng': 'release_simulate_arm64_msan_minimal_symbols', + 'v8_linux64_tsan_rel_ng': 'release_x64_tsan_minimal_symbols', 'v8_linux64_tsan_isolates_rel_ng': 'release_x64_tsan_minimal_symbols', 'v8_linux64_ubsan_rel_ng': 'release_x64_ubsan_minimal_symbols', 'v8_odroid_arm_rel_ng': 'release_arm', 'v8_linux_torque_compare': 'torque_compare', # TODO(machenbach): Remove after switching to x64 on infra side. - 'v8_win_dbg': 'debug_x86_trybot', + 'v8_win_dbg_ng': 'debug_x86_trybot', 'v8_win_compile_dbg': 'debug_x86_trybot', 'v8_win_rel_ng': 'release_x86_trybot', 'v8_win64_asan_rel_ng': 'release_x64_asan_no_lsan', 'v8_win64_msvc_compile_rel': 'release_x64_msvc', - 'v8_win64_dbg': 'debug_x64_minimal_symbols', + 'v8_win64_dbg_ng': 'debug_x64_minimal_symbols', 'v8_win64_msvc_rel_ng': 'release_x64_msvc', 'v8_win64_rel_ng': 'release_x64_trybot', - 'v8_mac64_gc_stress_dbg': 'debug_x64_trybot', + 'v8_mac_arm64_rel_ng': 'release_arm64', + 'v8_mac_arm64_dbg_ng': 'debug_arm64', + 'v8_mac_arm64_full_dbg_ng': 'full_debug_arm64', + 'v8_mac64_gc_stress_dbg_ng': 'debug_x64_trybot', 'v8_mac64_rel_ng': 'release_x64_trybot', 'v8_mac64_dbg': 'debug_x64', 'v8_mac64_dbg_ng': 'debug_x64', 'v8_mac64_compile_full_dbg_ng': 'full_debug_x64', - 'v8_mac64_asan_rel': 'release_x64_asan_no_lsan', + 'v8_mac64_asan_rel_ng': 'release_x64_asan_no_lsan', 'v8_linux_arm_rel_ng': 'release_simulate_arm_trybot', 'v8_linux_arm_lite_rel_ng': 'release_simulate_arm_lite_trybot', - 'v8_linux_arm_dbg': 'debug_simulate_arm', + 'v8_linux_arm_dbg_ng': 'debug_simulate_arm', 'v8_linux_arm_armv8a_rel': 'release_simulate_arm_trybot', 'v8_linux_arm_armv8a_dbg': 'debug_simulate_arm', 'v8_linux_arm64_rel_ng': 'release_simulate_arm64_trybot', 'v8_linux_arm64_cfi_rel_ng' : 'release_simulate_arm64_cfi', - 'v8_linux_arm64_dbg': 'debug_simulate_arm64', - 
'v8_linux_arm64_gc_stress_dbg': 'debug_simulate_arm64', + 'v8_linux_arm64_dbg_ng': 'debug_simulate_arm64', + 'v8_linux_arm64_gc_stress_dbg_ng': 'debug_simulate_arm64', 'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel', 'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el', }, @@ -399,11 +403,17 @@ 'debug_bot', 'arm64', 'android', 'minimal_symbols'], 'debug_arm': [ 'debug_bot', 'arm', 'hard_float'], + 'debug_arm64': [ + 'debug_bot', 'arm64'], + 'full_debug_arm64': [ + 'debug_bot', 'arm64', 'v8_full_debug'], # Release configs for arm. 'release_arm': [ 'release_bot', 'arm', 'hard_float'], 'release_arm64': [ + 'release_bot', 'arm64'], + 'release_arm64_hard_float': [ 'release_bot', 'arm64', 'hard_float'], 'release_android_arm': [ 'release_bot', 'arm', 'android', 'minimal_symbols', @@ -431,8 +441,6 @@ 'release_bot', 'x64', 'asan', 'lsan'], 'release_x64_asan_minimal_symbols': [ 'release_bot', 'x64', 'asan', 'lsan', 'minimal_symbols'], - 'release_x64_asan_minimal_symbols_coverage': [ - 'release_bot', 'x64', 'asan', 'bb', 'coverage', 'lsan', 'minimal_symbols'], 'release_x64_asan_no_lsan': [ 'release_bot', 'x64', 'asan'], 'release_x64_asan_no_lsan_verify_heap': [ @@ -449,6 +457,9 @@ 'release_bot', 'x64', 'cfi'], 'release_x64_cfi_clusterfuzz': [ 'release_bot', 'x64', 'cfi_clusterfuzz'], + 'release_x64_fuzzilli': [ + 'release_bot', 'x64', 'dcheck_always_on', 'v8_enable_slow_dchecks', 'v8_verify_heap', + 'v8_verify_csa', 'v8_enable_verify_predictable', 'fuzzilli'], 'release_x64_msvc': [ 'release_bot_no_goma', 'x64', 'minimal_symbols', 'msvc'], 'release_x64_correctness_fuzzer' : [ @@ -517,6 +528,8 @@ 'debug_bot', 'x64', 'perfetto'], 'debug_x64_trybot': [ 'debug_trybot', 'x64'], + 'debug_x64_reverse_jsargs': [ + 'debug_bot', 'x64', 'reverse_jsargs'], 'debug_x64_trybot_custom': [ 'debug_trybot', 'x64', 'v8_snapshot_custom'], 'full_debug_x64': [ @@ -593,10 +606,6 @@ 'gn_args': 'is_asan=true', }, - 'bb': { - 'gn_args': 'sanitizer_coverage_flags="bb,trace-pc-guard"', - 
}, - 'cfi': { 'mixins': ['v8_enable_test_features'], 'gn_args': ('is_cfi=true use_cfi_cast=true use_cfi_icall=true ' @@ -645,6 +654,11 @@ 'gn_args': 'target_os="fuchsia"', }, + 'fuzzilli': { + 'gn_args': 'v8_static_library=true v8_enable_v8_checks=true ' + 'sanitizer_coverage_flags="trace-pc-guard" v8_fuzzilli=true', + }, + 'gcc': { 'gn_args': 'is_clang=false', }, @@ -662,7 +676,7 @@ }, 'ios_simulator': { - 'gn_args': 'target_cpu="x64" target_os="ios"', + 'gn_args': 'target_cpu="x64" target_os="ios" ios_use_goma_rbe=true', }, 'lsan': { @@ -720,6 +734,10 @@ 'mixins': ['release_bot', 'minimal_symbols', 'dcheck_always_on'], }, + 'reverse_jsargs': { + 'gn_args': 'v8_enable_reverse_jsargs=true', + }, + 'official': { 'gn_args': 'is_official_build=true', }, diff --git a/deps/v8/infra/testing/PRESUBMIT.py b/deps/v8/infra/testing/PRESUBMIT.py index f1a64707b9cb1c..c701f02e56ecce 100644 --- a/deps/v8/infra/testing/PRESUBMIT.py +++ b/deps/v8/infra/testing/PRESUBMIT.py @@ -169,7 +169,7 @@ def error_msg(msg): def CheckChangeOnCommit(input_api, output_api): def file_filter(regexp): - return lambda f: input_api.FilterSourceFile(f, white_list=(regexp,)) + return lambda f: input_api.FilterSourceFile(f, files_to_check=(regexp,)) # Calculate which files are affected. 
if input_api.AffectedFiles(False, file_filter(r'.*PRESUBMIT\.py')): diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 72f739487ccec3..d652fdbdbfde00 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -83,7 +83,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, - 'v8_linux_gc_stress_dbg': { + 'v8_linux_gc_stress_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -92,7 +92,7 @@ {'name': 'd8testing', 'test_args': ['--gc-stress'], 'shards': 5}, ], }, - 'v8_linux_gcc_rel': { + 'v8_linux_gcc_rel_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -214,7 +214,7 @@ }, ############################################################################## # Linux32 with arm simulators - 'v8_linux_arm_dbg': { + 'v8_linux_arm_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -293,6 +293,22 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, ], }, + 'v8_linux64_fuzzilli_ng_triggered': { + 'swarming_dimensions' : { + 'os': 'Ubuntu-16.04', + }, + # TODO(almuthanna): Add a new test config for the fuzzilli suite. + 'tests': [], + }, + 'v8_linux64_reverse_jsargs_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + 'os': 'Ubuntu-16.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 3}, + ], + }, 'v8_linux64_gc_stress_custom_snapshot_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', @@ -314,14 +330,17 @@ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, # Native context independent code. {'name': 'v8testing', 'variant': 'nci'}, + {'name': 'v8testing', 'variant': 'nci_as_highest_tier'}, # Stress sampling. {'name': 'mjsunit', 'variant': 'stress_sampling'}, {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + # Experimental regexp engine. 
+ {'name': 'mjsunit', 'variant': 'experimental_regexp'}, ], }, - 'v8_linux64_msan_rel': { + 'v8_linux64_msan_rel_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -391,27 +410,7 @@ {'name': 'v8testing', 'variant': 'stress_instruction_scheduling'}, ], }, - # TODO(machenbach): Experimental builder with incomplete configs. Should be - # similar to v8_linux64_rel_ng_triggered after testing. - 'v8_linux64_rel_xg': { - 'swarming_dimensions' : { - 'cpu': 'x86-64-avx2', - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8initializers'}, - {'name': 'v8testing', 'shards': 2}, - ], - }, - 'v8_linux64_sanitizer_coverage_rel': { - 'swarming_dimensions' : { - 'os': 'Ubuntu-16.04', - }, - 'tests': [ - {'name': 'v8testing', 'shards': 3}, - ], - }, - 'v8_linux64_tsan_rel': { + 'v8_linux64_tsan_rel_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -450,7 +449,7 @@ }, ############################################################################## # Linux64 with arm64 simulators - 'v8_linux_arm64_dbg': { + 'v8_linux_arm64_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -463,7 +462,7 @@ {'name': 'v8testing', 'variant': 'trusted', 'shards': 5}, ], }, - 'v8_linux_arm64_gc_stress_dbg': { + 'v8_linux_arm64_gc_stress_dbg_ng_triggered': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -521,7 +520,7 @@ }, ############################################################################## # Win32 - 'v8_win_dbg': { + 'v8_win_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-7-SP1', @@ -552,7 +551,7 @@ {'name': 'v8testing', 'shards': 5}, ], }, - 'v8_win64_dbg': { + 'v8_win64_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Windows-10-15063', @@ -589,7 +588,7 @@ }, ############################################################################## # Mac64 - 'v8_mac64_asan_rel': { + 'v8_mac64_asan_rel_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.13', 
@@ -610,7 +609,7 @@ {'name': 'v8testing', 'variant': 'extra', 'shards': 2}, ], }, - 'v8_mac64_gc_stress_dbg': { + 'v8_mac64_gc_stress_dbg_ng_triggered': { 'swarming_dimensions' : { 'cpu': 'x86-64', 'os': 'Mac-10.13', @@ -631,6 +630,36 @@ {'name': 'v8testing', 'variant': 'extra'}, ], }, + 'v8_mac_arm64_rel_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'v8testing'}, + {'name': 'test262'}, + ], + }, + 'v8_mac_arm64_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'v8testing'}, + {'name': 'test262'}, + ], + }, + 'v8_mac_arm64_full_dbg_ng_triggered': { + 'swarming_dimensions' : { + 'cpu': 'x86-64', + 'os': 'Mac-10.13', + }, + 'tests': [ + {'name': 'v8testing'}, + {'name': 'test262'}, + ], + }, ############################################################################## ### luci.v8.ci ############################################################################## @@ -964,11 +993,14 @@ {'name': 'v8testing', 'variant': 'infra_staging', 'shards': 2}, # Native context independent code. {'name': 'v8testing', 'variant': 'nci'}, + {'name': 'v8testing', 'variant': 'nci_as_highest_tier'}, # Stress sampling. {'name': 'mjsunit', 'variant': 'stress_sampling'}, {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + # Experimental regexp engine. + {'name': 'mjsunit', 'variant': 'experimental_regexp'}, ], }, 'V8 Linux64 - debug - perfetto': { @@ -993,11 +1025,14 @@ {'name': 'v8testing', 'variant': 'infra_staging'}, # Native context independent code. {'name': 'v8testing', 'variant': 'nci'}, + {'name': 'v8testing', 'variant': 'nci_as_highest_tier'}, # Stress sampling. {'name': 'mjsunit', 'variant': 'stress_sampling'}, {'name': 'webkit', 'variant': 'stress_sampling'}, # Stress snapshot. {'name': 'mjsunit', 'variant': 'stress_snapshot'}, + # Experimental regexp engine. 
+ {'name': 'mjsunit', 'variant': 'experimental_regexp'}, ], }, 'V8 Linux64 - gcov coverage': { @@ -1024,6 +1059,15 @@ {'name': 'v8testing', 'shards': 2}, ], }, + 'V8 Linux64 - reverse jsargs': { + 'swarming_dimensions' : { + 'cpu': 'x86-64-avx2', + 'os': 'Ubuntu-16.04', + }, + 'tests': [ + {'name': 'v8testing', 'shards': 3}, + ], + }, 'V8 Linux64 - shared': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', @@ -1078,7 +1122,7 @@ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, ], }, - 'V8 Linux64 TSAN - concurrent marking': { + 'V8 Linux64 TSAN - stress-incremental-marking': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -1105,7 +1149,7 @@ { 'name': 'v8testing', 'test_args': ['--extra-flags=--stress-incremental-marking'], - 'shards': 4, + 'shards': 6, }, ], }, diff --git a/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc b/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc new file mode 100644 index 00000000000000..8aaa9cd39ce654 --- /dev/null +++ b/deps/v8/samples/cppgc/cppgc-for-v8-embedders.cc @@ -0,0 +1,106 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * This sample program shows how to set up a stand-alone cppgc heap as an + * embedder of V8. Most importantly, this example shows how to reuse V8's + * platform for cppgc. + */ + +/** + * Platform used by cppgc. Can just redirect to v8::Platform for most calls. + * Exception: GetForegroundTaskRunner(), see below. + * + * This example uses V8's default platform implementation to drive the cppgc + * platform. 
+ */ +class Platform final : public cppgc::Platform { + public: + Platform() : v8_platform_(v8::platform::NewDefaultPlatform()) {} + + cppgc::PageAllocator* GetPageAllocator() final { + return v8_platform_->GetPageAllocator(); + } + + double MonotonicallyIncreasingTime() final { + return v8_platform_->MonotonicallyIncreasingTime(); + } + + std::shared_ptr GetForegroundTaskRunner() final { + // V8's default platform creates a new task runner when passed the + // v8::Isolate pointer the first time. For non-default platforms this will + // require getting the appropriate task runner. + return v8_platform_->GetForegroundTaskRunner(nullptr); + } + + std::unique_ptr PostJob( + cppgc::TaskPriority priority, + std::unique_ptr job_task) final { + return v8_platform_->PostJob(priority, std::move(job_task)); + } + + private: + std::unique_ptr v8_platform_; +}; + +/** + * Simple string rope to illustrate allocation and garbage collection below. The + * rope keeps the next parts alive via regular managed reference. + */ +class Rope final : public cppgc::GarbageCollected { + public: + explicit Rope(std::string part, Rope* next = nullptr) + : part_(part), next_(next) {} + + void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); } + + private: + std::string part_; + cppgc::Member next_; + + friend std::ostream& operator<<(std::ostream& os, const Rope& rope); +}; + +std::ostream& operator<<(std::ostream& os, const Rope& rope) { + os << rope.part_; + if (rope.next_) { + os << *rope.next_; + } + return os; +} + +int main(int argc, char* argv[]) { + // Create a platform that is used by cppgc::Heap for execution and backend + // allocation. + auto cppgc_platform = std::make_shared(); + // Initialize the process. This must happen before any cppgc::Heap::Create() + // calls. + cppgc::InitializeProcess(cppgc_platform->GetPageAllocator()); + // Create a managed heap. + std::unique_ptr heap = cppgc::Heap::Create(cppgc_platform); + // Allocate a string rope on the managed heap. 
+ auto* greeting = cppgc::MakeGarbageCollected( + heap->GetAllocationHandle(), "Hello ", + cppgc::MakeGarbageCollected(heap->GetAllocationHandle(), "World!")); + // Manually trigger garbage collection. The object greeting is held alive + // through conservative stack scanning. + heap->ForceGarbageCollectionSlow("V8 embedders example", "Testing"); + std::cout << *greeting << std::endl; + // Gracefully shutdown the process. + cppgc::ShutdownProcess(); + return 0; +} diff --git a/deps/v8/samples/cppgc/cppgc-standalone.cc b/deps/v8/samples/cppgc/cppgc-standalone.cc new file mode 100644 index 00000000000000..f8cb4020c3a53d --- /dev/null +++ b/deps/v8/samples/cppgc/cppgc-standalone.cc @@ -0,0 +1,64 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/** + * This sample program shows how to set up a stand-alone cppgc heap. + */ + +/** + * Simple string rope to illustrate allocation and garbage collection below. + * The rope keeps the next parts alive via regular managed reference. + */ +class Rope final : public cppgc::GarbageCollected { + public: + explicit Rope(std::string part, Rope* next = nullptr) + : part_(part), next_(next) {} + + void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); } + + private: + std::string part_; + cppgc::Member next_; + + friend std::ostream& operator<<(std::ostream& os, const Rope& rope) { + os << rope.part_; + if (rope.next_) { + os << *rope.next_; + } + return os; + } +}; + +int main(int argc, char* argv[]) { + // Create a default platform that is used by cppgc::Heap for execution and + // backend allocation. + auto cppgc_platform = std::make_shared(); + // Initialize the process. This must happen before any + // cppgc::Heap::Create() calls. 
+ cppgc::InitializeProcess(cppgc_platform->GetPageAllocator()); + // Create a managed heap. + std::unique_ptr heap = cppgc::Heap::Create(cppgc_platform); + // Allocate a string rope on the managed heap. + auto* greeting = cppgc::MakeGarbageCollected( + heap->GetAllocationHandle(), "Hello ", + cppgc::MakeGarbageCollected(heap->GetAllocationHandle(), "World!")); + // Manually trigger garbage collection. The object greeting is held alive + // through conservative stack scanning. + heap->ForceGarbageCollectionSlow("CppGC stand-alone example", "Testing"); + std::cout << *greeting << std::endl; + // Gracefully shutdown the process. + cppgc::ShutdownProcess(); + return 0; +} diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc index aed050ceccd283..70450296c76389 100644 --- a/deps/v8/samples/shell.cc +++ b/deps/v8/samples/shell.cc @@ -108,21 +108,15 @@ v8::Local CreateShellContext(v8::Isolate* isolate) { // Create a template for the global object. v8::Local global = v8::ObjectTemplate::New(isolate); // Bind the global 'print' function to the C++ Print callback. - global->Set(v8::String::NewFromUtf8Literal(isolate, "print"), - v8::FunctionTemplate::New(isolate, Print)); + global->Set(isolate, "print", v8::FunctionTemplate::New(isolate, Print)); // Bind the global 'read' function to the C++ Read callback. - global->Set(v8::String::NewFromUtf8Literal(isolate, "read"), - v8::FunctionTemplate::New(isolate, Read)); + global->Set(isolate, "read", v8::FunctionTemplate::New(isolate, Read)); // Bind the global 'load' function to the C++ Load callback. 
- global->Set(v8::String::NewFromUtf8Literal(isolate, "load"), - v8::FunctionTemplate::New(isolate, Load)); + global->Set(isolate, "load", v8::FunctionTemplate::New(isolate, Load)); // Bind the 'quit' function - global->Set(v8::String::NewFromUtf8Literal(isolate, "quit"), - v8::FunctionTemplate::New(isolate, Quit)); + global->Set(isolate, "quit", v8::FunctionTemplate::New(isolate, Quit)); // Bind the 'version' function - global->Set(v8::String::NewFromUtf8Literal(isolate, "version"), - v8::FunctionTemplate::New(isolate, Version)); - + global->Set(isolate, "version", v8::FunctionTemplate::New(isolate, Version)); return v8::Context::New(isolate, NULL, global); } diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS index abea95558da2c2..6b4b6661bd3690 100644 --- a/deps/v8/src/DEPS +++ b/deps/v8/src/DEPS @@ -18,13 +18,13 @@ include_rules = [ "+src/heap/heap-inl.h", "+src/heap/heap-write-barrier-inl.h", "+src/heap/heap-write-barrier.h", + "+src/heap/local-factory-inl.h", + "+src/heap/local-factory.h", "+src/heap/local-heap.h", + "+src/heap/local-heap-inl.h", # TODO(v8:10496): Don't expose memory chunk outside of heap/. 
"+src/heap/memory-chunk.h", "+src/heap/memory-chunk-inl.h", - "+src/heap/off-thread-factory-inl.h", - "+src/heap/off-thread-factory.h", - "+src/heap/off-thread-heap.h", "+src/heap/read-only-heap-inl.h", "+src/heap/read-only-heap.h", "+src/heap/safepoint.h", diff --git a/deps/v8/src/api/api-natives.cc b/deps/v8/src/api/api-natives.cc index 410c37ce98d754..e21dbd0eeedbb3 100644 --- a/deps/v8/src/api/api-natives.cc +++ b/deps/v8/src/api/api-natives.cc @@ -371,7 +371,7 @@ MaybeHandle InstantiateObject(Isolate* isolate, Handle new_target, bool is_prototype) { Handle constructor; - int serial_number = Smi::ToInt(info->serial_number()); + int serial_number = info->serial_number(); if (!new_target.is_null()) { if (IsSimpleInstantiation(isolate, *info, *new_target)) { constructor = Handle::cast(new_target); @@ -462,7 +462,7 @@ MaybeHandle GetInstancePrototype(Isolate* isolate, MaybeHandle InstantiateFunction( Isolate* isolate, Handle native_context, Handle data, MaybeHandle maybe_name) { - int serial_number = Smi::ToInt(data->serial_number()); + int serial_number = data->serial_number(); if (serial_number) { Handle result; if (ProbeInstantiationsCache(isolate, native_context, serial_number, diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc index fe0ca064833999..9f0f6673d65781 100644 --- a/deps/v8/src/api/api.cc +++ b/deps/v8/src/api/api.cc @@ -11,11 +11,11 @@ #include // For move #include -#include "src/api/api-inl.h" - +#include "include/v8-cppgc.h" #include "include/v8-fast-api-calls.h" #include "include/v8-profiler.h" #include "include/v8-util.h" +#include "src/api/api-inl.h" #include "src/api/api-natives.h" #include "src/base/functional.h" #include "src/base/logging.h" @@ -49,6 +49,7 @@ #include "src/execution/v8threads.h" #include "src/execution/vm-state-inl.h" #include "src/handles/global-handles.h" +#include "src/handles/persistent-handles.h" #include "src/heap/embedder-tracing.h" #include "src/heap/heap-inl.h" #include "src/init/bootstrapper.h" @@ -58,6 
+59,8 @@ #include "src/json/json-parser.h" #include "src/json/json-stringifier.h" #include "src/logging/counters.h" +#include "src/logging/metrics.h" +#include "src/logging/tracing-flags.h" #include "src/numbers/conversions-inl.h" #include "src/objects/api-callbacks.h" #include "src/objects/contexts.h" @@ -107,6 +110,7 @@ #include "src/utils/detachable-vector.h" #include "src/utils/version.h" #include "src/wasm/streaming-decoder.h" +#include "src/wasm/value-type.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-result.h" @@ -503,7 +507,11 @@ void Utils::ReportOOMFailure(i::Isolate* isolate, const char* location, if (fatal_callback == nullptr) { base::OS::PrintError("\n#\n# Fatal %s OOM in %s\n#\n\n", is_heap_oom ? "javascript" : "process", location); +#ifdef V8_FUZZILLI + exit(0); +#else base::OS::Abort(); +#endif // V8_FUZZILLI } else { fatal_callback(location, is_heap_oom @@ -823,6 +831,8 @@ bool StartupData::CanBeRehashed() const { return i::Snapshot::ExtractRehashability(this); } +bool StartupData::IsValid() const { return i::Snapshot::VersionIsValid(this); } + void V8::SetDcheckErrorHandler(DcheckErrorCallback that) { v8::base::SetDcheckFunction(that); } @@ -837,7 +847,9 @@ void V8::SetFlagsFromString(const char* str, size_t length) { } void V8::SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags) { - i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags); + using HelpOptions = i::FlagList::HelpOptions; + i::FlagList::SetFlagsFromCommandLine(argc, argv, remove_flags, + HelpOptions(HelpOptions::kDontExit)); } RegisteredExtension* RegisteredExtension::first_extension_ = nullptr; @@ -979,6 +991,42 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj, return result.location(); } +// static +i::Address* i::JSMemberBase::New(v8::Isolate* isolate, i::Address* object_slot, + i::Address** this_slot) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + 
LOG_API(i_isolate, JSMemberBase, New); +#ifdef DEBUG + Utils::ApiCheck((object_slot != nullptr), "i::JSMemberBase::New", + "the object must be not null"); +#endif + i::Handle result = i_isolate->global_handles()->CreateTraced( + *object_slot, reinterpret_cast(this_slot), + false /* no destructor */); +#ifdef VERIFY_HEAP + if (i::FLAG_verify_heap) { + i::Object(*object_slot).ObjectVerify(i_isolate); + } +#endif // VERIFY_HEAP + return result.location(); +} + +// static +void i::JSMemberBase::Delete(i::Address* object) { + i::GlobalHandles::DestroyTraced(object); +} + +// static +void i::JSMemberBase::Copy(const i::Address* const* from_slot, + i::Address** to_slot) { + i::GlobalHandles::CopyTracedGlobal(from_slot, to_slot); +} + +// static +void i::JSMemberBase::Move(i::Address** from_slot, i::Address** to_slot) { + i::GlobalHandles::MoveTracedGlobal(from_slot, to_slot); +} + i::Address* V8::CopyGlobalReference(i::Address* from) { i::Handle result = i::GlobalHandles::CopyGlobal(from); return result.location(); @@ -1274,7 +1322,7 @@ void Context::SetAlignedPointerInEmbedderData(int index, void* value) { static void InitializeTemplate(i::Handle that, int type) { that->set_number_of_properties(0); - that->set_tag(i::Smi::FromInt(type)); + that->set_tag(type); } void Template::Set(v8::Local name, v8::Local value, @@ -1286,7 +1334,7 @@ void Template::Set(v8::Local name, v8::Local value, auto value_obj = Utils::OpenHandle(*value); CHECK(!value_obj->IsJSReceiver() || value_obj->IsTemplateInfo()); if (value_obj->IsObjectTemplateInfo()) { - templ->set_serial_number(i::Smi::zero()); + templ->set_serial_number(0); if (templ->IsFunctionTemplateInfo()) { i::Handle::cast(templ)->set_do_not_cache(true); } @@ -1336,7 +1384,7 @@ Local FunctionTemplate::PrototypeTemplate() { auto self = Utils::OpenHandle(this); i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle result(self->GetPrototypeTemplate(), i_isolate); + i::Handle 
result(self->GetPrototypeTemplate(), i_isolate); if (result->IsUndefined(i_isolate)) { // Do not cache prototype objects. result = Utils::OpenHandle( @@ -1351,7 +1399,8 @@ void FunctionTemplate::SetPrototypeProviderTemplate( auto self = Utils::OpenHandle(this); i::Isolate* i_isolate = self->GetIsolate(); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle result = Utils::OpenHandle(*prototype_provider); + i::Handle result = + Utils::OpenHandle(*prototype_provider); CHECK(self->GetPrototypeTemplate().IsUndefined(i_isolate)); CHECK(self->GetParentTemplate().IsUndefined(i_isolate)); i::FunctionTemplateInfo::SetPrototypeProviderTemplate(i_isolate, self, @@ -1394,7 +1443,7 @@ static Local FunctionTemplateNew( if (!do_not_cache) { next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); } - obj->set_serial_number(i::Smi::FromInt(next_serial_number)); + obj->set_serial_number(next_serial_number); } if (callback != nullptr) { Utils::ToLocal(obj)->SetCallHandler(callback, data, side_effect_type, @@ -1605,15 +1654,19 @@ static Local ObjectTemplateNew( i::OBJECT_TEMPLATE_INFO_TYPE, i::AllocationType::kOld); i::Handle obj = i::Handle::cast(struct_obj); - InitializeTemplate(obj, Consts::OBJECT_TEMPLATE); - int next_serial_number = 0; - if (!do_not_cache) { - next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + { + // Disallow GC until all fields of obj have acceptable types. 
+ i::DisallowHeapAllocation no_gc; + InitializeTemplate(obj, Consts::OBJECT_TEMPLATE); + int next_serial_number = 0; + if (!do_not_cache) { + next_serial_number = isolate->heap()->GetNextTemplateSerialNumber(); + } + obj->set_serial_number(next_serial_number); + obj->set_data(0); } - obj->set_serial_number(i::Smi::FromInt(next_serial_number)); if (!constructor.IsEmpty()) obj->set_constructor(*Utils::OpenHandle(*constructor)); - obj->set_data(i::Smi::zero()); return Utils::ToLocal(obj); } @@ -2234,6 +2287,28 @@ Local Module::GetUnboundModuleScript() { self->GetIsolate())); } +int Module::ScriptId() { + i::Handle self = Utils::OpenHandle(this); + Utils::ApiCheck(self->IsSourceTextModule(), "v8::Module::ScriptId", + "v8::Module::ScriptId must be used on an SourceTextModule"); + + // The SharedFunctionInfo is not available for errored modules. + Utils::ApiCheck(GetStatus() != kErrored, "v8::Module::ScriptId", + "v8::Module::ScriptId must not be used on an errored module"); + i::Handle sfi( + i::Handle::cast(self)->GetSharedFunctionInfo(), + self->GetIsolate()); + return ToApiHandle(sfi)->GetId(); +} + +bool Module::IsSourceTextModule() const { + return Utils::OpenHandle(this)->IsSourceTextModule(); +} + +bool Module::IsSyntheticModule() const { + return Utils::OpenHandle(this)->IsSyntheticModule(); +} + int Module::GetIdentityHash() const { return Utils::OpenHandle(this)->hash(); } Maybe Module::InstantiateModule(Local context, @@ -5037,7 +5112,7 @@ struct OneByteMask<4> { }; template <> struct OneByteMask<8> { - static const uint64_t value = V8_2PART_UINT64_C(0xFF00FF00, FF00FF00); + static const uint64_t value = 0xFF00'FF00'FF00'FF00; }; static const uintptr_t kOneByteMask = OneByteMask::value; static const uintptr_t kAlignmentMask = sizeof(uintptr_t) - 1; @@ -5804,9 +5879,9 @@ static i::Handle CreateEnvironment( v8::Local proxy_template; i::Handle proxy_constructor; i::Handle global_constructor; - i::Handle named_interceptor( + i::Handle named_interceptor( 
isolate->factory()->undefined_value()); - i::Handle indexed_interceptor( + i::Handle indexed_interceptor( isolate->factory()->undefined_value()); if (!maybe_global_template.IsEmpty()) { @@ -6078,6 +6153,20 @@ void Context::SetContinuationPreservedEmbedderData(Local data) { *i::Handle::cast(Utils::OpenHandle(*data))); } +MaybeLocal metrics::Recorder::GetContext( + Isolate* isolate, metrics::Recorder::ContextId id) { + i::Isolate* i_isolate = reinterpret_cast(isolate); + return i_isolate->GetContextFromRecorderContextId(id); +} + +metrics::Recorder::ContextId metrics::Recorder::GetContextId( + Local context) { + i::Handle i_context = Utils::OpenHandle(*context); + i::Isolate* isolate = i_context->GetIsolate(); + return isolate->GetOrRegisterRecorderContextId( + handle(i_context->native_context(), isolate)); +} + namespace { i::Address* GetSerializedDataFromFixedArray(i::Isolate* isolate, i::FixedArray list, size_t index) { @@ -8146,6 +8235,11 @@ void Isolate::RequestInterrupt(InterruptCallback callback, void* data) { isolate->RequestInterrupt(callback, data); } +bool Isolate::HasPendingBackgroundTasks() { + i::Isolate* isolate = reinterpret_cast(this); + return isolate->wasm_engine()->HasRunningCompileJob(isolate); +} + void Isolate::RequestGarbageCollectionForTesting(GarbageCollectionType type) { CHECK(i::FLAG_expose_gc); if (type == kMinorGarbageCollection) { @@ -8413,13 +8507,13 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { heap_statistics->total_global_handles_size_ = heap->TotalGlobalHandlesSize(); heap_statistics->used_global_handles_size_ = heap->UsedGlobalHandlesSize(); -#ifndef V8_SHARED_RO_HEAP - i::ReadOnlySpace* ro_space = heap->read_only_space(); - heap_statistics->total_heap_size_ += ro_space->CommittedMemory(); - heap_statistics->total_physical_size_ += ro_space->CommittedPhysicalMemory(); - heap_statistics->total_available_size_ += ro_space->Available(); - heap_statistics->used_heap_size_ += ro_space->SizeOfObjects(); -#endif 
// V8_SHARED_RO_HEAP + if (!i::ReadOnlyHeap::IsReadOnlySpaceShared()) { + i::ReadOnlySpace* ro_space = heap->read_only_space(); + heap_statistics->total_heap_size_ += ro_space->CommittedMemory(); + heap_statistics->total_physical_size_ += + ro_space->CommittedPhysicalMemory(); + heap_statistics->used_heap_size_ += ro_space->Size(); + } heap_statistics->total_heap_size_executable_ = heap->CommittedMemoryExecutable(); @@ -8429,7 +8523,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { // now we just add the values, thereby over-approximating the peak slightly. heap_statistics->malloced_memory_ = isolate->allocator()->GetCurrentMemoryUsage() + - isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage(); + isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage() + + isolate->string_table()->GetCurrentMemoryUsage(); heap_statistics->external_memory_ = isolate->heap()->backing_store_bytes(); heap_statistics->peak_malloced_memory_ = isolate->allocator()->GetMaxMemoryUsage() + @@ -8452,18 +8547,26 @@ bool Isolate::GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics, i::Isolate* isolate = reinterpret_cast(this); i::Heap* heap = isolate->heap(); - i::Space* space = heap->space(static_cast(index)); i::AllocationSpace allocation_space = static_cast(index); - space_statistics->space_name_ = i::Heap::GetSpaceName(allocation_space); - - if (allocation_space == i::RO_SPACE && V8_SHARED_RO_HEAP_BOOL) { - // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared. - space_statistics->space_size_ = 0; - space_statistics->space_used_size_ = 0; - space_statistics->space_available_size_ = 0; - space_statistics->physical_space_size_ = 0; + space_statistics->space_name_ = i::BaseSpace::GetSpaceName(allocation_space); + + if (allocation_space == i::RO_SPACE) { + if (i::ReadOnlyHeap::IsReadOnlySpaceShared()) { + // RO_SPACE memory is accounted for elsewhere when ReadOnlyHeap is shared. 
+ space_statistics->space_size_ = 0; + space_statistics->space_used_size_ = 0; + space_statistics->space_available_size_ = 0; + space_statistics->physical_space_size_ = 0; + } else { + i::ReadOnlySpace* space = heap->read_only_space(); + space_statistics->space_size_ = space->CommittedMemory(); + space_statistics->space_used_size_ = space->Size(); + space_statistics->space_available_size_ = 0; + space_statistics->physical_space_size_ = space->CommittedPhysicalMemory(); + } } else { + i::Space* space = heap->space(static_cast(index)); space_statistics->space_size_ = space->CommittedMemory(); space_statistics->space_used_size_ = space->SizeOfObjects(); space_statistics->space_available_size_ = space->Available(); @@ -8562,6 +8665,19 @@ size_t Isolate::NumberOfPhantomHandleResetsSinceLastCall() { return isolate->global_handles()->GetAndResetGlobalHandleResetCount(); } +int64_t Isolate::AdjustAmountOfExternalAllocatedMemory( + int64_t change_in_bytes) { + i::Isolate* i_isolate = reinterpret_cast(this); + int64_t amount = i_isolate->heap()->update_external_memory(change_in_bytes); + + if (change_in_bytes <= 0) return amount; + + if (amount > i_isolate->heap()->external_memory_limit()) { + ReportExternalAllocationLimitReached(); + } + return amount; +} + void Isolate::SetEventLogger(LogEventCallback that) { // Do not overwrite the event logger if we want to log explicitly. 
if (i::FLAG_log_internal_timer_events) return; @@ -8705,6 +8821,12 @@ void Isolate::SetAddHistogramSampleFunction( ->SetAddHistogramSampleFunction(callback); } +void Isolate::SetMetricsRecorder( + const std::shared_ptr& metrics_recorder) { + i::Isolate* isolate = reinterpret_cast(this); + isolate->metrics_recorder()->SetRecorder(isolate, metrics_recorder); +} + void Isolate::SetAddCrashKeyCallback(AddCrashKeyCallback callback) { i::Isolate* isolate = reinterpret_cast(this); isolate->SetAddCrashKeyCallback(callback); @@ -8819,9 +8941,9 @@ UnwindState Isolate::GetUnwindState() { i::Isolate* isolate = reinterpret_cast(this); unwind_state.embedded_code_range.start = - reinterpret_cast(isolate->embedded_blob()); + reinterpret_cast(isolate->embedded_blob_code()); unwind_state.embedded_code_range.length_in_bytes = - isolate->embedded_blob_size(); + isolate->embedded_blob_code_size(); std::array, 3> entry_stubs = { {{i::Builtins::kJSEntry, &unwind_state.js_entry_stub}, @@ -9666,6 +9788,14 @@ v8::Platform* debug::GetCurrentPlatform() { return i::V8::GetCurrentPlatform(); } +void debug::ForceGarbageCollection( + v8::Isolate* isolate, + v8::EmbedderHeapTracer::EmbedderStackState embedder_stack_state) { + i::Heap* heap = reinterpret_cast(isolate)->heap(); + heap->SetEmbedderStackStateForNextFinalizaton(embedder_stack_state); + isolate->LowMemoryNotification(); +} + debug::WasmScript* debug::WasmScript::Cast(debug::Script* script) { CHECK(script->IsWasm()); return static_cast(script); @@ -10060,7 +10190,7 @@ void debug::GlobalLexicalScopeNames( i::Handle table( context->global_object().native_context().script_context_table(), isolate); - for (int i = 0; i < table->used(); i++) { + for (int i = 0; i < table->synchronized_used(); i++) { i::Handle context = i::ScriptContextTable::GetContext(isolate, table, i); DCHECK(context->IsScriptContext()); @@ -10307,9 +10437,11 @@ int debug::WasmValue::value_type() { v8::Local debug::WasmValue::bytes() { i::Handle obj = 
Utils::OpenHandle(this); - // Should only be called on i32, i64, f32, f64, s128. - DCHECK_GE(1, obj->value_type()); - DCHECK_LE(5, obj->value_type()); + DCHECK(i::wasm::ValueType::Kind::kI32 == obj->value_type() || + i::wasm::ValueType::Kind::kI64 == obj->value_type() || + i::wasm::ValueType::Kind::kF32 == obj->value_type() || + i::wasm::ValueType::Kind::kF64 == obj->value_type() || + i::wasm::ValueType::Kind::kS128 == obj->value_type()); i::Isolate* isolate = obj->GetIsolate(); i::Handle bytes_or_ref(obj->bytes_or_ref(), isolate); @@ -10331,8 +10463,7 @@ v8::Local debug::WasmValue::bytes() { v8::Local debug::WasmValue::ref() { i::Handle obj = Utils::OpenHandle(this); - // Should only be called on anyref. - DCHECK_EQ(6, obj->value_type()); + DCHECK_EQ(i::wasm::HeapType::kExtern, obj->value_type()); i::Isolate* isolate = obj->GetIsolate(); i::Handle bytes_or_ref(obj->bytes_or_ref(), isolate); @@ -11049,32 +11180,41 @@ char* HandleScopeImplementer::Iterate(RootVisitor* v, char* storage) { return storage + ArchiveSpacePerThread(); } -std::unique_ptr HandleScopeImplementer::Detach( +std::unique_ptr HandleScopeImplementer::DetachPersistent( Address* prev_limit) { - std::unique_ptr deferred( - new DeferredHandles(isolate()->handle_scope_data()->next, isolate())); + std::unique_ptr ph(new PersistentHandles(isolate())); + DCHECK_NOT_NULL(prev_limit); while (!blocks_.empty()) { Address* block_start = blocks_.back(); Address* block_limit = &block_start[kHandleBlockSize]; // We should not need to check for SealHandleScope here. Assert this. 
- DCHECK(prev_limit == block_limit || - !(block_start <= prev_limit && prev_limit <= block_limit)); + DCHECK_IMPLIES(block_start <= prev_limit && prev_limit <= block_limit, + prev_limit == block_limit); if (prev_limit == block_limit) break; - deferred->blocks_.push_back(blocks_.back()); + ph->blocks_.push_back(blocks_.back()); +#if DEBUG + ph->ordered_blocks_.insert(blocks_.back()); +#endif blocks_.pop_back(); } - // deferred->blocks_ now contains the blocks installed on the + // ph->blocks_ now contains the blocks installed on the // HandleScope stack since BeginDeferredScope was called, but in // reverse order. - DCHECK(prev_limit == nullptr || !blocks_.empty()); + // Switch first and last blocks, such that the last block is the one + // that is potentially half full. + DCHECK(!blocks_.empty() && !ph->blocks_.empty()); + std::swap(ph->blocks_.front(), ph->blocks_.back()); + + ph->block_next_ = isolate()->handle_scope_data()->next; + Address* block_start = ph->blocks_.back(); + ph->block_limit_ = block_start + kHandleBlockSize; - DCHECK(!blocks_.empty() && prev_limit != nullptr); DCHECK_NOT_NULL(last_handle_before_deferred_block_); last_handle_before_deferred_block_ = nullptr; - return deferred; + return ph; } void HandleScopeImplementer::BeginDeferredScope() { @@ -11082,40 +11222,6 @@ void HandleScopeImplementer::BeginDeferredScope() { last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next; } -DeferredHandles::~DeferredHandles() { - isolate_->UnlinkDeferredHandles(this); - - for (size_t i = 0; i < blocks_.size(); i++) { -#ifdef ENABLE_HANDLE_ZAPPING - HandleScope::ZapRange(blocks_[i], &blocks_[i][kHandleBlockSize]); -#endif - isolate_->handle_scope_implementer()->ReturnBlock(blocks_[i]); - } -} - -void DeferredHandles::Iterate(RootVisitor* v) { - DCHECK(!blocks_.empty()); - - // Comparing pointers that do not point into the same array is undefined - // behavior, which means if we didn't cast everything to plain Address - // before comparing, 
the compiler would be allowed to assume that all - // comparisons evaluate to true and drop the entire check. - DCHECK((reinterpret_cast
(first_block_limit_) >= - reinterpret_cast
(blocks_.front())) && - (reinterpret_cast
(first_block_limit_) <= - reinterpret_cast
(&(blocks_.front())[kHandleBlockSize]))); - - v->VisitRootPointers(Root::kHandleScope, nullptr, - FullObjectSlot(blocks_.front()), - FullObjectSlot(first_block_limit_)); - - for (size_t i = 1; i < blocks_.size(); i++) { - v->VisitRootPointers(Root::kHandleScope, nullptr, - FullObjectSlot(blocks_[i]), - FullObjectSlot(&blocks_[i][kHandleBlockSize])); - } -} - void InvokeAccessorGetterCallback( v8::Local property, const v8::PropertyCallbackInfo& info, diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h index ad879657c99247..6c0ea8faea2ee7 100644 --- a/deps/v8/src/api/api.h +++ b/deps/v8/src/api/api.h @@ -302,30 +302,7 @@ inline bool ToLocal(v8::internal::MaybeHandle maybe, namespace internal { -class V8_EXPORT_PRIVATE DeferredHandles { - public: - ~DeferredHandles(); - - private: - DeferredHandles(Address* first_block_limit, Isolate* isolate) - : next_(nullptr), - previous_(nullptr), - first_block_limit_(first_block_limit), - isolate_(isolate) { - isolate->LinkDeferredHandles(this); - } - - void Iterate(RootVisitor* v); - - std::vector blocks_; - DeferredHandles* next_; - DeferredHandles* previous_; - Address* first_block_limit_; - Isolate* isolate_; - - friend class HandleScopeImplementer; - friend class Isolate; -}; +class PersistentHandles; // This class is here in order to be able to declare it a friend of // HandleScope. 
Moving these methods to be members of HandleScope would be @@ -431,7 +408,7 @@ class HandleScopeImplementer { } void BeginDeferredScope(); - std::unique_ptr Detach(Address* prev_limit); + std::unique_ptr DetachPersistent(Address* prev_limit); Isolate* isolate_; DetachableVector blocks_; @@ -455,9 +432,8 @@ class HandleScopeImplementer { char* RestoreThreadHelper(char* from); char* ArchiveThreadHelper(char* to); - friend class DeferredHandles; - friend class DeferredHandleScope; friend class HandleScopeImplementerOffsets; + friend class PersistentHandlesScope; DISALLOW_COPY_AND_ASSIGN(HandleScopeImplementer); }; diff --git a/deps/v8/src/asmjs/asm-js.cc b/deps/v8/src/asmjs/asm-js.cc index 17bf39c8538722..ce9f653ee3b162 100644 --- a/deps/v8/src/asmjs/asm-js.cc +++ b/deps/v8/src/asmjs/asm-js.cc @@ -187,8 +187,7 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob { explicit AsmJsCompilationJob(ParseInfo* parse_info, FunctionLiteral* literal, AccountingAllocator* allocator) : UnoptimizedCompilationJob(parse_info->stack_limit(), parse_info, - &compilation_info_, - CanOffThreadFinalize::kNo), + &compilation_info_), allocator_(allocator), zone_(allocator, ZONE_NAME), compilation_info_(&zone_, parse_info, literal), @@ -202,8 +201,8 @@ class AsmJsCompilationJob final : public UnoptimizedCompilationJob { Status FinalizeJobImpl(Handle shared_info, Isolate* isolate) final; Status FinalizeJobImpl(Handle shared_info, - OffThreadIsolate* isolate) final { - UNREACHABLE(); + LocalIsolate* isolate) final { + return CompilationJob::RETRY_ON_MAIN_THREAD; } private: @@ -241,9 +240,9 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::ExecuteJobImpl() { } return FAILED; } - module_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); + module_ = compile_zone->New(compile_zone); parser.module_builder()->WriteTo(module_); - asm_offsets_ = new (compile_zone) wasm::ZoneBuffer(compile_zone); + asm_offsets_ = compile_zone->New(compile_zone); 
parser.module_builder()->WriteAsmJsOffsetTable(asm_offsets_); stdlib_uses_ = *parser.stdlib_uses(); @@ -278,8 +277,8 @@ UnoptimizedCompilationJob::Status AsmJsCompilationJob::FinalizeJobImpl( RecordHistograms(isolate); ReportCompilationSuccess(handle(Script::cast(shared_info->script()), isolate), - compilation_info()->literal()->position(), - compile_time_, module_->size()); + shared_info->StartPosition(), compile_time_, + module_->size()); return SUCCEEDED; } diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc index 652fe83a3cb890..559c6b12d0b618 100644 --- a/deps/v8/src/asmjs/asm-parser.cc +++ b/deps/v8/src/asmjs/asm-parser.cc @@ -74,22 +74,9 @@ AsmJsParser::AsmJsParser(Zone* zone, uintptr_t stack_limit, Utf16CharacterStream* stream) : zone_(zone), scanner_(stream), - module_builder_(new (zone) WasmModuleBuilder(zone)), - return_type_(nullptr), + module_builder_(zone->New(zone)), stack_limit_(stack_limit), - global_var_info_(zone), - local_var_info_(zone), - failed_(false), - failure_location_(kNoSourcePosition), - stdlib_name_(kTokenNone), - foreign_name_(kTokenNone), - heap_name_(kTokenNone), - inside_heap_assignment_(false), - heap_access_type_(nullptr), block_stack_(zone), - call_coercion_(nullptr), - call_coercion_deferred_(nullptr), - pending_label_(0), global_imports_(zone) { module_builder_->SetMinMemorySize(0); InitializeStdlibTypes(); @@ -211,24 +198,21 @@ class AsmJsParser::TemporaryVariableScope { wasm::AsmJsParser::VarInfo* AsmJsParser::GetVarInfo( AsmJsScanner::token_t token) { - if (AsmJsScanner::IsGlobal(token)) { - size_t old = global_var_info_.size(); - size_t index = AsmJsScanner::GlobalIndex(token); - size_t sz = std::max(old, index + 1); - if (sz != old) { - global_var_info_.resize(sz); - } - return &global_var_info_[index]; - } else if (AsmJsScanner::IsLocal(token)) { - size_t old = local_var_info_.size(); - size_t index = AsmJsScanner::LocalIndex(token); - size_t sz = std::max(old, index + 1); - if (sz != old) { - 
local_var_info_.resize(sz); - } - return &local_var_info_[index]; - } - UNREACHABLE(); + const bool is_global = AsmJsScanner::IsGlobal(token); + DCHECK(is_global || AsmJsScanner::IsLocal(token)); + Vector& var_info = is_global ? global_var_info_ : local_var_info_; + size_t old_capacity = var_info.size(); + size_t index = is_global ? AsmJsScanner::GlobalIndex(token) + : AsmJsScanner::LocalIndex(token); + if (is_global && index + 1 > num_globals_) num_globals_ = index + 1; + if (index + 1 > old_capacity) { + size_t new_size = std::max(2 * old_capacity, index + 1); + Vector new_info{zone_->NewArray(new_size), new_size}; + std::uninitialized_fill(new_info.begin(), new_info.end(), VarInfo{}); + std::copy(var_info.begin(), var_info.end(), new_info.begin()); + var_info = new_info; + } + return &var_info[index]; } uint32_t AsmJsParser::VarIndex(VarInfo* info) { @@ -250,10 +234,10 @@ void AsmJsParser::AddGlobalImport(Vector name, AsmType* type, void AsmJsParser::DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type, ValueType vtype, - const WasmInitExpr& init) { + WasmInitExpr init) { info->kind = VarKind::kGlobal; info->type = type; - info->index = module_builder_->AddGlobal(vtype, true, init); + info->index = module_builder_->AddGlobal(vtype, true, std::move(init)); info->mutable_variable = mutable_variable; } @@ -365,7 +349,7 @@ void AsmJsParser::ValidateModule() { EXPECT_TOKEN('}'); // Check that all functions were eventually defined. 
- for (auto& info : global_var_info_) { + for (auto& info : global_var_info_.SubVector(0, num_globals_)) { if (info.kind == VarKind::kFunction && !info.function_defined) { FAIL("Undefined function"); } @@ -564,8 +548,7 @@ void AsmJsParser::ValidateModuleVarImport(VarInfo* info, AddGlobalImport(name, AsmType::Int(), kWasmI32, mutable_variable, info); } else { info->kind = VarKind::kImportedFunction; - info->import = new (zone()->New(sizeof(FunctionImportInfo))) - FunctionImportInfo(name, zone()); + info->import = zone()->New(name, zone()); info->mutable_variable = false; } } @@ -838,7 +821,7 @@ void AsmJsParser::ValidateFunction() { } scanner_.ResetLocals(); - local_var_info_.clear(); + std::fill(local_var_info_.begin(), local_var_info_.end(), VarInfo{}); } // 6.4 ValidateFunction diff --git a/deps/v8/src/asmjs/asm-parser.h b/deps/v8/src/asmjs/asm-parser.h index 66b213abc8563d..cd39bcb686438b 100644 --- a/deps/v8/src/asmjs/asm-parser.h +++ b/deps/v8/src/asmjs/asm-parser.h @@ -168,11 +168,12 @@ class AsmJsParser { AsmJsScanner scanner_; WasmModuleBuilder* module_builder_; WasmFunctionBuilder* current_function_builder_; - AsmType* return_type_; + AsmType* return_type_ = nullptr; uintptr_t stack_limit_; StdlibSet stdlib_uses_; - ZoneVector global_var_info_; - ZoneVector local_var_info_; + Vector global_var_info_; + Vector local_var_info_; + size_t num_globals_ = 0; CachedVectors cached_valuetype_vectors_{zone_}; CachedVectors cached_asm_type_p_vectors_{zone_}; @@ -184,20 +185,20 @@ class AsmJsParser { int function_temp_locals_depth_; // Error Handling related - bool failed_; + bool failed_ = false; const char* failure_message_; - int failure_location_; + int failure_location_ = kNoSourcePosition; // Module Related. 
- AsmJsScanner::token_t stdlib_name_; - AsmJsScanner::token_t foreign_name_; - AsmJsScanner::token_t heap_name_; + AsmJsScanner::token_t stdlib_name_ = kTokenNone; + AsmJsScanner::token_t foreign_name_ = kTokenNone; + AsmJsScanner::token_t heap_name_ = kTokenNone; static const AsmJsScanner::token_t kTokenNone = 0; // Track if parsing a heap assignment. - bool inside_heap_assignment_; - AsmType* heap_access_type_; + bool inside_heap_assignment_ = false; + AsmType* heap_access_type_ = nullptr; ZoneVector block_stack_; @@ -214,7 +215,7 @@ class AsmJsParser { // When making calls, the return type is needed to lookup signatures. // For `+callsite(..)` or `fround(callsite(..))` use this value to pass // along the coercion. - AsmType* call_coercion_; + AsmType* call_coercion_ = nullptr; // The source position associated with the above {call_coercion}. size_t call_coercion_position_; @@ -222,7 +223,7 @@ class AsmJsParser { // When making calls, the coercion can also appear in the source stream // syntactically "behind" the call site. For `callsite(..)|0` use this // value to flag that such a coercion must happen. - AsmType* call_coercion_deferred_; + AsmType* call_coercion_deferred_ = nullptr; // The source position at which requesting a deferred coercion via the // aforementioned {call_coercion_deferred} is allowed. @@ -238,7 +239,7 @@ class AsmJsParser { // Used to track the last label we've seen so it can be matched to later // statements it's attached to. - AsmJsScanner::token_t pending_label_; + AsmJsScanner::token_t pending_label_ = kTokenNone; // Global imports. The list of imported variables that are copied during // module instantiation into a corresponding global variable. 
@@ -313,8 +314,7 @@ class AsmJsParser { VarInfo* GetVarInfo(AsmJsScanner::token_t token); uint32_t VarIndex(VarInfo* info); void DeclareGlobal(VarInfo* info, bool mutable_variable, AsmType* type, - ValueType vtype, - const WasmInitExpr& init = WasmInitExpr()); + ValueType vtype, WasmInitExpr init = WasmInitExpr()); void DeclareStdlibFunc(VarInfo* info, VarKind kind, AsmType* type); void AddGlobalImport(Vector name, AsmType* type, ValueType vtype, bool mutable_variable, VarInfo* info); diff --git a/deps/v8/src/asmjs/asm-scanner.cc b/deps/v8/src/asmjs/asm-scanner.cc index 73140867084c7a..3ac9ef2d6fe9a0 100644 --- a/deps/v8/src/asmjs/asm-scanner.cc +++ b/deps/v8/src/asmjs/asm-scanner.cc @@ -99,7 +99,7 @@ void AsmJsScanner::Next() { preceded_by_newline_ = true; break; - case kEndOfInput: + case kEndOfInputU: token_ = kEndOfInput; return; @@ -354,7 +354,7 @@ bool AsmJsScanner::ConsumeCComment() { if (ch == '\n') { preceded_by_newline_ = true; } - if (ch == kEndOfInput) { + if (ch == kEndOfInputU) { return false; } } @@ -367,7 +367,7 @@ void AsmJsScanner::ConsumeCPPComment() { preceded_by_newline_ = true; return; } - if (ch == kEndOfInput) { + if (ch == kEndOfInputU) { return; } } @@ -377,7 +377,7 @@ void AsmJsScanner::ConsumeString(uc32 quote) { // Only string allowed is 'use asm' / "use asm". 
const char* expected = "use asm"; for (; *expected != '\0'; ++expected) { - if (stream_->Advance() != *expected) { + if (stream_->Advance() != static_cast(*expected)) { token_ = kParseError; return; } diff --git a/deps/v8/src/asmjs/asm-scanner.h b/deps/v8/src/asmjs/asm-scanner.h index 076a7607e38624..9e7250ff2cd72f 100644 --- a/deps/v8/src/asmjs/asm-scanner.h +++ b/deps/v8/src/asmjs/asm-scanner.h @@ -135,6 +135,8 @@ class V8_EXPORT_PRIVATE AsmJsScanner { }; // clang-format on + static constexpr uc32 kEndOfInputU = static_cast(kEndOfInput); + private: Utf16CharacterStream* stream_; token_t token_; diff --git a/deps/v8/src/asmjs/asm-types.cc b/deps/v8/src/asmjs/asm-types.cc index 1fc12df2c93c5d..5ad53f237cd15d 100644 --- a/deps/v8/src/asmjs/asm-types.cc +++ b/deps/v8/src/asmjs/asm-types.cc @@ -172,7 +172,7 @@ class AsmFroundType final : public AsmCallableType { } // namespace AsmType* AsmType::FroundType(Zone* zone) { - auto* Fround = new (zone) AsmFroundType(); + auto* Fround = zone->New(); return reinterpret_cast(Fround); } @@ -195,6 +195,7 @@ namespace { class AsmMinMaxType final : public AsmCallableType { private: friend AsmType; + friend Zone; AsmMinMaxType(AsmType* dest, AsmType* src) : AsmCallableType(), return_type_(dest), arg_(src) {} @@ -231,7 +232,7 @@ class AsmMinMaxType final : public AsmCallableType { AsmType* AsmType::MinMaxType(Zone* zone, AsmType* dest, AsmType* src) { DCHECK_NOT_NULL(dest->AsValueType()); DCHECK_NOT_NULL(src->AsValueType()); - auto* MinMax = new (zone) AsmMinMaxType(dest, src); + auto* MinMax = zone->New(dest, src); return reinterpret_cast(MinMax); } diff --git a/deps/v8/src/asmjs/asm-types.h b/deps/v8/src/asmjs/asm-types.h index 8bb9e5d8e0b0c0..9f6390c1a9d8da 100644 --- a/deps/v8/src/asmjs/asm-types.h +++ b/deps/v8/src/asmjs/asm-types.h @@ -139,6 +139,7 @@ class V8_EXPORT_PRIVATE AsmFunctionType final : public AsmCallableType { private: friend AsmType; + friend Zone; std::string Name() override; bool IsA(AsmType* other) override; 
@@ -160,6 +161,7 @@ class V8_EXPORT_PRIVATE AsmOverloadedFunctionType final private: friend AsmType; + friend Zone; explicit AsmOverloadedFunctionType(Zone* zone) : overloads_(zone) {} @@ -196,14 +198,14 @@ class V8_EXPORT_PRIVATE AsmType { // A function returning ret. Callers still need to invoke AddArgument with the // returned type to fully create this type. static AsmType* Function(Zone* zone, AsmType* ret) { - AsmFunctionType* f = new (zone) AsmFunctionType(zone, ret); + AsmFunctionType* f = zone->New(zone, ret); return reinterpret_cast(f); } // Overloaded function types. Not creatable by asm source, but useful to // represent the overloaded stdlib functions. static AsmType* OverloadedFunction(Zone* zone) { - auto* f = new (zone) AsmOverloadedFunctionType(zone); + auto* f = zone->New(zone); return reinterpret_cast(f); } diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc index b583b5e4214ad4..8c9318bfe7475d 100644 --- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -54,10 +54,10 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { // Private fields have their key and value present in // instance_members_initializer_function, so they will // already have been visited. - if (prop->value()->IsFunctionLiteral()) { - Visit(prop->value()); - } else { + if (prop->kind() == ClassLiteralProperty::Kind::FIELD) { CheckVisited(prop->value()); + } else { + Visit(prop->value()); } } ZonePtrList* props = expr->public_members(); @@ -67,7 +67,8 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { // Public fields with computed names have their key // and value present in instance_members_initializer_function, so they will // already have been visited. 
- if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) { + if (prop->is_computed_name() && + prop->kind() == ClassLiteralProperty::Kind::FIELD) { if (!prop->key()->IsLiteral()) { CheckVisited(prop->key()); } diff --git a/deps/v8/src/ast/ast-value-factory.cc b/deps/v8/src/ast/ast-value-factory.cc index 23f28b834ac7ec..598096ba10d269 100644 --- a/deps/v8/src/ast/ast-value-factory.cc +++ b/deps/v8/src/ast/ast-value-factory.cc @@ -29,9 +29,8 @@ #include "src/base/logging.h" #include "src/common/globals.h" -#include "src/execution/off-thread-isolate.h" #include "src/heap/factory-inl.h" -#include "src/heap/off-thread-factory-inl.h" +#include "src/heap/local-factory-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/objects.h" #include "src/strings/char-predicates-inl.h" @@ -46,8 +45,8 @@ namespace { // For using StringToIndex. class OneByteStringStream { public: - explicit OneByteStringStream(Vector lb) : - literal_bytes_(lb), pos_(0) {} + explicit OneByteStringStream(Vector lb) + : literal_bytes_(lb), pos_(0) {} bool HasMore() { return pos_ < literal_bytes_.length(); } uint16_t GetNext() { return literal_bytes_[pos_++]; } @@ -59,7 +58,8 @@ class OneByteStringStream { } // namespace -void AstRawString::Internalize(Isolate* isolate) { +template +void AstRawString::Internalize(LocalIsolate* isolate) { DCHECK(!has_string_); if (literal_bytes_.length() == 0) { set_string(isolate->factory()->empty_string()); @@ -73,27 +73,10 @@ void AstRawString::Internalize(Isolate* isolate) { } } -void AstRawString::Internalize(OffThreadIsolate* isolate) { - DCHECK(!has_string_); - if (literal_bytes_.length() == 0) { - set_string(isolate->factory()->empty_string()); - return; - } - - // For the off-thread case, we already de-duplicated the AstRawStrings during - // construction and don't have access to the main thread string table yet, so - // we just unconditionally create strings and will internalize them properly - // during merging. 
- Handle string; - if (is_one_byte()) { - string = isolate->factory()->NewOneByteInternalizedString( - Vector::cast(literal_bytes_), hash_field()); - } else { - string = isolate->factory()->NewTwoByteInternalizedString( - Vector::cast(literal_bytes_), hash_field()); - } - set_string(string); -} +template EXPORT_TEMPLATE_DEFINE( + V8_EXPORT_PRIVATE) void AstRawString::Internalize(Isolate* isolate); +template EXPORT_TEMPLATE_DEFINE( + V8_EXPORT_PRIVATE) void AstRawString::Internalize(LocalIsolate* isolate); bool AstRawString::AsArrayIndex(uint32_t* index) const { // The StringHasher will set up the hash. Bail out early if we know it @@ -185,8 +168,8 @@ Handle AstConsString::Allocate(LocalIsolate* isolate) const { template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle AstConsString::Allocate(Isolate* isolate) const; template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle AstConsString::Allocate( - OffThreadIsolate* isolate) const; + Handle AstConsString::Allocate( + LocalIsolate* isolate) const; template Handle AstConsString::AllocateFlat(LocalIsolate* isolate) const { @@ -246,8 +229,8 @@ Handle AstConsString::AllocateFlat(LocalIsolate* isolate) const { template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle AstConsString::AllocateFlat(Isolate* isolate) const; template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle AstConsString::AllocateFlat( - OffThreadIsolate* isolate) const; + Handle AstConsString::AllocateFlat( + LocalIsolate* isolate) const; std::forward_list AstConsString::ToRawStrings() const { std::forward_list result; @@ -268,21 +251,21 @@ AstStringConstants::AstStringConstants(Isolate* isolate, uint64_t hash_seed) string_table_(AstRawString::Compare), hash_seed_(hash_seed) { DCHECK_EQ(ThreadId::Current(), isolate->thread_id()); -#define F(name, str) \ - { \ - const char* data = str; \ - Vector literal(reinterpret_cast(data), \ - static_cast(strlen(data))); \ - uint32_t hash_field = StringHasher::HashSequentialString( \ - literal.begin(), 
literal.length(), hash_seed_); \ - name##_string_ = new (&zone_) AstRawString(true, literal, hash_field); \ - /* The Handle returned by the factory is located on the roots */ \ - /* array, not on the temporary HandleScope, so this is safe. */ \ - name##_string_->set_string(isolate->factory()->name##_string()); \ - base::HashMap::Entry* entry = \ - string_table_.InsertNew(name##_string_, name##_string_->Hash()); \ - DCHECK_NULL(entry->value); \ - entry->value = reinterpret_cast(1); \ +#define F(name, str) \ + { \ + const char* data = str; \ + Vector literal(reinterpret_cast(data), \ + static_cast(strlen(data))); \ + uint32_t hash_field = StringHasher::HashSequentialString( \ + literal.begin(), literal.length(), hash_seed_); \ + name##_string_ = zone_.New(true, literal, hash_field); \ + /* The Handle returned by the factory is located on the roots */ \ + /* array, not on the temporary HandleScope, so this is safe. */ \ + name##_string_->set_string(isolate->factory()->name##_string()); \ + base::HashMap::Entry* entry = \ + string_table_.InsertNew(name##_string_, name##_string_->Hash()); \ + DCHECK_NULL(entry->value); \ + entry->value = reinterpret_cast(1); \ } AST_STRING_CONSTANTS(F) #undef F @@ -333,7 +316,7 @@ const AstRawString* AstValueFactory::CloneFromOtherFactory( } AstConsString* AstValueFactory::NewConsString() { - return new (zone()) AstConsString; + return zone()->New(); } AstConsString* AstValueFactory::NewConsString(const AstRawString* str) { @@ -361,10 +344,9 @@ void AstValueFactory::Internalize(LocalIsolate* isolate) { zone_ = nullptr; } template EXPORT_TEMPLATE_DEFINE( - V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(Isolate* - isolate); -template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void AstValueFactory:: - Internalize(OffThreadIsolate* isolate); + V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(Isolate* isolate); +template EXPORT_TEMPLATE_DEFINE( + V8_EXPORT_PRIVATE) void AstValueFactory::Internalize(LocalIsolate* isolate); 
AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte, Vector literal_bytes) { @@ -379,7 +361,7 @@ AstRawString* AstValueFactory::GetString(uint32_t hash_field, bool is_one_byte, int length = literal_bytes.length(); byte* new_literal_bytes = zone()->NewArray(length); memcpy(new_literal_bytes, literal_bytes.begin(), length); - AstRawString* new_string = new (zone()) AstRawString( + AstRawString* new_string = zone()->New( is_one_byte, Vector(new_literal_bytes, length), hash_field); CHECK_NOT_NULL(new_string); AddString(new_string); diff --git a/deps/v8/src/ast/ast-value-factory.h b/deps/v8/src/ast/ast-value-factory.h index 134612f1fd02a5..1752498123507b 100644 --- a/deps/v8/src/ast/ast-value-factory.h +++ b/deps/v8/src/ast/ast-value-factory.h @@ -45,7 +45,6 @@ namespace v8 { namespace internal { class Isolate; -class OffThreadIsolate; class AstRawString final : public ZoneObject { public: @@ -59,8 +58,8 @@ class AstRawString final : public ZoneObject { V8_EXPORT_PRIVATE bool IsOneByteEqualTo(const char* data) const; uint16_t FirstCharacter() const; - void Internalize(Isolate* isolate); - void Internalize(OffThreadIsolate* isolate); + template + void Internalize(LocalIsolate* isolate); // Access the physical representation: bool is_one_byte() const { return is_one_byte_; } @@ -83,6 +82,7 @@ class AstRawString final : public ZoneObject { friend class AstRawStringInternalizationKey; friend class AstStringConstants; friend class AstValueFactory; + friend Zone; // Members accessed only by the AstValueFactory & related classes: static bool Compare(void* a, void* b); @@ -126,6 +126,11 @@ class AstRawString final : public ZoneObject { #endif }; +extern template EXPORT_TEMPLATE_DECLARE( + V8_EXPORT_PRIVATE) void AstRawString::Internalize(Isolate* isolate); +extern template EXPORT_TEMPLATE_DECLARE( + V8_EXPORT_PRIVATE) void AstRawString::Internalize(LocalIsolate* isolate); + class AstConsString final : public ZoneObject { public: AstConsString* 
AddString(Zone* zone, const AstRawString* s) { @@ -133,8 +138,7 @@ class AstConsString final : public ZoneObject { if (!IsEmpty()) { // We're putting the new string to the head of the list, meaning // the string segments will be in reverse order. - Segment* tmp = new (zone->New(sizeof(Segment))) Segment; - *tmp = segment_; + Segment* tmp = zone->New(segment_); segment_.next = tmp; } segment_.string = s; @@ -163,6 +167,7 @@ class AstConsString final : public ZoneObject { private: friend class AstValueFactory; + friend Zone; AstConsString() : string_(), segment_({nullptr, nullptr}) {} @@ -380,7 +385,7 @@ extern template EXPORT_TEMPLATE_DECLARE( extern template EXPORT_TEMPLATE_DECLARE( V8_EXPORT_PRIVATE) void AstValueFactory:: - Internalize(OffThreadIsolate* isolate); + Internalize(LocalIsolate* isolate); } // namespace internal } // namespace v8 diff --git a/deps/v8/src/ast/ast.cc b/deps/v8/src/ast/ast.cc index 651508b677f05d..b40cf83c8209d6 100644 --- a/deps/v8/src/ast/ast.cc +++ b/deps/v8/src/ast/ast.cc @@ -14,8 +14,7 @@ #include "src/builtins/builtins-constructor.h" #include "src/builtins/builtins.h" #include "src/common/assert-scope.h" -#include "src/execution/off-thread-isolate.h" -#include "src/heap/off-thread-factory-inl.h" +#include "src/heap/local-factory-inl.h" #include "src/numbers/conversions-inl.h" #include "src/numbers/double.h" #include "src/objects/contexts.h" @@ -335,10 +334,9 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) { const auto GETTER = ObjectLiteral::Property::GETTER; const auto SETTER = ObjectLiteral::Property::SETTER; - ZoneAllocationPolicy allocator(zone); - - CustomMatcherZoneHashMap table( - Literal::Match, ZoneHashMap::kDefaultHashMapCapacity, allocator); + CustomMatcherZoneHashMap table(Literal::Match, + ZoneHashMap::kDefaultHashMapCapacity, + ZoneAllocationPolicy(zone)); for (int i = properties()->length() - 1; i >= 0; i--) { ObjectLiteral::Property* property = properties()->at(i); if (property->is_computed_name()) continue; 
@@ -347,7 +345,7 @@ void ObjectLiteral::CalculateEmitStore(Zone* zone) { DCHECK(!literal->IsNullLiteral()); uint32_t hash = literal->Hash(); - ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash, allocator); + ZoneHashMap::Entry* entry = table.LookupOrInsert(literal, hash); if (entry->value == nullptr) { entry->value = property; } else { @@ -522,7 +520,7 @@ void ObjectLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) { template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral:: BuildBoilerplateDescription(Isolate* isolate); template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ObjectLiteral:: - BuildBoilerplateDescription(OffThreadIsolate* isolate); + BuildBoilerplateDescription(LocalIsolate* isolate); bool ObjectLiteral::IsFastCloningSupported() const { // The CreateShallowObjectLiteratal builtin doesn't copy elements, and object @@ -696,8 +694,9 @@ void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* isolate) { template EXPORT_TEMPLATE_DEFINE( V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(Isolate* isolate); -template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) void ArrayLiteral:: - BuildBoilerplateDescription(OffThreadIsolate* isolate); +template EXPORT_TEMPLATE_DEFINE( + V8_BASE_EXPORT) void ArrayLiteral::BuildBoilerplateDescription(LocalIsolate* + isolate); bool ArrayLiteral::IsFastCloningSupported() const { return depth() <= 1 && @@ -737,7 +736,7 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Expression* expression, Isolate* isolate); template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle MaterializedLiteral::GetBoilerplateValue( - Expression* expression, OffThreadIsolate* isolate); + Expression* expression, LocalIsolate* isolate); int MaterializedLiteral::InitDepthAndFlags() { if (IsArrayLiteral()) return AsArrayLiteral()->InitDepthAndFlags(); @@ -772,7 +771,7 @@ void MaterializedLiteral::BuildConstants(LocalIsolate* isolate) { template EXPORT_TEMPLATE_DEFINE( V8_BASE_EXPORT) void 
MaterializedLiteral::BuildConstants(Isolate* isolate); template EXPORT_TEMPLATE_DEFINE( - V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(OffThreadIsolate* + V8_BASE_EXPORT) void MaterializedLiteral::BuildConstants(LocalIsolate* isolate); template @@ -814,7 +813,7 @@ template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) Isolate* isolate); template EXPORT_TEMPLATE_DEFINE(V8_BASE_EXPORT) Handle GetTemplateObject::GetOrBuildDescription( - OffThreadIsolate* isolate); + LocalIsolate* isolate); static bool IsCommutativeOperationWithSmiLiteral(Token::Value op) { // Add is not commutative due to potential for string addition. @@ -927,6 +926,7 @@ Call::CallType Call::GetCallType() const { } if (property != nullptr) { if (property->IsPrivateReference()) { + if (is_optional_chain) return PRIVATE_OPTIONAL_CHAIN_CALL; return PRIVATE_CALL; } bool is_super = property->IsSuperAccess(); @@ -949,9 +949,7 @@ Call::CallType Call::GetCallType() const { CaseClause::CaseClause(Zone* zone, Expression* label, const ScopedPtrList& statements) - : label_(label), statements_(0, nullptr) { - statements.CopyTo(&statements_, zone); -} + : label_(label), statements_(statements.ToConstVector(), zone) {} bool Literal::IsPropertyName() const { if (type() != kString) return false; @@ -1008,7 +1006,7 @@ Handle Literal::BuildValue(LocalIsolate* isolate) const { template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) Handle Literal::BuildValue(Isolate* isolate) const; template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) - Handle Literal::BuildValue(OffThreadIsolate* isolate) const; + Handle Literal::BuildValue(LocalIsolate* isolate) const; bool Literal::ToBooleanIsTrue() const { switch (type()) { @@ -1062,7 +1060,7 @@ Literal* AstNodeFactory::NewNumberLiteral(double number, int pos) { if (DoubleToSmiInteger(number, &int_value)) { return NewSmiLiteral(int_value, pos); } - return new (zone_) Literal(number, pos); + return zone_->New(number, pos); } const char* CallRuntime::debug_name() { diff --git 
a/deps/v8/src/ast/ast.h b/deps/v8/src/ast/ast.h index 6fcf30499a5f0d..4213c60f246e40 100644 --- a/deps/v8/src/ast/ast.h +++ b/deps/v8/src/ast/ast.h @@ -21,6 +21,7 @@ #include "src/objects/smi.h" #include "src/parsing/token.h" #include "src/runtime/runtime.h" +#include "src/zone/zone-list.h" namespace v8 { namespace internal { @@ -116,7 +117,6 @@ namespace internal { // Forward declarations class Isolate; -class OffThreadIsolate; class AstNode; class AstNodeFactory; @@ -143,8 +143,6 @@ class AstNode: public ZoneObject { }; #undef DECLARE_TYPE_ENUM - void* operator new(size_t size, Zone* zone) { return zone->New(size); } - NodeType node_type() const { return NodeTypeField::decode(bit_field_); } int position() const { return position_; } @@ -165,10 +163,6 @@ class AstNode: public ZoneObject { MaterializedLiteral* AsMaterializedLiteral(); private: - // Hidden to prevent accidental usage. It would have to load the - // current zone from the TLS. - void* operator new(size_t size); - int position_; using NodeTypeField = base::BitField; @@ -289,6 +283,7 @@ class Expression : public AstNode { class FailureExpression : public Expression { private: friend class AstNodeFactory; + friend Zone; FailureExpression() : Expression(kNoSourcePosition, kFailureExpression) {} }; @@ -324,11 +319,12 @@ class Block final : public BreakableStatement { void InitializeStatements(const ScopedPtrList& statements, Zone* zone) { DCHECK_EQ(0, statements_.length()); - statements.CopyTo(&statements_, zone); + statements_ = ZonePtrList(statements.ToConstVector(), zone); } private: friend class AstNodeFactory; + friend Zone; ZonePtrList statements_; Scope* scope_; @@ -375,6 +371,7 @@ class VariableDeclaration : public Declaration { private: friend class AstNodeFactory; + friend Zone; using IsNestedField = Declaration::NextBitField; @@ -397,6 +394,7 @@ class NestedVariableDeclaration final : public VariableDeclaration { private: friend class AstNodeFactory; + friend Zone; 
NestedVariableDeclaration(Scope* scope, int pos) : VariableDeclaration(pos, true), scope_(scope) {} @@ -417,6 +415,7 @@ class FunctionDeclaration final : public Declaration { private: friend class AstNodeFactory; + friend Zone; FunctionDeclaration(FunctionLiteral* fun, int pos) : Declaration(pos, kFunctionDeclaration), fun_(fun) {} @@ -451,6 +450,7 @@ class DoWhileStatement final : public IterationStatement { private: friend class AstNodeFactory; + friend Zone; explicit DoWhileStatement(int pos) : IterationStatement(pos, kDoWhileStatement), cond_(nullptr) {} @@ -470,6 +470,7 @@ class WhileStatement final : public IterationStatement { private: friend class AstNodeFactory; + friend Zone; explicit WhileStatement(int pos) : IterationStatement(pos, kWhileStatement), cond_(nullptr) {} @@ -494,6 +495,7 @@ class ForStatement final : public IterationStatement { private: friend class AstNodeFactory; + friend Zone; explicit ForStatement(int pos) : IterationStatement(pos, kForStatement), @@ -531,6 +533,7 @@ class ForEachStatement : public IterationStatement { protected: friend class AstNodeFactory; + friend Zone; ForEachStatement(int pos, NodeType type) : IterationStatement(pos, type), each_(nullptr), subject_(nullptr) {} @@ -542,6 +545,7 @@ class ForEachStatement : public IterationStatement { class ForInStatement final : public ForEachStatement { private: friend class AstNodeFactory; + friend Zone; explicit ForInStatement(int pos) : ForEachStatement(pos, kForInStatement) {} }; @@ -553,6 +557,7 @@ class ForOfStatement final : public ForEachStatement { private: friend class AstNodeFactory; + friend Zone; ForOfStatement(int pos, IteratorType type) : ForEachStatement(pos, kForOfStatement), type_(type) {} @@ -567,6 +572,7 @@ class ExpressionStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; ExpressionStatement(Expression* expression, int pos) : Statement(pos, kExpressionStatement), expression_(expression) {} @@ -587,6 +593,7 @@ class 
ContinueStatement final : public JumpStatement { private: friend class AstNodeFactory; + friend Zone; ContinueStatement(IterationStatement* target, int pos) : JumpStatement(pos, kContinueStatement), target_(target) {} @@ -601,6 +608,7 @@ class BreakStatement final : public JumpStatement { private: friend class AstNodeFactory; + friend Zone; BreakStatement(BreakableStatement* target, int pos) : JumpStatement(pos, kBreakStatement), target_(target) {} @@ -624,6 +632,7 @@ class ReturnStatement final : public JumpStatement { private: friend class AstNodeFactory; + friend Zone; ReturnStatement(Expression* expression, Type type, int pos, int end_position) : JumpStatement(pos, kReturnStatement), @@ -648,6 +657,7 @@ class WithStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; WithStatement(Scope* scope, Expression* expression, Statement* statement, int pos) @@ -672,6 +682,7 @@ class CaseClause final : public ZoneObject { private: friend class AstNodeFactory; + friend Zone; CaseClause(Zone* zone, Expression* label, const ScopedPtrList& statements); @@ -690,6 +701,7 @@ class SwitchStatement final : public BreakableStatement { private: friend class AstNodeFactory; + friend Zone; SwitchStatement(Zone* zone, Expression* tag, int pos) : BreakableStatement(pos, kSwitchStatement), tag_(tag), cases_(4, zone) {} @@ -718,6 +730,7 @@ class IfStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; IfStatement(Expression* condition, Statement* then_statement, Statement* else_statement, int pos) @@ -815,6 +828,7 @@ class TryCatchStatement final : public TryStatement { private: friend class AstNodeFactory; + friend Zone; TryCatchStatement(Block* try_block, Scope* scope, Block* catch_block, HandlerTable::CatchPrediction catch_prediction, int pos) @@ -836,6 +850,7 @@ class TryFinallyStatement final : public TryStatement { private: friend class AstNodeFactory; + friend Zone; TryFinallyStatement(Block* try_block, Block* 
finally_block, int pos) : TryStatement(try_block, pos, kTryFinallyStatement), @@ -848,6 +863,7 @@ class TryFinallyStatement final : public TryStatement { class DebuggerStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; explicit DebuggerStatement(int pos) : Statement(pos, kDebuggerStatement) {} }; @@ -856,6 +872,7 @@ class DebuggerStatement final : public Statement { class EmptyStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; EmptyStatement() : Statement(kNoSourcePosition, kEmptyStatement) {} }; @@ -876,6 +893,7 @@ class SloppyBlockFunctionStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; using TokenField = Statement::NextBitField; @@ -978,6 +996,7 @@ class Literal final : public Expression { private: friend class AstNodeFactory; + friend Zone; using TypeField = Expression::NextBitField; @@ -1067,6 +1086,7 @@ class RegExpLiteral final : public MaterializedLiteral { private: friend class AstNodeFactory; + friend Zone; RegExpLiteral(const AstRawString* pattern, int flags, int pos) : MaterializedLiteral(pos, kRegExpLiteral), @@ -1086,6 +1106,7 @@ class AggregateLiteral : public MaterializedLiteral { kIsShallow = 1, kDisableMementos = 1 << 1, kNeedsInitialAllocationSite = 1 << 2, + kIsShallowAndDisableMementos = kIsShallow | kDisableMementos, }; bool is_initialized() const { return 0 < depth_; } @@ -1125,6 +1146,7 @@ class AggregateLiteral : public MaterializedLiteral { protected: friend class AstNodeFactory; + friend Zone; AggregateLiteral(int pos, NodeType type) : MaterializedLiteral(pos, type), depth_(0) { bit_field_ |= @@ -1203,6 +1225,7 @@ class ObjectLiteralProperty final : public LiteralProperty { private: friend class AstNodeFactory; + friend Zone; ObjectLiteralProperty(Expression* key, Expression* value, Kind kind, bool is_computed_name); @@ -1294,18 +1317,18 @@ class ObjectLiteral final : public AggregateLiteral { private: friend class 
AstNodeFactory; + friend Zone; ObjectLiteral(Zone* zone, const ScopedPtrList& properties, uint32_t boilerplate_properties, int pos, bool has_rest_property) : AggregateLiteral(pos, kObjectLiteral), boilerplate_properties_(boilerplate_properties), - properties_(0, nullptr) { + properties_(properties.ToConstVector(), zone) { bit_field_ |= HasElementsField::encode(false) | HasRestPropertyField::encode(has_rest_property) | FastElementsField::encode(false) | HasNullPrototypeField::encode(false); - properties.CopyTo(&properties_, zone); } void InitFlagsForPendingNullPrototype(int i); @@ -1368,14 +1391,13 @@ class ArrayLiteral final : public AggregateLiteral { private: friend class AstNodeFactory; + friend Zone; ArrayLiteral(Zone* zone, const ScopedPtrList& values, int first_spread_index, int pos) : AggregateLiteral(pos, kArrayLiteral), first_spread_index_(first_spread_index), - values_(0, nullptr) { - values.CopyTo(&values_, zone); - } + values_(values.ToConstVector(), zone) {} int first_spread_index_; Handle boilerplate_description_; @@ -1387,7 +1409,8 @@ enum class HoleCheckMode { kRequired, kElided }; class ThisExpression final : public Expression { private: friend class AstNodeFactory; - ThisExpression() : Expression(kNoSourcePosition, kThisExpression) {} + friend Zone; + explicit ThisExpression(int pos) : Expression(pos, kThisExpression) {} }; class VariableProxy final : public Expression { @@ -1478,6 +1501,7 @@ class VariableProxy final : public Expression { private: friend class AstNodeFactory; + friend Zone; VariableProxy(Variable* var, int start_position); @@ -1519,6 +1543,7 @@ class OptionalChain final : public Expression { private: friend class AstNodeFactory; + friend Zone; explicit OptionalChain(Expression* expression) : Expression(0, kOptionalChain), expression_(expression) {} @@ -1587,6 +1612,7 @@ class Property final : public Expression { private: friend class AstNodeFactory; + friend Zone; Property(Expression* obj, Expression* key, int pos, bool 
optional_chain) : Expression(pos, kProperty), obj_(obj), key_(key) { @@ -1630,6 +1656,7 @@ class Call final : public Expression { NAMED_SUPER_PROPERTY_CALL, KEYED_SUPER_PROPERTY_CALL, PRIVATE_CALL, + PRIVATE_OPTIONAL_CHAIN_CALL, SUPER_CALL, OTHER_CALL, }; @@ -1646,18 +1673,18 @@ class Call final : public Expression { private: friend class AstNodeFactory; + friend Zone; Call(Zone* zone, Expression* expression, const ScopedPtrList& arguments, int pos, PossiblyEval possibly_eval, bool optional_chain) : Expression(pos, kCall), expression_(expression), - arguments_(0, nullptr) { + arguments_(arguments.ToConstVector(), zone) { bit_field_ |= IsPossiblyEvalField::encode(possibly_eval == IS_POSSIBLY_EVAL) | IsTaggedTemplateField::encode(false) | IsOptionalChainLinkField::encode(optional_chain); - arguments.CopyTo(&arguments_, zone); } Call(Zone* zone, Expression* expression, @@ -1665,11 +1692,10 @@ class Call final : public Expression { TaggedTemplateTag tag) : Expression(pos, kCall), expression_(expression), - arguments_(0, nullptr) { + arguments_(arguments.ToConstVector(), zone) { bit_field_ |= IsPossiblyEvalField::encode(false) | IsTaggedTemplateField::encode(true) | IsOptionalChainLinkField::encode(false); - arguments.CopyTo(&arguments_, zone); } using IsPossiblyEvalField = Expression::NextBitField; @@ -1692,14 +1718,13 @@ class CallNew final : public Expression { private: friend class AstNodeFactory; + friend Zone; CallNew(Zone* zone, Expression* expression, const ScopedPtrList& arguments, int pos) : Expression(pos, kCallNew), expression_(expression), - arguments_(0, nullptr) { - arguments.CopyTo(&arguments_, zone); - } + arguments_(arguments.ToConstVector(), zone) {} Expression* expression_; ZonePtrList arguments_; @@ -1727,22 +1752,19 @@ class CallRuntime final : public Expression { private: friend class AstNodeFactory; + friend Zone; CallRuntime(Zone* zone, const Runtime::Function* function, const ScopedPtrList& arguments, int pos) : Expression(pos, kCallRuntime), 
function_(function), - arguments_(0, nullptr) { - arguments.CopyTo(&arguments_, zone); - } + arguments_(arguments.ToConstVector(), zone) {} CallRuntime(Zone* zone, int context_index, const ScopedPtrList& arguments, int pos) : Expression(pos, kCallRuntime), context_index_(context_index), function_(nullptr), - arguments_(0, nullptr) { - arguments.CopyTo(&arguments_, zone); - } + arguments_(arguments.ToConstVector(), zone) {} int context_index_; const Runtime::Function* function_; @@ -1757,6 +1779,7 @@ class UnaryOperation final : public Expression { private: friend class AstNodeFactory; + friend Zone; UnaryOperation(Token::Value op, Expression* expression, int pos) : Expression(pos, kUnaryOperation), expression_(expression) { @@ -1782,6 +1805,7 @@ class BinaryOperation final : public Expression { private: friend class AstNodeFactory; + friend Zone; BinaryOperation(Token::Value op, Expression* left, Expression* right, int pos) : Expression(pos, kBinaryOperation), left_(left), right_(right) { @@ -1814,6 +1838,7 @@ class NaryOperation final : public Expression { private: friend class AstNodeFactory; + friend Zone; NaryOperation(Zone* zone, Token::Value op, Expression* first, size_t initial_subsequent_size) @@ -1865,6 +1890,7 @@ class CountOperation final : public Expression { private: friend class AstNodeFactory; + friend Zone; CountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos) : Expression(pos, kCountOperation), expression_(expr) { @@ -1891,6 +1917,7 @@ class CompareOperation final : public Expression { private: friend class AstNodeFactory; + friend Zone; CompareOperation(Token::Value op, Expression* left, Expression* right, int pos) @@ -1914,6 +1941,7 @@ class Spread final : public Expression { private: friend class AstNodeFactory; + friend Zone; Spread(Expression* expression, int pos, int expr_pos) : Expression(pos, kSpread), @@ -1932,6 +1960,7 @@ class Conditional final : public Expression { private: friend class AstNodeFactory; + friend 
Zone; Conditional(Expression* condition, Expression* then_expression, Expression* else_expression, int position) @@ -1969,6 +1998,7 @@ class Assignment : public Expression { private: friend class AstNodeFactory; + friend Zone; using TokenField = Expression::NextBitField; using LookupHoistingModeField = TokenField::Next; @@ -1983,6 +2013,7 @@ class CompoundAssignment final : public Assignment { private: friend class AstNodeFactory; + friend Zone; CompoundAssignment(Token::Value op, Expression* target, Expression* value, int pos, BinaryOperation* binary_operation) @@ -2017,6 +2048,7 @@ class Suspend : public Expression { private: friend class AstNodeFactory; + friend Zone; friend class Yield; friend class YieldStar; friend class Await; @@ -2035,6 +2067,7 @@ class Suspend : public Expression { class Yield final : public Suspend { private: friend class AstNodeFactory; + friend Zone; Yield(Expression* expression, int pos, OnAbruptResume on_abrupt_resume) : Suspend(kYield, expression, pos, on_abrupt_resume) {} }; @@ -2042,6 +2075,7 @@ class Yield final : public Suspend { class YieldStar final : public Suspend { private: friend class AstNodeFactory; + friend Zone; YieldStar(Expression* expression, int pos) : Suspend(kYieldStar, expression, pos, Suspend::OnAbruptResume::kNoControl) {} @@ -2050,6 +2084,7 @@ class YieldStar final : public Suspend { class Await final : public Suspend { private: friend class AstNodeFactory; + friend Zone; Await(Expression* expression, int pos) : Suspend(kAwait, expression, pos, Suspend::kOnExceptionThrow) {} @@ -2061,6 +2096,7 @@ class Throw final : public Expression { private: friend class AstNodeFactory; + friend Zone; Throw(Expression* exception, int pos) : Expression(pos, kThrow), exception_(exception) {} @@ -2149,7 +2185,7 @@ class FunctionLiteral final : public Expression { } UNREACHABLE(); } - Handle GetInferredName(OffThreadIsolate* isolate) const { + Handle GetInferredName(LocalIsolate* isolate) const { 
DCHECK(inferred_name_.is_null()); DCHECK_NOT_NULL(raw_inferred_name_); return raw_inferred_name_->GetString(isolate); @@ -2240,6 +2276,7 @@ class FunctionLiteral final : public Expression { private: friend class AstNodeFactory; + friend Zone; FunctionLiteral(Zone* zone, const AstConsString* name, AstValueFactory* ast_value_factory, DeclarationScope* scope, @@ -2259,7 +2296,7 @@ class FunctionLiteral final : public Expression { function_literal_id_(function_literal_id), raw_name_(name), scope_(scope), - body_(0, nullptr), + body_(body.ToConstVector(), zone), raw_inferred_name_(ast_value_factory->empty_cons_string()), produced_preparse_data_(produced_preparse_data) { bit_field_ |= FunctionSyntaxKindBits::encode(function_syntax_kind) | @@ -2271,7 +2308,6 @@ class FunctionLiteral final : public Expression { HasBracesField::encode(has_braces) | OneshotIIFEBit::encode(false); if (eager_compile_hint == kShouldEagerCompile) SetShouldEagerCompile(); - body.CopyTo(&body_, zone); } using FunctionSyntaxKindBits = @@ -2346,6 +2382,7 @@ class ClassLiteralProperty final : public LiteralProperty { private: friend class AstNodeFactory; + friend Zone; ClassLiteralProperty(Expression* key, Expression* value, Kind kind, bool is_static, bool is_computed_name, bool is_private); @@ -2364,6 +2401,7 @@ class InitializeClassMembersStatement final : public Statement { private: friend class AstNodeFactory; + friend Zone; InitializeClassMembersStatement(ZonePtrList* fields, int pos) : Statement(pos, kInitializeClassMembersStatement), fields_(fields) {} @@ -2409,6 +2447,7 @@ class ClassLiteral final : public Expression { private: friend class AstNodeFactory; + friend Zone; ClassLiteral(ClassScope* scope, Expression* extends, FunctionLiteral* constructor, @@ -2458,6 +2497,7 @@ class NativeFunctionLiteral final : public Expression { private: friend class AstNodeFactory; + friend Zone; NativeFunctionLiteral(const AstRawString* name, v8::Extension* extension, int pos) @@ -2476,6 +2516,7 @@ class 
SuperPropertyReference final : public Expression { private: friend class AstNodeFactory; + friend Zone; // We take in ThisExpression* only as a proof that it was accessed. SuperPropertyReference(Expression* home_object, int pos) @@ -2494,6 +2535,7 @@ class SuperCallReference final : public Expression { private: friend class AstNodeFactory; + friend Zone; // We take in ThisExpression* only as a proof that it was accessed. SuperCallReference(VariableProxy* new_target_var, @@ -2517,6 +2559,7 @@ class ImportCallExpression final : public Expression { private: friend class AstNodeFactory; + friend Zone; ImportCallExpression(Expression* argument, int pos) : Expression(pos, kImportCallExpression), argument_(argument) {} @@ -2529,6 +2572,7 @@ class ImportCallExpression final : public Expression { class EmptyParentheses final : public Expression { private: friend class AstNodeFactory; + friend Zone; explicit EmptyParentheses(int pos) : Expression(pos, kEmptyParentheses) { mark_parenthesized(); @@ -2552,6 +2596,7 @@ class GetTemplateObject final : public Expression { private: friend class AstNodeFactory; + friend Zone; GetTemplateObject(const ZonePtrList* cooked_strings, const ZonePtrList* raw_strings, int pos) @@ -2574,6 +2619,7 @@ class TemplateLiteral final : public Expression { private: friend class AstNodeFactory; + friend Zone; TemplateLiteral(const ZonePtrList* parts, const ZonePtrList* substitutions, int pos) : Expression(pos, kTemplateLiteral), @@ -2689,32 +2735,32 @@ class AstNodeFactory final { AstNodeFactory(AstValueFactory* ast_value_factory, Zone* zone) : zone_(zone), ast_value_factory_(ast_value_factory), - empty_statement_(new (zone) class EmptyStatement()), - this_expression_(new (zone) class ThisExpression()), - failure_expression_(new (zone) class FailureExpression()) {} + empty_statement_(zone->New()), + this_expression_(zone->New(kNoSourcePosition)), + failure_expression_(zone->New()) {} AstNodeFactory* ast_node_factory() { return this; } AstValueFactory* 
ast_value_factory() const { return ast_value_factory_; } VariableDeclaration* NewVariableDeclaration(int pos) { - return new (zone_) VariableDeclaration(pos); + return zone_->New(pos); } NestedVariableDeclaration* NewNestedVariableDeclaration(Scope* scope, int pos) { - return new (zone_) NestedVariableDeclaration(scope, pos); + return zone_->New(scope, pos); } FunctionDeclaration* NewFunctionDeclaration(FunctionLiteral* fun, int pos) { - return new (zone_) FunctionDeclaration(fun, pos); + return zone_->New(fun, pos); } Block* NewBlock(int capacity, bool ignore_completion_value) { - return new (zone_) Block(zone_, capacity, ignore_completion_value, false); + return zone_->New(zone_, capacity, ignore_completion_value, false); } Block* NewBlock(bool ignore_completion_value, bool is_breakable) { - return new (zone_) Block(ignore_completion_value, is_breakable); + return zone_->New(ignore_completion_value, is_breakable); } Block* NewBlock(bool ignore_completion_value, @@ -2725,60 +2771,60 @@ class AstNodeFactory final { } #define STATEMENT_WITH_POSITION(NodeType) \ - NodeType* New##NodeType(int pos) { return new (zone_) NodeType(pos); } + NodeType* New##NodeType(int pos) { return zone_->New(pos); } STATEMENT_WITH_POSITION(DoWhileStatement) STATEMENT_WITH_POSITION(WhileStatement) STATEMENT_WITH_POSITION(ForStatement) #undef STATEMENT_WITH_POSITION SwitchStatement* NewSwitchStatement(Expression* tag, int pos) { - return new (zone_) SwitchStatement(zone_, tag, pos); + return zone_->New(zone_, tag, pos); } ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode, int pos) { switch (visit_mode) { case ForEachStatement::ENUMERATE: { - return new (zone_) ForInStatement(pos); + return zone_->New(pos); } case ForEachStatement::ITERATE: { - return new (zone_) ForOfStatement(pos, IteratorType::kNormal); + return zone_->New(pos, IteratorType::kNormal); } } UNREACHABLE(); } ForOfStatement* NewForOfStatement(int pos, IteratorType type) { - return new (zone_) 
ForOfStatement(pos, type); + return zone_->New(pos, type); } ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) { - return new (zone_) ExpressionStatement(expression, pos); + return zone_->New(expression, pos); } ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) { - return new (zone_) ContinueStatement(target, pos); + return zone_->New(target, pos); } BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) { - return new (zone_) BreakStatement(target, pos); + return zone_->New(target, pos); } ReturnStatement* NewReturnStatement(Expression* expression, int pos, int end_position = kNoSourcePosition) { - return new (zone_) ReturnStatement(expression, ReturnStatement::kNormal, + return zone_->New(expression, ReturnStatement::kNormal, pos, end_position); } ReturnStatement* NewAsyncReturnStatement( Expression* expression, int pos, int end_position = kNoSourcePosition) { - return new (zone_) ReturnStatement( + return zone_->New( expression, ReturnStatement::kAsyncReturn, pos, end_position); } ReturnStatement* NewSyntheticAsyncReturnStatement( Expression* expression, int pos, int end_position = kNoSourcePosition) { - return new (zone_) ReturnStatement( + return zone_->New( expression, ReturnStatement::kSyntheticAsyncReturn, pos, end_position); } @@ -2786,18 +2832,18 @@ class AstNodeFactory final { Expression* expression, Statement* statement, int pos) { - return new (zone_) WithStatement(scope, expression, statement, pos); + return zone_->New(scope, expression, statement, pos); } IfStatement* NewIfStatement(Expression* condition, Statement* then_statement, Statement* else_statement, int pos) { - return new (zone_) - IfStatement(condition, then_statement, else_statement, pos); + return zone_->New(condition, then_statement, else_statement, + pos); } TryCatchStatement* NewTryCatchStatement(Block* try_block, Scope* scope, Block* catch_block, int pos) { - return new (zone_) TryCatchStatement(try_block, scope, 
catch_block, + return zone_->New(try_block, scope, catch_block, HandlerTable::CAUGHT, pos); } @@ -2805,7 +2851,7 @@ class AstNodeFactory final { Scope* scope, Block* catch_block, int pos) { - return new (zone_) TryCatchStatement(try_block, scope, catch_block, + return zone_->New(try_block, scope, catch_block, HandlerTable::UNCAUGHT, pos); } @@ -2813,7 +2859,7 @@ class AstNodeFactory final { Scope* scope, Block* catch_block, int pos) { - return new (zone_) TryCatchStatement(try_block, scope, catch_block, + return zone_->New(try_block, scope, catch_block, HandlerTable::DESUGARING, pos); } @@ -2821,7 +2867,7 @@ class AstNodeFactory final { Scope* scope, Block* catch_block, int pos) { - return new (zone_) TryCatchStatement(try_block, scope, catch_block, + return zone_->New(try_block, scope, catch_block, HandlerTable::ASYNC_AWAIT, pos); } @@ -2829,17 +2875,17 @@ class AstNodeFactory final { Scope* scope, Block* catch_block, int pos) { - return new (zone_) TryCatchStatement( + return zone_->New( try_block, scope, catch_block, HandlerTable::UNCAUGHT_ASYNC_AWAIT, pos); } TryFinallyStatement* NewTryFinallyStatement(Block* try_block, Block* finally_block, int pos) { - return new (zone_) TryFinallyStatement(try_block, finally_block, pos); + return zone_->New(try_block, finally_block, pos); } DebuggerStatement* NewDebuggerStatement(int pos) { - return new (zone_) DebuggerStatement(pos); + return zone_->New(pos); } class EmptyStatement* EmptyStatement() { @@ -2857,203 +2903,209 @@ class AstNodeFactory final { return this_expression_; } + class ThisExpression* NewThisExpression(int pos) { + DCHECK_NE(pos, kNoSourcePosition); + return zone_->New(pos); + } + class FailureExpression* FailureExpression() { return failure_expression_; } SloppyBlockFunctionStatement* NewSloppyBlockFunctionStatement( int pos, Variable* var, Token::Value init) { - return new (zone_) - SloppyBlockFunctionStatement(pos, var, init, EmptyStatement()); + return zone_->New(pos, var, init, + EmptyStatement()); 
} CaseClause* NewCaseClause(Expression* label, const ScopedPtrList& statements) { - return new (zone_) CaseClause(zone_, label, statements); + return zone_->New(zone_, label, statements); } Literal* NewStringLiteral(const AstRawString* string, int pos) { DCHECK_NOT_NULL(string); - return new (zone_) Literal(string, pos); + return zone_->New(string, pos); } // A JavaScript symbol (ECMA-262 edition 6). Literal* NewSymbolLiteral(AstSymbol symbol, int pos) { - return new (zone_) Literal(symbol, pos); + return zone_->New(symbol, pos); } Literal* NewNumberLiteral(double number, int pos); Literal* NewSmiLiteral(int number, int pos) { - return new (zone_) Literal(number, pos); + return zone_->New(number, pos); } Literal* NewBigIntLiteral(AstBigInt bigint, int pos) { - return new (zone_) Literal(bigint, pos); + return zone_->New(bigint, pos); } Literal* NewBooleanLiteral(bool b, int pos) { - return new (zone_) Literal(b, pos); + return zone_->New(b, pos); } Literal* NewNullLiteral(int pos) { - return new (zone_) Literal(Literal::kNull, pos); + return zone_->New(Literal::kNull, pos); } Literal* NewUndefinedLiteral(int pos) { - return new (zone_) Literal(Literal::kUndefined, pos); + return zone_->New(Literal::kUndefined, pos); } Literal* NewTheHoleLiteral() { - return new (zone_) Literal(Literal::kTheHole, kNoSourcePosition); + return zone_->New(Literal::kTheHole, kNoSourcePosition); } ObjectLiteral* NewObjectLiteral( const ScopedPtrList& properties, uint32_t boilerplate_properties, int pos, bool has_rest_property) { - return new (zone_) ObjectLiteral(zone_, properties, boilerplate_properties, + return zone_->New(zone_, properties, boilerplate_properties, pos, has_rest_property); } ObjectLiteral::Property* NewObjectLiteralProperty( Expression* key, Expression* value, ObjectLiteralProperty::Kind kind, bool is_computed_name) { - return new (zone_) - ObjectLiteral::Property(key, value, kind, is_computed_name); + return zone_->New(key, value, kind, + is_computed_name); } 
ObjectLiteral::Property* NewObjectLiteralProperty(Expression* key, Expression* value, bool is_computed_name) { - return new (zone_) ObjectLiteral::Property(ast_value_factory_, key, value, + return zone_->New(ast_value_factory_, key, value, is_computed_name); } RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern, int flags, int pos) { - return new (zone_) RegExpLiteral(pattern, flags, pos); + return zone_->New(pattern, flags, pos); } ArrayLiteral* NewArrayLiteral(const ScopedPtrList& values, int pos) { - return new (zone_) ArrayLiteral(zone_, values, -1, pos); + return zone_->New(zone_, values, -1, pos); } ArrayLiteral* NewArrayLiteral(const ScopedPtrList& values, int first_spread_index, int pos) { - return new (zone_) ArrayLiteral(zone_, values, first_spread_index, pos); + return zone_->New(zone_, values, first_spread_index, pos); } VariableProxy* NewVariableProxy(Variable* var, int start_position = kNoSourcePosition) { - return new (zone_) VariableProxy(var, start_position); + return zone_->New(var, start_position); } VariableProxy* NewVariableProxy(const AstRawString* name, VariableKind variable_kind, int start_position = kNoSourcePosition) { DCHECK_NOT_NULL(name); - return new (zone_) VariableProxy(name, variable_kind, start_position); + return zone_->New(name, variable_kind, start_position); } // Recreates the VariableProxy in this Zone. 
VariableProxy* CopyVariableProxy(VariableProxy* proxy) { - return new (zone_) VariableProxy(proxy); + return zone_->New(proxy); } Variable* CopyVariable(Variable* variable) { - return new (zone_) Variable(variable); + return zone_->New(variable); } OptionalChain* NewOptionalChain(Expression* expression) { - return new (zone_) OptionalChain(expression); + return zone_->New(expression); } Property* NewProperty(Expression* obj, Expression* key, int pos, bool optional_chain = false) { - return new (zone_) Property(obj, key, pos, optional_chain); + return zone_->New(obj, key, pos, optional_chain); } Call* NewCall(Expression* expression, const ScopedPtrList& arguments, int pos, Call::PossiblyEval possibly_eval = Call::NOT_EVAL, bool optional_chain = false) { - return new (zone_) - Call(zone_, expression, arguments, pos, possibly_eval, optional_chain); + DCHECK_IMPLIES(possibly_eval == Call::IS_POSSIBLY_EVAL, !optional_chain); + return zone_->New(zone_, expression, arguments, pos, possibly_eval, + optional_chain); } Call* NewTaggedTemplate(Expression* expression, const ScopedPtrList& arguments, int pos) { - return new (zone_) - Call(zone_, expression, arguments, pos, Call::TaggedTemplateTag::kTrue); + return zone_->New(zone_, expression, arguments, pos, + Call::TaggedTemplateTag::kTrue); } CallNew* NewCallNew(Expression* expression, const ScopedPtrList& arguments, int pos) { - return new (zone_) CallNew(zone_, expression, arguments, pos); + return zone_->New(zone_, expression, arguments, pos); } CallRuntime* NewCallRuntime(Runtime::FunctionId id, const ScopedPtrList& arguments, int pos) { - return new (zone_) - CallRuntime(zone_, Runtime::FunctionForId(id), arguments, pos); + return zone_->New(zone_, Runtime::FunctionForId(id), arguments, + pos); } CallRuntime* NewCallRuntime(const Runtime::Function* function, const ScopedPtrList& arguments, int pos) { - return new (zone_) CallRuntime(zone_, function, arguments, pos); + return zone_->New(zone_, function, arguments, pos); 
} CallRuntime* NewCallRuntime(int context_index, const ScopedPtrList& arguments, int pos) { - return new (zone_) CallRuntime(zone_, context_index, arguments, pos); + return zone_->New(zone_, context_index, arguments, pos); } UnaryOperation* NewUnaryOperation(Token::Value op, Expression* expression, int pos) { - return new (zone_) UnaryOperation(op, expression, pos); + return zone_->New(op, expression, pos); } BinaryOperation* NewBinaryOperation(Token::Value op, Expression* left, Expression* right, int pos) { - return new (zone_) BinaryOperation(op, left, right, pos); + return zone_->New(op, left, right, pos); } NaryOperation* NewNaryOperation(Token::Value op, Expression* first, size_t initial_subsequent_size) { - return new (zone_) NaryOperation(zone_, op, first, initial_subsequent_size); + return zone_->New(zone_, op, first, initial_subsequent_size); } CountOperation* NewCountOperation(Token::Value op, bool is_prefix, Expression* expr, int pos) { - return new (zone_) CountOperation(op, is_prefix, expr, pos); + return zone_->New(op, is_prefix, expr, pos); } CompareOperation* NewCompareOperation(Token::Value op, Expression* left, Expression* right, int pos) { - return new (zone_) CompareOperation(op, left, right, pos); + return zone_->New(op, left, right, pos); } Spread* NewSpread(Expression* expression, int pos, int expr_pos) { - return new (zone_) Spread(expression, pos, expr_pos); + return zone_->New(expression, pos, expr_pos); } Conditional* NewConditional(Expression* condition, Expression* then_expression, Expression* else_expression, int position) { - return new (zone_) - Conditional(condition, then_expression, else_expression, position); + return zone_->New(condition, then_expression, else_expression, + position); } Assignment* NewAssignment(Token::Value op, @@ -3069,10 +3121,10 @@ class AstNodeFactory final { } if (op == Token::ASSIGN || op == Token::INIT) { - return new (zone_) - Assignment(AstNode::kAssignment, op, target, value, pos); + return 
zone_->New(AstNode::kAssignment, op, target, value, + pos); } else { - return new (zone_) CompoundAssignment( + return zone_->New( op, target, value, pos, NewBinaryOperation(Token::BinaryOpForAssignment(op), target, value, pos + 1)); @@ -3082,20 +3134,20 @@ class AstNodeFactory final { Suspend* NewYield(Expression* expression, int pos, Suspend::OnAbruptResume on_abrupt_resume) { if (!expression) expression = NewUndefinedLiteral(pos); - return new (zone_) Yield(expression, pos, on_abrupt_resume); + return zone_->New(expression, pos, on_abrupt_resume); } YieldStar* NewYieldStar(Expression* expression, int pos) { - return new (zone_) YieldStar(expression, pos); + return zone_->New(expression, pos); } Await* NewAwait(Expression* expression, int pos) { if (!expression) expression = NewUndefinedLiteral(pos); - return new (zone_) Await(expression, pos); + return zone_->New(expression, pos); } Throw* NewThrow(Expression* exception, int pos) { - return new (zone_) Throw(exception, pos); + return zone_->New(exception, pos); } FunctionLiteral* NewFunctionLiteral( @@ -3107,7 +3159,7 @@ class AstNodeFactory final { FunctionLiteral::EagerCompileHint eager_compile_hint, int position, bool has_braces, int function_literal_id, ProducedPreparseData* produced_preparse_data = nullptr) { - return new (zone_) FunctionLiteral( + return zone_->New( zone_, name ? 
ast_value_factory_->NewConsString(name) : nullptr, ast_value_factory_, scope, body, expected_property_count, parameter_count, function_length, function_syntax_kind, @@ -3121,7 +3173,7 @@ class AstNodeFactory final { FunctionLiteral* NewScriptOrEvalFunctionLiteral( DeclarationScope* scope, const ScopedPtrList& body, int expected_property_count, int parameter_count) { - return new (zone_) FunctionLiteral( + return zone_->New( zone_, ast_value_factory_->empty_cons_string(), ast_value_factory_, scope, body, expected_property_count, parameter_count, parameter_count, FunctionSyntaxKind::kAnonymousExpression, @@ -3133,7 +3185,7 @@ class AstNodeFactory final { ClassLiteral::Property* NewClassLiteralProperty( Expression* key, Expression* value, ClassLiteralProperty::Kind kind, bool is_static, bool is_computed_name, bool is_private) { - return new (zone_) ClassLiteral::Property(key, value, kind, is_static, + return zone_->New(key, value, kind, is_static, is_computed_name, is_private); } @@ -3146,7 +3198,7 @@ class AstNodeFactory final { int start_position, int end_position, bool has_name_static_property, bool has_static_computed_names, bool is_anonymous, bool has_private_methods) { - return new (zone_) ClassLiteral( + return zone_->New( scope, extends, constructor, public_members, private_members, static_fields_initializer, instance_members_initializer_function, start_position, end_position, has_name_static_property, @@ -3156,44 +3208,44 @@ class AstNodeFactory final { NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name, v8::Extension* extension, int pos) { - return new (zone_) NativeFunctionLiteral(name, extension, pos); + return zone_->New(name, extension, pos); } SuperPropertyReference* NewSuperPropertyReference(Expression* home_object, int pos) { - return new (zone_) SuperPropertyReference(home_object, pos); + return zone_->New(home_object, pos); } SuperCallReference* NewSuperCallReference(VariableProxy* new_target_var, VariableProxy* 
this_function_var, int pos) { - return new (zone_) - SuperCallReference(new_target_var, this_function_var, pos); + return zone_->New(new_target_var, this_function_var, + pos); } EmptyParentheses* NewEmptyParentheses(int pos) { - return new (zone_) EmptyParentheses(pos); + return zone_->New(pos); } GetTemplateObject* NewGetTemplateObject( const ZonePtrList* cooked_strings, const ZonePtrList* raw_strings, int pos) { - return new (zone_) GetTemplateObject(cooked_strings, raw_strings, pos); + return zone_->New(cooked_strings, raw_strings, pos); } TemplateLiteral* NewTemplateLiteral( const ZonePtrList* string_parts, const ZonePtrList* substitutions, int pos) { - return new (zone_) TemplateLiteral(string_parts, substitutions, pos); + return zone_->New(string_parts, substitutions, pos); } ImportCallExpression* NewImportCallExpression(Expression* args, int pos) { - return new (zone_) ImportCallExpression(args, pos); + return zone_->New(args, pos); } InitializeClassMembersStatement* NewInitializeClassMembersStatement( ZonePtrList* args, int pos) { - return new (zone_) InitializeClassMembersStatement(args, pos); + return zone_->New(args, pos); } Zone* zone() const { return zone_; } diff --git a/deps/v8/src/ast/modules.cc b/deps/v8/src/ast/modules.cc index 99371306fcf37a..08fbe761020cf5 100644 --- a/deps/v8/src/ast/modules.cc +++ b/deps/v8/src/ast/modules.cc @@ -3,9 +3,10 @@ // found in the LICENSE file. 
#include "src/ast/modules.h" + #include "src/ast/ast-value-factory.h" #include "src/ast/scopes.h" -#include "src/heap/off-thread-factory-inl.h" +#include "src/heap/local-factory-inl.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" #include "src/parsing/pending-compilation-error-handler.h" @@ -32,7 +33,7 @@ void SourceTextModuleDescriptor::AddImport( const AstRawString* import_name, const AstRawString* local_name, const AstRawString* module_request, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { - Entry* entry = new (zone) Entry(loc); + Entry* entry = zone->New(loc); entry->local_name = local_name; entry->import_name = import_name; entry->module_request = AddModuleRequest(module_request, specifier_loc); @@ -43,7 +44,7 @@ void SourceTextModuleDescriptor::AddStarImport( const AstRawString* local_name, const AstRawString* module_request, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { - Entry* entry = new (zone) Entry(loc); + Entry* entry = zone->New(loc); entry->local_name = local_name; entry->module_request = AddModuleRequest(module_request, specifier_loc); AddNamespaceImport(entry, zone); @@ -57,7 +58,7 @@ void SourceTextModuleDescriptor::AddEmptyImport( void SourceTextModuleDescriptor::AddExport(const AstRawString* local_name, const AstRawString* export_name, Scanner::Location loc, Zone* zone) { - Entry* entry = new (zone) Entry(loc); + Entry* entry = zone->New(loc); entry->export_name = export_name; entry->local_name = local_name; AddRegularExport(entry); @@ -69,7 +70,7 @@ void SourceTextModuleDescriptor::AddExport( const Scanner::Location specifier_loc, Zone* zone) { DCHECK_NOT_NULL(import_name); DCHECK_NOT_NULL(export_name); - Entry* entry = new (zone) Entry(loc); + Entry* entry = zone->New(loc); entry->export_name = export_name; entry->import_name = import_name; entry->module_request = AddModuleRequest(module_request, specifier_loc); @@ -79,7 +80,7 @@ void 
SourceTextModuleDescriptor::AddExport( void SourceTextModuleDescriptor::AddStarExport( const AstRawString* module_request, const Scanner::Location loc, const Scanner::Location specifier_loc, Zone* zone) { - Entry* entry = new (zone) Entry(loc); + Entry* entry = zone->New(loc); entry->module_request = AddModuleRequest(module_request, specifier_loc); AddSpecialExport(entry, zone); } @@ -106,7 +107,7 @@ Handle SourceTextModuleDescriptor::Entry::Serialize( template Handle SourceTextModuleDescriptor::Entry::Serialize(Isolate* isolate) const; template Handle -SourceTextModuleDescriptor::Entry::Serialize(OffThreadIsolate* isolate) const; +SourceTextModuleDescriptor::Entry::Serialize(LocalIsolate* isolate) const; template Handle SourceTextModuleDescriptor::SerializeRegularExports( @@ -164,7 +165,7 @@ Handle SourceTextModuleDescriptor::SerializeRegularExports( template Handle SourceTextModuleDescriptor::SerializeRegularExports( Isolate* isolate, Zone* zone) const; template Handle SourceTextModuleDescriptor::SerializeRegularExports( - OffThreadIsolate* isolate, Zone* zone) const; + LocalIsolate* isolate, Zone* zone) const; void SourceTextModuleDescriptor::MakeIndirectExportsExplicit(Zone* zone) { for (auto it = regular_exports_.begin(); it != regular_exports_.end();) { diff --git a/deps/v8/src/ast/scopes.cc b/deps/v8/src/ast/scopes.cc index 3f0a1adbc3af95..6e0e238d33e58a 100644 --- a/deps/v8/src/ast/scopes.cc +++ b/deps/v8/src/ast/scopes.cc @@ -11,16 +11,18 @@ #include "src/base/optional.h" #include "src/builtins/accessors.h" #include "src/common/message-template.h" -#include "src/heap/off-thread-factory-inl.h" +#include "src/heap/local-factory-inl.h" #include "src/init/bootstrapper.h" #include "src/logging/counters.h" #include "src/objects/module-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/scope-info.h" +#include "src/objects/string-set-inl.h" #include "src/parsing/parse-info.h" #include "src/parsing/parser.h" #include "src/parsing/preparse-data.h" 
#include "src/zone/zone-list-inl.h" +#include "src/zone/zone.h" namespace v8 { namespace internal { @@ -34,27 +36,34 @@ namespace internal { // use. Because a Variable holding a handle with the same location exists // this is ensured. +static_assert(sizeof(VariableMap) == (sizeof(void*) + 2 * sizeof(uint32_t) + + sizeof(ZoneAllocationPolicy)), + "Empty base optimization didn't kick in for VariableMap"); + VariableMap::VariableMap(Zone* zone) : ZoneHashMap(8, ZoneAllocationPolicy(zone)) {} +VariableMap::VariableMap(const VariableMap& other, Zone* zone) + : ZoneHashMap(other, ZoneAllocationPolicy(zone)) {} + Variable* VariableMap::Declare(Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, InitializationFlag initialization_flag, MaybeAssignedFlag maybe_assigned_flag, IsStaticFlag is_static_flag, bool* was_added) { + DCHECK_EQ(zone, allocator().zone()); // AstRawStrings are unambiguous, i.e., the same string is always represented // by the same AstRawString*. // FIXME(marja): fix the type of Lookup. - Entry* p = - ZoneHashMap::LookupOrInsert(const_cast(name), name->Hash(), - ZoneAllocationPolicy(zone)); + Entry* p = ZoneHashMap::LookupOrInsert(const_cast(name), + name->Hash()); *was_added = p->value == nullptr; if (*was_added) { // The variable has not been declared yet -> insert it. 
DCHECK_EQ(name, p->key); Variable* variable = - new (zone) Variable(scope, name, mode, kind, initialization_flag, + zone->New(scope, name, mode, kind, initialization_flag, maybe_assigned_flag, is_static_flag); p->value = variable; } @@ -66,11 +75,10 @@ void VariableMap::Remove(Variable* var) { ZoneHashMap::Remove(const_cast(name), name->Hash()); } -void VariableMap::Add(Zone* zone, Variable* var) { +void VariableMap::Add(Variable* var) { const AstRawString* name = var->raw_name(); - Entry* p = - ZoneHashMap::LookupOrInsert(const_cast(name), name->Hash(), - ZoneAllocationPolicy(zone)); + Entry* p = ZoneHashMap::LookupOrInsert(const_cast(name), + name->Hash()); DCHECK_NULL(p->value); DCHECK_EQ(name, p->key); p->value = var; @@ -90,18 +98,12 @@ Variable* VariableMap::Lookup(const AstRawString* name) { // Implementation of Scope Scope::Scope(Zone* zone) - : zone_(zone), - outer_scope_(nullptr), - variables_(zone), - scope_type_(SCRIPT_SCOPE) { + : outer_scope_(nullptr), variables_(zone), scope_type_(SCRIPT_SCOPE) { SetDefaults(); } Scope::Scope(Zone* zone, Scope* outer_scope, ScopeType scope_type) - : zone_(zone), - outer_scope_(outer_scope), - variables_(zone), - scope_type_(scope_type) { + : outer_scope_(outer_scope), variables_(zone), scope_type_(scope_type) { DCHECK_NE(SCRIPT_SCOPE, scope_type); SetDefaults(); set_language_mode(outer_scope->language_mode()); @@ -138,8 +140,8 @@ DeclarationScope::DeclarationScope(Zone* zone, Scope* outer_scope, ModuleScope::ModuleScope(DeclarationScope* script_scope, AstValueFactory* avfactory) : DeclarationScope(avfactory->zone(), script_scope, MODULE_SCOPE, kModule), - module_descriptor_(new (avfactory->zone()) - SourceTextModuleDescriptor(avfactory->zone())) { + module_descriptor_(avfactory->zone()->New( + avfactory->zone())) { set_language_mode(LanguageMode::kStrict); DeclareThis(avfactory); } @@ -192,8 +194,7 @@ ClassScope::ClassScope(Isolate* isolate, Zone* zone, } Scope::Scope(Zone* zone, ScopeType scope_type, Handle 
scope_info) - : zone_(zone), - outer_scope_(nullptr), + : outer_scope_(nullptr), variables_(zone), scope_info_(scope_info), scope_type_(scope_type) { @@ -226,8 +227,7 @@ DeclarationScope::DeclarationScope(Zone* zone, ScopeType scope_type, Scope::Scope(Zone* zone, const AstRawString* catch_variable_name, MaybeAssignedFlag maybe_assigned, Handle scope_info) - : zone_(zone), - outer_scope_(nullptr), + : outer_scope_(nullptr), variables_(zone), scope_info_(scope_info), scope_type_(CATCH_SCOPE) { @@ -357,13 +357,13 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, while (!scope_info.is_null()) { if (scope_info.scope_type() == WITH_SCOPE) { if (scope_info.IsDebugEvaluateScope()) { - outer_scope = new (zone) - DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate)); + outer_scope = zone->New(zone, FUNCTION_SCOPE, + handle(scope_info, isolate)); outer_scope->set_is_debug_evaluate_scope(); } else { // For scope analysis, debug-evaluate is equivalent to a with scope. 
outer_scope = - new (zone) Scope(zone, WITH_SCOPE, handle(scope_info, isolate)); + zone->New(zone, WITH_SCOPE, handle(scope_info, isolate)); } } else if (scope_info.scope_type() == SCRIPT_SCOPE) { @@ -377,28 +377,28 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, DCHECK(!scope_info.HasOuterScopeInfo()); break; } else if (scope_info.scope_type() == FUNCTION_SCOPE) { - outer_scope = new (zone) - DeclarationScope(zone, FUNCTION_SCOPE, handle(scope_info, isolate)); + outer_scope = zone->New(zone, FUNCTION_SCOPE, + handle(scope_info, isolate)); if (scope_info.IsAsmModule()) { outer_scope->AsDeclarationScope()->set_is_asm_module(); } } else if (scope_info.scope_type() == EVAL_SCOPE) { - outer_scope = new (zone) - DeclarationScope(zone, EVAL_SCOPE, handle(scope_info, isolate)); + outer_scope = zone->New(zone, EVAL_SCOPE, + handle(scope_info, isolate)); } else if (scope_info.scope_type() == CLASS_SCOPE) { - outer_scope = new (zone) ClassScope(isolate, zone, ast_value_factory, + outer_scope = zone->New(isolate, zone, ast_value_factory, handle(scope_info, isolate)); } else if (scope_info.scope_type() == BLOCK_SCOPE) { if (scope_info.is_declaration_scope()) { - outer_scope = new (zone) - DeclarationScope(zone, BLOCK_SCOPE, handle(scope_info, isolate)); + outer_scope = zone->New(zone, BLOCK_SCOPE, + handle(scope_info, isolate)); } else { outer_scope = - new (zone) Scope(zone, BLOCK_SCOPE, handle(scope_info, isolate)); + zone->New(zone, BLOCK_SCOPE, handle(scope_info, isolate)); } } else if (scope_info.scope_type() == MODULE_SCOPE) { - outer_scope = new (zone) - ModuleScope(isolate, handle(scope_info, isolate), ast_value_factory); + outer_scope = zone->New(isolate, handle(scope_info, isolate), + ast_value_factory); } else { DCHECK_EQ(scope_info.scope_type(), CATCH_SCOPE); DCHECK_EQ(scope_info.ContextLocalCount(), 1); @@ -407,9 +407,9 @@ Scope* Scope::DeserializeScopeChain(Isolate* isolate, Zone* zone, String name = scope_info.ContextLocalName(0); 
MaybeAssignedFlag maybe_assigned = scope_info.ContextLocalMaybeAssignedFlag(0); - outer_scope = new (zone) - Scope(zone, ast_value_factory->GetString(handle(name, isolate)), - maybe_assigned, handle(scope_info, isolate)); + outer_scope = zone->New( + zone, ast_value_factory->GetString(handle(name, isolate)), + maybe_assigned, handle(scope_info, isolate)); } if (deserialization_mode == DeserializationMode::kScopesOnly) { outer_scope->scope_info_ = Handle::null(); @@ -621,12 +621,12 @@ void DeclarationScope::DeclareThis(AstValueFactory* ast_value_factory) { bool derived_constructor = IsDerivedConstructor(function_kind_); - receiver_ = new (zone()) - Variable(this, ast_value_factory->this_string(), - derived_constructor ? VariableMode::kConst : VariableMode::kVar, - THIS_VARIABLE, - derived_constructor ? kNeedsInitialization : kCreatedInitialized, - kNotAssigned); + receiver_ = zone()->New( + this, ast_value_factory->this_string(), + derived_constructor ? VariableMode::kConst : VariableMode::kVar, + THIS_VARIABLE, + derived_constructor ? kNeedsInitialization : kCreatedInitialized, + kNotAssigned); } void DeclarationScope::DeclareArguments(AstValueFactory* ast_value_factory) { @@ -677,12 +677,12 @@ Variable* DeclarationScope::DeclareFunctionVar(const AstRawString* name, DCHECK_NULL(cache->variables_.Lookup(name)); VariableKind kind = is_sloppy(language_mode()) ? 
SLOPPY_FUNCTION_NAME_VARIABLE : NORMAL_VARIABLE; - function_ = new (zone()) - Variable(this, name, VariableMode::kConst, kind, kCreatedInitialized); + function_ = zone()->New(this, name, VariableMode::kConst, kind, + kCreatedInitialized); if (sloppy_eval_can_extend_vars()) { cache->NonLocal(name, VariableMode::kDynamic); } else { - cache->variables_.Add(zone(), function_); + cache->variables_.Add(function_); } return function_; } @@ -1131,7 +1131,7 @@ Variable* Scope::NewTemporary(const AstRawString* name) { Variable* Scope::NewTemporary(const AstRawString* name, MaybeAssignedFlag maybe_assigned) { DeclarationScope* scope = GetClosureScope(); - Variable* var = new (zone()) Variable(scope, name, VariableMode::kTemporary, + Variable* var = zone()->New(scope, name, VariableMode::kTemporary, NORMAL_VARIABLE, kCreatedInitialized); scope->AddLocal(var); if (maybe_assigned == kMaybeAssigned) var->SetMaybeAssigned(); @@ -1514,7 +1514,7 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, DCHECK(is_function_scope()); // Reset all non-trivial members. - params_.Clear(); + params_.DropAndClear(); decls_.Clear(); locals_.Clear(); inner_scope_ = nullptr; @@ -1524,21 +1524,22 @@ void DeclarationScope::ResetAfterPreparsing(AstValueFactory* ast_value_factory, has_rest_ = false; function_ = nullptr; - DCHECK_NE(zone_, ast_value_factory->zone()); - zone_->ReleaseMemory(); + DCHECK_NE(zone(), ast_value_factory->zone()); + // Make sure this scope and zone aren't used for allocation anymore. + { + // Get the zone, while variables_ is still valid + Zone* zone = this->zone(); + variables_.Invalidate(); + zone->ReleaseMemory(); + } if (aborted) { // Prepare scope for use in the outer zone. 
- zone_ = ast_value_factory->zone(); - variables_.Reset(ZoneAllocationPolicy(zone_)); + variables_ = VariableMap(ast_value_factory->zone()); if (!IsArrowFunction(function_kind_)) { has_simple_parameters_ = true; DeclareDefaultFunctionVariables(ast_value_factory); } - } else { - // Make sure this scope isn't used for allocation anymore. - zone_ = nullptr; - variables_.Invalidate(); } #ifdef DEBUG @@ -2476,8 +2477,8 @@ template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Scope:: AllocateScopeInfosRecursively(Isolate* isolate, MaybeHandle outer_scope); template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void Scope:: - AllocateScopeInfosRecursively( - OffThreadIsolate* isolate, MaybeHandle outer_scope); + AllocateScopeInfosRecursively( + LocalIsolate* isolate, MaybeHandle outer_scope); void DeclarationScope::RecalcPrivateNameContextChain() { // The outermost scope in a class heritage expression is marked to skip the @@ -2556,10 +2557,10 @@ void DeclarationScope::AllocateScopeInfos(ParseInfo* info, } } -template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos( +template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos( ParseInfo* info, Isolate* isolate); -template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos< - OffThreadIsolate>(ParseInfo* info, OffThreadIsolate* isolate); +template V8_EXPORT_PRIVATE void DeclarationScope::AllocateScopeInfos( + ParseInfo* info, LocalIsolate* isolate); int Scope::ContextLocalCount() const { if (num_heap_slots() == 0) return 0; diff --git a/deps/v8/src/ast/scopes.h b/deps/v8/src/ast/scopes.h index 11f44bb498436f..a5f4523670a170 100644 --- a/deps/v8/src/ast/scopes.h +++ b/deps/v8/src/ast/scopes.h @@ -6,6 +6,7 @@ #define V8_AST_SCOPES_H_ #include + #include "src/ast/ast.h" #include "src/base/compiler-specific.h" #include "src/base/hashmap.h" @@ -15,6 +16,7 @@ #include "src/objects/objects.h" #include "src/utils/pointer-with-payload.h" #include "src/utils/utils.h" +#include "src/zone/zone-hashmap.h" 
#include "src/zone/zone.h" namespace v8 { @@ -39,6 +41,15 @@ using UnresolvedList = class VariableMap : public ZoneHashMap { public: explicit VariableMap(Zone* zone); + VariableMap(const VariableMap& other, Zone* zone); + + VariableMap(VariableMap&& other) V8_NOEXCEPT : ZoneHashMap(std::move(other)) { + } + + VariableMap& operator=(VariableMap&& other) V8_NOEXCEPT { + static_cast(*this) = std::move(other); + return *this; + } Variable* Declare(Zone* zone, Scope* scope, const AstRawString* name, VariableMode mode, VariableKind kind, @@ -48,7 +59,9 @@ class VariableMap : public ZoneHashMap { V8_EXPORT_PRIVATE Variable* Lookup(const AstRawString* name); void Remove(Variable* var); - void Add(Zone* zone, Variable* var); + void Add(Variable* var); + + Zone* zone() const { return allocator().zone(); } }; class Scope; @@ -102,6 +115,10 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { } inline explicit Snapshot(Scope* scope); + // Disallow copy and move. + Snapshot(const Snapshot&) = delete; + Snapshot(Snapshot&&) = delete; + ~Snapshot() { // If we're still active, there was no arrow function. In that case outer // calls eval if it already called eval before this snapshot started, or @@ -142,10 +159,6 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { Scope* top_inner_scope_; UnresolvedList::Iterator top_unresolved_; base::ThreadedList::Iterator top_local_; - - // Disallow copy and move. - Snapshot(const Snapshot&) = delete; - Snapshot(Snapshot&&) = delete; }; enum class DeserializationMode { kIncludingVariables, kScopesOnly }; @@ -166,7 +179,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { // Assumes outer_scope_ is non-null. 
void ReplaceOuterScope(Scope* outer_scope); - Zone* zone() const { return zone_; } + Zone* zone() const { return variables_.zone(); } void SetMustUsePreparseData() { if (must_use_preparsed_scope_data_) { @@ -697,8 +710,7 @@ class V8_EXPORT_PRIVATE Scope : public NON_EXPORTED_BASE(ZoneObject) { friend class DeclarationScope; friend class ClassScope; friend class ScopeTestHelper; - - Zone* zone_; + friend Zone; // Scope tree. Scope* outer_scope_; // the immediately enclosing outer scope, or nullptr @@ -896,11 +908,13 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { } bool is_being_lazily_parsed() const { return is_being_lazily_parsed_; } #endif + void set_zone(Zone* zone) { #ifdef DEBUG needs_migration_ = true; #endif - zone_ = zone; + // Migrate variables_' backing store to new zone. + variables_ = VariableMap(variables_, zone); } // --------------------------------------------------------------------------- @@ -1258,7 +1272,7 @@ class V8_EXPORT_PRIVATE DeclarationScope : public Scope { V8_INLINE RareData* EnsureRareData() { if (rare_data_ == nullptr) { - rare_data_ = new (zone_) RareData; + rare_data_ = zone()->New(); } return rare_data_; } @@ -1439,8 +1453,8 @@ class V8_EXPORT_PRIVATE ClassScope : public Scope { } V8_INLINE RareData* EnsureRareData() { if (GetRareData() == nullptr) { - rare_data_and_is_parsing_heritage_.SetPointer(new (zone_) - RareData(zone_)); + rare_data_and_is_parsing_heritage_.SetPointer( + zone()->New(zone())); } return GetRareData(); } diff --git a/deps/v8/src/base/OWNERS b/deps/v8/src/base/OWNERS index 3654b400adad26..67dcc1cd98f358 100644 --- a/deps/v8/src/base/OWNERS +++ b/deps/v8/src/base/OWNERS @@ -1,4 +1,5 @@ clemensb@chromium.org +ishell@chromium.org mlippautz@chromium.org # COMPONENT: Blink>JavaScript diff --git a/deps/v8/src/base/address-region.h b/deps/v8/src/base/address-region.h index 44151606c0cc63..010a123dc3ed9d 100644 --- a/deps/v8/src/base/address-region.h +++ b/deps/v8/src/base/address-region.h @@ -25,9 +25,9 
@@ class AddressRegion { using Address = uintptr_t; - AddressRegion() = default; + constexpr AddressRegion() = default; - AddressRegion(Address address, size_t size) + constexpr AddressRegion(Address address, size_t size) : address_(address), size_(size) {} Address begin() const { return address_; } diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h index 11c41545ab902f..01a01c5ff4c33a 100644 --- a/deps/v8/src/base/atomicops.h +++ b/deps/v8/src/base/atomicops.h @@ -65,6 +65,8 @@ using AtomicWord = intptr_t; // I.e. replace |*ptr| with |new_value| if |*ptr| used to be |old_value|. // Always return the value of |*ptr| before the operation. // Acquire, Relaxed, Release correspond to standard C++ memory orders. +Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, + Atomic8 new_value); Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value, Atomic16 new_value); Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, diff --git a/deps/v8/src/base/atomicops_internals_portable.h b/deps/v8/src/base/atomicops_internals_portable.h index 1f89f0a6b37c46..9abaf17c066b43 100644 --- a/deps/v8/src/base/atomicops_internals_portable.h +++ b/deps/v8/src/base/atomicops_internals_portable.h @@ -50,6 +50,14 @@ inline void SeqCst_MemoryFence() { #endif } +inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, + Atomic8 new_value) { + bool result = __atomic_compare_exchange_n(ptr, &old_value, new_value, false, + __ATOMIC_RELAXED, __ATOMIC_RELAXED); + USE(result); // Make gcc compiler happy. 
+ return old_value; +} + inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value, Atomic16 new_value) { __atomic_compare_exchange_n(ptr, &old_value, new_value, false, diff --git a/deps/v8/src/base/atomicops_internals_std.h b/deps/v8/src/base/atomicops_internals_std.h index 8ea1019202959a..1b423c842c6cf8 100644 --- a/deps/v8/src/base/atomicops_internals_std.h +++ b/deps/v8/src/base/atomicops_internals_std.h @@ -28,6 +28,14 @@ inline void SeqCst_MemoryFence() { std::atomic_thread_fence(std::memory_order_seq_cst); } +inline Atomic8 Relaxed_CompareAndSwap(volatile Atomic8* ptr, Atomic8 old_value, + Atomic8 new_value) { + std::atomic_compare_exchange_strong_explicit( + helper::to_std_atomic(ptr), &old_value, new_value, + std::memory_order_relaxed, std::memory_order_relaxed); + return old_value; +} + inline Atomic16 Relaxed_CompareAndSwap(volatile Atomic16* ptr, Atomic16 old_value, Atomic16 new_value) { std::atomic_compare_exchange_strong_explicit( diff --git a/deps/v8/src/base/bit-field.h b/deps/v8/src/base/bit-field.h index 9cebac32de49aa..ca5fb459210ec2 100644 --- a/deps/v8/src/base/bit-field.h +++ b/deps/v8/src/base/bit-field.h @@ -52,9 +52,7 @@ class BitField final { // Returns a type U with the bit field value encoded. static constexpr U encode(T value) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK(is_valid(value)); -#endif + CONSTEXPR_DCHECK(is_valid(value)); return static_cast(value) << kShift; } diff --git a/deps/v8/src/base/bits.h b/deps/v8/src/base/bits.h index b74b98e1069bf9..cf4b77fa1862ff 100644 --- a/deps/v8/src/base/bits.h +++ b/deps/v8/src/base/bits.h @@ -32,22 +32,27 @@ constexpr inline return sizeof(T) == 8 ? __builtin_popcountll(static_cast(value)) : __builtin_popcount(static_cast(value)); #else + // Fall back to divide-and-conquer popcount (see "Hacker's Delight" by Henry + // S. Warren, Jr.), chapter 5-1. 
constexpr uint64_t mask[] = {0x5555555555555555, 0x3333333333333333, 0x0f0f0f0f0f0f0f0f}; - // Start with 1 bit wide buckets of [0,1]. + // Start with 64 buckets of 1 bits, holding values from [0,1]. value = ((value >> 1) & mask[0]) + (value & mask[0]); - // Having 2 bit wide buckets of [0,2] now. + // Having 32 buckets of 2 bits, holding values from [0,2] now. value = ((value >> 2) & mask[1]) + (value & mask[1]); - // Having 4 bit wide buckets of [0,4] now. - value = (value >> 4) + value; - // Having 4 bit wide buckets of [0,8] now. - if (sizeof(T) > 1) - value = ((value >> (sizeof(T) > 1 ? 8 : 0)) & mask[2]) + (value & mask[2]); - // Having 8 bit wide buckets of [0,16] now. + // Having 16 buckets of 4 bits, holding values from [0,4] now. + value = ((value >> 4) & mask[2]) + (value & mask[2]); + // Having 8 buckets of 8 bits, holding values from [0,8] now. + // From this point on, the buckets are bigger than the number of bits + // required to hold the values, and the buckets are bigger the maximum + // result, so there's no need to mask value anymore, since there's no + // more risk of overflow between buckets. + if (sizeof(T) > 1) value = (value >> (sizeof(T) > 1 ? 8 : 0)) + value; + // Having 4 buckets of 16 bits, holding values from [0,16] now. if (sizeof(T) > 2) value = (value >> (sizeof(T) > 2 ? 16 : 0)) + value; - // Having 8 bit wide buckets of [0,32] now. + // Having 2 buckets of 32 bits, holding values from [0,32] now. if (sizeof(T) > 4) value = (value >> (sizeof(T) > 4 ? 32 : 0)) + value; - // Having 8 bit wide buckets of [0,64] now. + // Having 1 buckets of 64 bits, holding values from [0,64] now. 
return static_cast(value & 0xff); #endif } @@ -140,9 +145,7 @@ constexpr inline bool IsPowerOfTwo(T value) { template ::value>::type> inline constexpr int WhichPowerOfTwo(T value) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK(IsPowerOfTwo(value)); -#endif + CONSTEXPR_DCHECK(IsPowerOfTwo(value)); #if V8_HAS_BUILTIN_CTZ STATIC_ASSERT(sizeof(T) <= 8); return sizeof(T) == 8 ? __builtin_ctzll(static_cast(value)) diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc index 9b01f89428b657..e6c1a61bcbe962 100644 --- a/deps/v8/src/base/bounded-page-allocator.cc +++ b/deps/v8/src/base/bounded-page-allocator.cc @@ -59,6 +59,26 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size, return true; } +bool BoundedPageAllocator::ReserveForSharedMemoryMapping(void* ptr, + size_t size) { + Address address = reinterpret_cast
(ptr); + CHECK(IsAligned(address, allocate_page_size_)); + CHECK(IsAligned(size, commit_page_size_)); + CHECK(region_allocator_.contains(address, size)); + + // Region allocator requires page size rather than commit size so just over- + // allocate there since any extra space couldn't be used anyway. + size_t region_size = RoundUp(size, allocate_page_size_); + if (!region_allocator_.AllocateRegionAt( + address, region_size, RegionAllocator::RegionState::kExcluded)) { + return false; + } + + CHECK(page_allocator_->SetPermissions(ptr, size, + PageAllocator::Permission::kNoAccess)); + return true; +} + bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) { MutexGuard guard(&mutex_); diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h index f1b85f23aa196c..d09aecee05c6f0 100644 --- a/deps/v8/src/base/bounded-page-allocator.h +++ b/deps/v8/src/base/bounded-page-allocator.h @@ -56,6 +56,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator { void* AllocatePages(void* hint, size_t size, size_t alignment, Permission access) override; + bool ReserveForSharedMemoryMapping(void* address, size_t size) override; + // Allocates pages at given address, returns true on success. bool AllocatePagesAt(Address address, size_t size, Permission access); diff --git a/deps/v8/src/base/bounds.h b/deps/v8/src/base/bounds.h index 236e29b7ccd945..fb8c968d660eca 100644 --- a/deps/v8/src/base/bounds.h +++ b/deps/v8/src/base/bounds.h @@ -15,9 +15,7 @@ namespace base { // branch. template inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK_LE(lower_limit, higher_limit); -#endif + CONSTEXPR_DCHECK(lower_limit <= higher_limit); STATIC_ASSERT(sizeof(U) <= sizeof(T)); using unsigned_T = typename std::make_unsigned::type; // Use static_cast to support enum classes. 
@@ -29,7 +27,9 @@ inline constexpr bool IsInRange(T value, U lower_limit, U higher_limit) { // Checks if [index, index+length) is in range [0, max). Note that this check // works even if {index+length} would wrap around. -inline constexpr bool IsInBounds(size_t index, size_t length, size_t max) { +template ::value>::type> +inline constexpr bool IsInBounds(T index, T length, T max) { return length <= max && index <= (max - length); } diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h index 8d142c456c9cc1..ad287c92906485 100644 --- a/deps/v8/src/base/build_config.h +++ b/deps/v8/src/base/build_config.h @@ -199,6 +199,8 @@ #else #define V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK false #endif +constexpr int kReturnAddressStackSlotCount = + V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0; // Number of bits to represent the page size for paged spaces. #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h index 5d68f7e11b01b1..49ce128a4a3f2c 100644 --- a/deps/v8/src/base/compiler-specific.h +++ b/deps/v8/src/base/compiler-specific.h @@ -105,4 +105,31 @@ #define V8_NOEXCEPT #endif +// Specify memory alignment for structs, classes, etc. +// Use like: +// class ALIGNAS(16) MyClass { ... } +// ALIGNAS(16) int array[4]; +// +// In most places you can use the C++11 keyword "alignas", which is preferred. +// +// But compilers have trouble mixing __attribute__((...)) syntax with +// alignas(...) syntax. 
+// +// Doesn't work in clang or gcc: +// struct alignas(16) __attribute__((packed)) S { char c; }; +// Works in clang but not gcc: +// struct __attribute__((packed)) alignas(16) S2 { char c; }; +// Works in clang and gcc: +// struct alignas(16) S3 { char c; } __attribute__((packed)); +// +// There are also some attributes that must be specified *before* a class +// definition: visibility (used for exporting functions/classes) is one of +// these attributes. This means that it is not possible to use alignas() with a +// class that is marked as exported. +#if defined(V8_CC_MSVC) +#define ALIGNAS(byte_alignment) __declspec(align(byte_alignment)) +#else +#define ALIGNAS(byte_alignment) __attribute__((aligned(byte_alignment))) +#endif + #endif // V8_BASE_COMPILER_SPECIFIC_H_ diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc index bbdae525e30b39..bae1afe7d1d8ce 100644 --- a/deps/v8/src/base/cpu.cc +++ b/deps/v8/src/base/cpu.cc @@ -75,7 +75,8 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #endif // !V8_LIBC_MSVCRT -#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 +#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS || \ + V8_HOST_ARCH_MIPS64 #if V8_OS_LINUX @@ -108,6 +109,51 @@ static V8_INLINE void __cpuid(int cpu_info[4], int info_type) { #define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #define HWCAP_LPAE (1 << 20) +#endif // V8_HOST_ARCH_ARM + +#if V8_HOST_ARCH_ARM64 + +// See kernel header. 
+/* + * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP + */ +#define HWCAP_FP (1 << 0) +#define HWCAP_ASIMD (1 << 1) +#define HWCAP_EVTSTRM (1 << 2) +#define HWCAP_AES (1 << 3) +#define HWCAP_PMULL (1 << 4) +#define HWCAP_SHA1 (1 << 5) +#define HWCAP_SHA2 (1 << 6) +#define HWCAP_CRC32 (1 << 7) +#define HWCAP_ATOMICS (1 << 8) +#define HWCAP_FPHP (1 << 9) +#define HWCAP_ASIMDHP (1 << 10) +#define HWCAP_CPUID (1 << 11) +#define HWCAP_ASIMDRDM (1 << 12) +#define HWCAP_JSCVT (1 << 13) +#define HWCAP_FCMA (1 << 14) +#define HWCAP_LRCPC (1 << 15) +#define HWCAP_DCPOP (1 << 16) +#define HWCAP_SHA3 (1 << 17) +#define HWCAP_SM3 (1 << 18) +#define HWCAP_SM4 (1 << 19) +#define HWCAP_ASIMDDP (1 << 20) +#define HWCAP_SHA512 (1 << 21) +#define HWCAP_SVE (1 << 22) +#define HWCAP_ASIMDFHM (1 << 23) +#define HWCAP_DIT (1 << 24) +#define HWCAP_USCAT (1 << 25) +#define HWCAP_ILRCPC (1 << 26) +#define HWCAP_FLAGM (1 << 27) +#define HWCAP_SSBS (1 << 28) +#define HWCAP_SB (1 << 29) +#define HWCAP_PACA (1 << 30) +#define HWCAP_PACG (1UL << 31) + +#endif // V8_HOST_ARCH_ARM64 + +#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 + static uint32_t ReadELFHWCaps() { uint32_t result = 0; #if V8_GLIBC_PREREQ(2, 16) @@ -136,7 +182,7 @@ static uint32_t ReadELFHWCaps() { return result; } -#endif // V8_HOST_ARCH_ARM +#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 #if V8_HOST_ARCH_MIPS int __detect_fp64_mode(void) { @@ -298,7 +344,8 @@ static bool HasListItem(const char* list, const char* item) { #endif // V8_OS_LINUX -#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 +#endif // V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || + // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64 CPU::CPU() : stepping_(0), @@ -337,6 +384,7 @@ CPU::CPU() has_vfp_(false), has_vfp3_(false), has_vfp3_d32_(false), + has_jscvt_(false), is_fp64_mode_(false), has_non_stop_time_stamp_counter_(false), has_msa_(false) { @@ -609,6 +657,19 @@ CPU::CPU() // Windows makes high-resolution thread timing information available 
in // user-space. has_non_stop_time_stamp_counter_ = true; + +#elif V8_OS_LINUX + // Try to extract the list of CPU features from ELF hwcaps. + uint32_t hwcaps = ReadELFHWCaps(); + if (hwcaps != 0) { + has_jscvt_ = (hwcaps & HWCAP_JSCVT) != 0; + } else { + // Try to fallback to "Features" CPUInfo field + CPUInfo cpu_info; + char* features = cpu_info.ExtractField("Features"); + has_jscvt_ = HasListItem(features, "jscvt"); + delete[] features; + } #endif // V8_OS_WIN #elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64 diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h index 4b4becfa204937..8cec23c8e8e4bc 100644 --- a/deps/v8/src/base/cpu.h +++ b/deps/v8/src/base/cpu.h @@ -110,6 +110,7 @@ class V8_BASE_EXPORT CPU final { bool has_vfp() const { return has_vfp_; } bool has_vfp3() const { return has_vfp3_; } bool has_vfp3_d32() const { return has_vfp3_d32_; } + bool has_jscvt() const { return has_jscvt_; } // mips features bool is_fp64_mode() const { return is_fp64_mode_; } @@ -153,6 +154,7 @@ class V8_BASE_EXPORT CPU final { bool has_vfp_; bool has_vfp3_; bool has_vfp3_d32_; + bool has_jscvt_; bool is_fp64_mode_; bool has_non_stop_time_stamp_counter_; bool has_msa_; diff --git a/deps/v8/src/base/enum-set.h b/deps/v8/src/base/enum-set.h index 927a8f87fe0ce6..2415f1c500bd3f 100644 --- a/deps/v8/src/base/enum-set.h +++ b/deps/v8/src/base/enum-set.h @@ -63,9 +63,7 @@ class EnumSet { explicit constexpr EnumSet(T bits) : bits_(bits) {} static constexpr T Mask(E element) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK_GT(sizeof(T) * 8, static_cast(element)); -#endif + CONSTEXPR_DCHECK(sizeof(T) * 8 > static_cast(element)); return T{1} << static_cast::type>(element); } diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h index c2b7952260a5b5..bbc204568228b8 100644 --- a/deps/v8/src/base/flags.h +++ b/deps/v8/src/base/flags.h @@ -89,50 +89,40 @@ class Flags final { mask_type mask_; }; -#define DEFINE_OPERATORS_FOR_FLAGS(Type) \ - inline Type operator&( \ - Type::flag_type 
lhs, \ - Type::flag_type rhs)ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) { \ - return Type(lhs) & rhs; \ - } \ - inline Type operator&( \ - Type::flag_type lhs, \ - const Type& rhs)ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator&(Type::flag_type lhs, const Type& rhs) { \ - return rhs & lhs; \ - } \ - inline void operator&(Type::flag_type lhs, \ - Type::mask_type rhs)ALLOW_UNUSED_TYPE; \ - inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {} \ - inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) \ - ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) { \ - return Type(lhs) | rhs; \ - } \ - inline Type operator|(Type::flag_type lhs, const Type& rhs) \ - ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator|(Type::flag_type lhs, const Type& rhs) { \ - return rhs | lhs; \ - } \ - inline void operator|(Type::flag_type lhs, Type::mask_type rhs) \ - ALLOW_UNUSED_TYPE; \ - inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {} \ - inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) \ - ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) { \ - return Type(lhs) ^ rhs; \ - } \ - inline Type operator^(Type::flag_type lhs, const Type& rhs) \ - ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT; \ - inline Type operator^(Type::flag_type lhs, const Type& rhs) { \ - return rhs ^ lhs; \ - } \ - inline void operator^(Type::flag_type lhs, Type::mask_type rhs) \ - ALLOW_UNUSED_TYPE; \ - inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {} \ - inline Type operator~(Type::flag_type val)ALLOW_UNUSED_TYPE; \ - inline Type operator~(Type::flag_type val) { return ~Type(val); } +#define DEFINE_OPERATORS_FOR_FLAGS(Type) \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \ + Type::flag_type lhs, 
Type::flag_type rhs) { \ + return Type(lhs) & rhs; \ + } \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \ + Type::flag_type lhs, const Type& rhs) { \ + return rhs & lhs; \ + } \ + ALLOW_UNUSED_TYPE inline void operator&(Type::flag_type lhs, \ + Type::mask_type rhs) {} \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \ + Type::flag_type lhs, Type::flag_type rhs) { \ + return Type(lhs) | rhs; \ + } \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \ + Type::flag_type lhs, const Type& rhs) { \ + return rhs | lhs; \ + } \ + ALLOW_UNUSED_TYPE inline void operator|(Type::flag_type lhs, \ + Type::mask_type rhs) {} \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \ + Type::flag_type lhs, Type::flag_type rhs) { \ + return Type(lhs) ^ rhs; \ + } \ + ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \ + Type::flag_type lhs, const Type& rhs) { \ + return rhs ^ lhs; \ + } \ + ALLOW_UNUSED_TYPE inline void operator^(Type::flag_type lhs, \ + Type::mask_type rhs) {} \ + ALLOW_UNUSED_TYPE inline constexpr Type operator~(Type::flag_type val) { \ + return ~Type(val); \ + } } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/hashmap.h b/deps/v8/src/base/hashmap.h index 4ad946d0ddb132..2b40b329b8e819 100644 --- a/deps/v8/src/base/hashmap.h +++ b/deps/v8/src/base/hashmap.h @@ -20,8 +20,14 @@ namespace base { class DefaultAllocationPolicy { public: - V8_INLINE void* New(size_t size) { return malloc(size); } - V8_INLINE static void Delete(void* p) { free(p); } + template + V8_INLINE T* NewArray(size_t length) { + return static_cast(malloc(length * sizeof(T))); + } + template + V8_INLINE void DeleteArray(T* p, size_t length) { + free(p); + } }; template @@ -36,17 +42,21 @@ class TemplateHashMapImpl { // initial_capacity is the size of the initial hash map; // it must be a power of 2 (and thus must not be 0). 
- TemplateHashMapImpl(uint32_t capacity = kDefaultHashMapCapacity, - MatchFun match = MatchFun(), - AllocationPolicy allocator = AllocationPolicy()); + explicit TemplateHashMapImpl(uint32_t capacity = kDefaultHashMapCapacity, + MatchFun match = MatchFun(), + AllocationPolicy allocator = AllocationPolicy()); // Clones the given hashmap and creates a copy with the same entries. - TemplateHashMapImpl(const TemplateHashMapImpl* original, - AllocationPolicy allocator = AllocationPolicy()); + explicit TemplateHashMapImpl(const TemplateHashMapImpl* original, + AllocationPolicy allocator = AllocationPolicy()); + + TemplateHashMapImpl(TemplateHashMapImpl&& other) V8_NOEXCEPT = default; ~TemplateHashMapImpl(); + TemplateHashMapImpl& operator=(TemplateHashMapImpl&& other) + V8_NOEXCEPT = default; + // If an entry with matching key is found, returns that entry. // Otherwise, nullptr is returned. Entry* Lookup(const Key& key, uint32_t hash) const; @@ -54,18 +64,15 @@ class TemplateHashMapImpl { // If an entry with matching key is found, returns that entry. // If no matching entry is found, a new entry is inserted with // corresponding key, key hash, and default initialized value. - Entry* LookupOrInsert(const Key& key, uint32_t hash, - AllocationPolicy allocator = AllocationPolicy()); + Entry* LookupOrInsert(const Key& key, uint32_t hash); // If an entry with matching key is found, returns that entry. // If no matching entry is found, a new entry is inserted with // corresponding key, key hash, and value created by func. template - Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func, - AllocationPolicy allocator = AllocationPolicy()); + Entry* LookupOrInsert(const Key& key, uint32_t hash, const Func& value_func); - Entry* InsertNew(const Key& key, uint32_t hash, - AllocationPolicy allocator = AllocationPolicy()); + Entry* InsertNew(const Key& key, uint32_t hash); // Removes the entry with matching key. 
// It returns the value of the deleted entry @@ -77,19 +84,18 @@ class TemplateHashMapImpl { // Empties the map and makes it unusable for allocation. void Invalidate() { - AllocationPolicy::Delete(map_); - map_ = nullptr; - occupancy_ = 0; - capacity_ = 0; + DCHECK_NOT_NULL(impl_.map_); + impl_.allocator().DeleteArray(impl_.map_, capacity()); + impl_ = Impl(impl_.match(), AllocationPolicy()); } // The number of (non-empty) entries in the table. - uint32_t occupancy() const { return occupancy_; } + uint32_t occupancy() const { return impl_.occupancy_; } // The capacity of the table. The implementation // makes sure that occupancy is at most 80% of // the table capacity. - uint32_t capacity() const { return capacity_; } + uint32_t capacity() const { return impl_.capacity_; } // Iteration // @@ -102,28 +108,58 @@ class TemplateHashMapImpl { Entry* Start() const; Entry* Next(Entry* entry) const; - void Reset(AllocationPolicy allocator) { - Initialize(capacity_, allocator); - occupancy_ = 0; - } + AllocationPolicy allocator() const { return impl_.allocator(); } protected: - void Initialize(uint32_t capacity, AllocationPolicy allocator); + void Initialize(uint32_t capacity); private: - Entry* map_; - uint32_t capacity_; - uint32_t occupancy_; - // TODO(leszeks): This takes up space even if it has no state, maybe replace - // with something that does the empty base optimisation e.g. std::tuple - MatchFun match_; - - Entry* map_end() const { return map_ + capacity_; } + Entry* map_end() const { return impl_.map_ + impl_.capacity_; } Entry* Probe(const Key& key, uint32_t hash) const; Entry* FillEmptyEntry(Entry* entry, const Key& key, const Value& value, - uint32_t hash, - AllocationPolicy allocator = AllocationPolicy()); - void Resize(AllocationPolicy allocator); + uint32_t hash); + void Resize(); + + // To support matcher and allocator that may not be possible to + // default-construct, we have to store their instances. 
Using this to store + // all internal state of the hash map and using private inheritance to store + // matcher and allocator lets us take advantage of an empty base class + // optimization to avoid extra space in the common case when MatchFun and + // AllocationPolicy have no state. + // TODO(ishell): Once we reach C++20, consider removing the Impl struct and + // adding match and allocator as [[no_unique_address]] fields. + struct Impl : private MatchFun, private AllocationPolicy { + Impl(MatchFun match, AllocationPolicy allocator) + : MatchFun(std::move(match)), AllocationPolicy(std::move(allocator)) {} + + Impl() = default; + Impl(const Impl&) V8_NOEXCEPT = default; + Impl(Impl&& other) V8_NOEXCEPT { *this = std::move(other); } + + Impl& operator=(const Impl& other) V8_NOEXCEPT = default; + Impl& operator=(Impl&& other) V8_NOEXCEPT { + MatchFun::operator=(std::move(other)); + AllocationPolicy::operator=(std::move(other)); + map_ = other.map_; + capacity_ = other.capacity_; + occupancy_ = other.occupancy_; + + other.map_ = nullptr; + other.capacity_ = 0; + other.occupancy_ = 0; + return *this; + } + + const MatchFun& match() const { return *this; } + MatchFun& match() { return *this; } + + const AllocationPolicy& allocator() const { return *this; } + AllocationPolicy& allocator() { return *this; } + + Entry* map_ = nullptr; + uint32_t capacity_ = 0; + uint32_t occupancy_ = 0; + } impl_; DISALLOW_COPY_AND_ASSIGN(TemplateHashMapImpl); }; @@ -132,28 +168,27 @@ template :: TemplateHashMapImpl(uint32_t initial_capacity, MatchFun match, AllocationPolicy allocator) - : match_(match) { - Initialize(initial_capacity, allocator); + : impl_(std::move(match), std::move(allocator)) { + Initialize(initial_capacity); } template TemplateHashMapImpl:: - TemplateHashMapImpl(const TemplateHashMapImpl* original, + TemplateHashMapImpl(const TemplateHashMapImpl* original, AllocationPolicy allocator) - : capacity_(original->capacity_), - occupancy_(original->occupancy_), - 
match_(original->match_) { - map_ = reinterpret_cast(allocator.New(capacity_ * sizeof(Entry))); - memcpy(map_, original->map_, capacity_ * sizeof(Entry)); + : impl_(original->impl_.match(), std::move(allocator)) { + impl_.capacity_ = original->capacity(); + impl_.occupancy_ = original->occupancy(); + impl_.map_ = impl_.allocator().template NewArray(capacity()); + memcpy(impl_.map_, original->impl_.map_, capacity() * sizeof(Entry)); } template TemplateHashMapImpl::~TemplateHashMapImpl() { - AllocationPolicy::Delete(map_); + if (impl_.map_) impl_.allocator().DeleteArray(impl_.map_, capacity()); } template typename TemplateHashMapImpl::Entry* TemplateHashMapImpl::LookupOrInsert( - const Key& key, uint32_t hash, AllocationPolicy allocator) { - return LookupOrInsert(key, hash, []() { return Value(); }, allocator); + const Key& key, uint32_t hash) { + return LookupOrInsert(key, hash, []() { return Value(); }); } template typename TemplateHashMapImpl::Entry* TemplateHashMapImpl::LookupOrInsert( - const Key& key, uint32_t hash, const Func& value_func, - AllocationPolicy allocator) { + const Key& key, uint32_t hash, const Func& value_func) { // Find a matching entry. Entry* entry = Probe(key, hash); if (entry->exists()) { return entry; } - return FillEmptyEntry(entry, key, value_func(), hash, allocator); + return FillEmptyEntry(entry, key, value_func(), hash); } template typename TemplateHashMapImpl::Entry* TemplateHashMapImpl::InsertNew( - const Key& key, uint32_t hash, AllocationPolicy allocator) { + const Key& key, uint32_t hash) { Entry* entry = Probe(key, hash); - return FillEmptyEntry(entry, key, Value(), hash, allocator); + return FillEmptyEntry(entry, key, Value(), hash); } template ::Remove( // This guarantees loop termination as there is at least one empty entry so // eventually the removed entry will have an empty entry after it. - DCHECK(occupancy_ < capacity_); + DCHECK(occupancy() < capacity()); // p is the candidate entry to clear. q is used to scan forwards. 
Entry* q = p; // Start at the entry to remove. @@ -232,7 +266,7 @@ Value TemplateHashMapImpl::Remove( // Move q to the next entry. q = q + 1; if (q == map_end()) { - q = map_; + q = impl_.map_; } // All entries between p and q have their initial position between p and q @@ -243,7 +277,7 @@ Value TemplateHashMapImpl::Remove( } // Find the initial position for the entry at position q. - Entry* r = map_ + (q->hash & (capacity_ - 1)); + Entry* r = impl_.map_ + (q->hash & (capacity() - 1)); // If the entry at position q has its initial position outside the range // between p and q it can be moved forward to position p and will still be @@ -256,7 +290,7 @@ Value TemplateHashMapImpl::Remove( // Clear the entry which is allowed to en emptied. p->clear(); - occupancy_--; + impl_.occupancy_--; return value; } @@ -264,17 +298,17 @@ template void TemplateHashMapImpl::Clear() { // Mark all entries as empty. - for (size_t i = 0; i < capacity_; ++i) { - map_[i].clear(); + for (size_t i = 0; i < capacity(); ++i) { + impl_.map_[i].clear(); } - occupancy_ = 0; + impl_.occupancy_ = 0; } template typename TemplateHashMapImpl::Entry* TemplateHashMapImpl::Start() const { - return Next(map_ - 1); + return Next(impl_.map_ - 1); } template ::Entry* TemplateHashMapImpl::Next( Entry* entry) const { const Entry* end = map_end(); - DCHECK(map_ - 1 <= entry && entry < end); + DCHECK(impl_.map_ - 1 <= entry && entry < end); for (entry++; entry < end; entry++) { if (entry->exists()) { return entry; @@ -297,32 +331,33 @@ template ::Entry* TemplateHashMapImpl::Probe( const Key& key, uint32_t hash) const { - DCHECK(base::bits::IsPowerOfTwo(capacity_)); - size_t i = hash & (capacity_ - 1); - DCHECK(i < capacity_); - - DCHECK(occupancy_ < capacity_); // Guarantees loop termination. 
- while (map_[i].exists() && !match_(hash, map_[i].hash, key, map_[i].key)) { - i = (i + 1) & (capacity_ - 1); + DCHECK(base::bits::IsPowerOfTwo(capacity())); + size_t i = hash & (capacity() - 1); + DCHECK(i < capacity()); + + DCHECK(occupancy() < capacity()); // Guarantees loop termination. + Entry* map = impl_.map_; + while (map[i].exists() && + !impl_.match()(hash, map[i].hash, key, map[i].key)) { + i = (i + 1) & (capacity() - 1); } - return &map_[i]; + return &map[i]; } template typename TemplateHashMapImpl::Entry* TemplateHashMapImpl::FillEmptyEntry( - Entry* entry, const Key& key, const Value& value, uint32_t hash, - AllocationPolicy allocator) { + Entry* entry, const Key& key, const Value& value, uint32_t hash) { DCHECK(!entry->exists()); new (entry) Entry(key, value, hash); - occupancy_++; + impl_.occupancy_++; // Grow the map if we reached >= 80% occupancy. - if (occupancy_ + occupancy_ / 4 >= capacity_) { - Resize(allocator); + if (occupancy() + occupancy() / 4 >= capacity()) { + Resize(); entry = Probe(key, hash); } @@ -332,39 +367,39 @@ TemplateHashMapImpl::FillEmptyEntry( template void TemplateHashMapImpl::Initialize( - uint32_t capacity, AllocationPolicy allocator) { + uint32_t capacity) { DCHECK(base::bits::IsPowerOfTwo(capacity)); - map_ = reinterpret_cast(allocator.New(capacity * sizeof(Entry))); - if (map_ == nullptr) { + impl_.map_ = impl_.allocator().template NewArray(capacity); + if (impl_.map_ == nullptr) { FATAL("Out of memory: HashMap::Initialize"); return; } - capacity_ = capacity; + impl_.capacity_ = capacity; Clear(); } template -void TemplateHashMapImpl::Resize( - AllocationPolicy allocator) { - Entry* map = map_; - uint32_t n = occupancy_; +void TemplateHashMapImpl::Resize() { + Entry* old_map = impl_.map_; + uint32_t old_capacity = capacity(); + uint32_t n = occupancy(); // Allocate larger map. - Initialize(capacity_ * 2, allocator); + Initialize(capacity() * 2); // Rehash all current entries. 
- for (Entry* entry = map; n > 0; entry++) { + for (Entry* entry = old_map; n > 0; entry++) { if (entry->exists()) { Entry* new_entry = Probe(entry->key, entry->hash); - new_entry = FillEmptyEntry(new_entry, entry->key, entry->value, - entry->hash, allocator); + new_entry = + FillEmptyEntry(new_entry, entry->key, entry->value, entry->hash); n--; } } // Delete old map. - AllocationPolicy::Delete(map); + impl_.allocator().DeleteArray(old_map, old_capacity); } // Match function which compares hashes before executing a (potentially @@ -396,14 +431,14 @@ class CustomMatcherTemplateHashMapImpl public: using MatchFun = bool (*)(void*, void*); - CustomMatcherTemplateHashMapImpl( + explicit CustomMatcherTemplateHashMapImpl( MatchFun match, uint32_t capacity = Base::kDefaultHashMapCapacity, AllocationPolicy allocator = AllocationPolicy()) : Base(capacity, HashEqualityThenKeyMatcher(match), allocator) {} - CustomMatcherTemplateHashMapImpl( - const CustomMatcherTemplateHashMapImpl* original, + explicit CustomMatcherTemplateHashMapImpl( + const CustomMatcherTemplateHashMapImpl* original, AllocationPolicy allocator = AllocationPolicy()) : Base(original, allocator) {} @@ -432,9 +467,23 @@ class PointerTemplateHashMapImpl AllocationPolicy>; public: - PointerTemplateHashMapImpl(uint32_t capacity = Base::kDefaultHashMapCapacity, - AllocationPolicy allocator = AllocationPolicy()) + explicit PointerTemplateHashMapImpl( + uint32_t capacity = Base::kDefaultHashMapCapacity, + AllocationPolicy allocator = AllocationPolicy()) : Base(capacity, KeyEqualityMatcher(), allocator) {} + + PointerTemplateHashMapImpl(const PointerTemplateHashMapImpl& other, + AllocationPolicy allocator = AllocationPolicy()) + : Base(&other, allocator) {} + + PointerTemplateHashMapImpl(PointerTemplateHashMapImpl&& other) V8_NOEXCEPT + : Base(std::move(other)) {} + + PointerTemplateHashMapImpl& operator=(PointerTemplateHashMapImpl&& other) + V8_NOEXCEPT { + static_cast(*this) = std::move(other); + return *this; + } 
}; using HashMap = PointerTemplateHashMapImpl; @@ -477,17 +526,16 @@ class TemplateHashMap friend class TemplateHashMap; }; - TemplateHashMap(MatchFun match, - AllocationPolicy allocator = AllocationPolicy()) + explicit TemplateHashMap(MatchFun match, + AllocationPolicy allocator = AllocationPolicy()) : Base(Base::kDefaultHashMapCapacity, HashEqualityThenKeyMatcher(match), allocator) {} Iterator begin() const { return Iterator(this, this->Start()); } Iterator end() const { return Iterator(this, nullptr); } - Iterator find(Key* key, bool insert = false, - AllocationPolicy allocator = AllocationPolicy()) { + Iterator find(Key* key, bool insert = false) { if (insert) { - return Iterator(this, this->LookupOrInsert(key, key->Hash(), allocator)); + return Iterator(this, this->LookupOrInsert(key, key->Hash())); } return Iterator(this, this->Lookup(key, key->Hash())); } diff --git a/deps/v8/src/base/ieee754.cc b/deps/v8/src/base/ieee754.cc index 6212f21f119b12..1706b56dfd9001 100644 --- a/deps/v8/src/base/ieee754.cc +++ b/deps/v8/src/base/ieee754.cc @@ -86,22 +86,22 @@ namespace { /* Set the more significant 32 bits of a double from an int. */ -#define SET_HIGH_WORD(d, v) \ - do { \ - uint64_t bits = bit_cast(d); \ - bits &= V8_2PART_UINT64_C(0x00000000, FFFFFFFF); \ - bits |= static_cast(v) << 32; \ - (d) = bit_cast(bits); \ +#define SET_HIGH_WORD(d, v) \ + do { \ + uint64_t bits = bit_cast(d); \ + bits &= 0x0000'0000'FFFF'FFFF; \ + bits |= static_cast(v) << 32; \ + (d) = bit_cast(bits); \ } while (false) /* Set the less significant 32 bits of a double from an int. 
*/ -#define SET_LOW_WORD(d, v) \ - do { \ - uint64_t bits = bit_cast(d); \ - bits &= V8_2PART_UINT64_C(0xFFFFFFFF, 00000000); \ - bits |= static_cast(v); \ - (d) = bit_cast(bits); \ +#define SET_LOW_WORD(d, v) \ + do { \ + uint64_t bits = bit_cast(d); \ + bits &= 0xFFFF'FFFF'0000'0000; \ + bits |= static_cast(v); \ + (d) = bit_cast(bits); \ } while (false) int32_t __ieee754_rem_pio2(double x, double* y) V8_WARN_UNUSED_RESULT; diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h index 86d4b068d336f3..0bec8725227107 100644 --- a/deps/v8/src/base/iterator.h +++ b/deps/v8/src/base/iterator.h @@ -36,8 +36,7 @@ class iterator_range { typename std::iterator_traits::difference_type; iterator_range() : begin_(), end_() {} - template - iterator_range(ForwardIterator1 begin, ForwardIterator2 end) + iterator_range(ForwardIterator begin, ForwardIterator end) : begin_(begin), end_(end) {} iterator begin() { return begin_; } diff --git a/deps/v8/src/base/logging.h b/deps/v8/src/base/logging.h index 790018c98e9889..fe39f988225e96 100644 --- a/deps/v8/src/base/logging.h +++ b/deps/v8/src/base/logging.h @@ -134,6 +134,12 @@ V8_BASE_EXPORT void SetDcheckFunction(void (*dcheck_Function)(const char*, int, #endif +#if V8_HAS_CXX14_CONSTEXPR +#define CONSTEXPR_DCHECK(cond) DCHECK(cond) +#else +#define CONSTEXPR_DCHECK(cond) +#endif + // Define PrintCheckOperand for each T which defines operator<< for ostream. template typename std::enable_if< diff --git a/deps/v8/src/base/macros.h b/deps/v8/src/base/macros.h index e22dd00895ab7b..cea15280a485cd 100644 --- a/deps/v8/src/base/macros.h +++ b/deps/v8/src/base/macros.h @@ -171,22 +171,19 @@ V8_INLINE Dest bit_cast(Source const& source) { #endif #endif -// Helper macro to define no_sanitize attributes only with clang. 
-#if defined(__clang__) && defined(__has_attribute) -#if __has_attribute(no_sanitize) -#define CLANG_NO_SANITIZE(what) __attribute__((no_sanitize(what))) -#endif -#endif -#if !defined(CLANG_NO_SANITIZE) -#define CLANG_NO_SANITIZE(what) -#endif - // DISABLE_CFI_PERF -- Disable Control Flow Integrity checks for Perf reasons. -#define DISABLE_CFI_PERF CLANG_NO_SANITIZE("cfi") +#define DISABLE_CFI_PERF V8_CLANG_NO_SANITIZE("cfi") // DISABLE_CFI_ICALL -- Disable Control Flow Integrity indirect call checks, // useful because calls into JITed code can not be CFI verified. -#define DISABLE_CFI_ICALL CLANG_NO_SANITIZE("cfi-icall") +#ifdef V8_OS_WIN +// On Windows, also needs __declspec(guard(nocf)) for CFG. +#define DISABLE_CFI_ICALL \ + V8_CLANG_NO_SANITIZE("cfi-icall") \ + __declspec(guard(nocf)) +#else +#define DISABLE_CFI_ICALL V8_CLANG_NO_SANITIZE("cfi-icall") +#endif #if V8_CC_GNU #define V8_IMMEDIATE_CRASH() __builtin_trap() @@ -328,10 +325,10 @@ V8_INLINE A implicit_cast(A x) { #define V8PRIuPTR "lxu" #endif -// The following macro works on both 32 and 64-bit platforms. -// Usage: instead of writing 0x1234567890123456 -// write V8_2PART_UINT64_C(0x12345678,90123456); -#define V8_2PART_UINT64_C(a, b) (((static_cast(a) << 32) + 0x##b##u)) +// Make a uint64 from two uint32_t halves. +inline uint64_t make_uint64(uint32_t high, uint32_t low) { + return (uint64_t{high} << 32) + low; +} // Return the largest multiple of m which is <= x. template diff --git a/deps/v8/src/base/memory.h b/deps/v8/src/base/memory.h index 087f67291d201d..e2676a82e77a0f 100644 --- a/deps/v8/src/base/memory.h +++ b/deps/v8/src/base/memory.h @@ -76,6 +76,9 @@ static inline V ReadLittleEndianValue(V* p) { template static inline void WriteLittleEndianValue(V* p, V value) { + static_assert( + !std::is_array::value, + "Passing an array decays to pointer, causing unexpected results."); WriteLittleEndianValue(reinterpret_cast
(p), value); } diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h index 6610c7ffc33623..3c13e654c80cf5 100644 --- a/deps/v8/src/base/optional.h +++ b/deps/v8/src/base/optional.h @@ -557,33 +557,33 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional return *this; } - const T* operator->() const { - DCHECK(storage_.is_populated_); + constexpr const T* operator->() const { + CONSTEXPR_DCHECK(storage_.is_populated_); return &storage_.value_; } - T* operator->() { - DCHECK(storage_.is_populated_); + constexpr T* operator->() { + CONSTEXPR_DCHECK(storage_.is_populated_); return &storage_.value_; } - const T& operator*() const & { - DCHECK(storage_.is_populated_); + constexpr const T& operator*() const& { + CONSTEXPR_DCHECK(storage_.is_populated_); return storage_.value_; } - T& operator*() & { - DCHECK(storage_.is_populated_); + constexpr T& operator*() & { + CONSTEXPR_DCHECK(storage_.is_populated_); return storage_.value_; } - const T&& operator*() const && { - DCHECK(storage_.is_populated_); + constexpr const T&& operator*() const&& { + CONSTEXPR_DCHECK(storage_.is_populated_); return std::move(storage_.value_); } - T&& operator*() && { - DCHECK(storage_.is_populated_); + constexpr T&& operator*() && { + CONSTEXPR_DCHECK(storage_.is_populated_); return std::move(storage_.value_); } diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc index 76a0aff39953a4..98b2c690960336 100644 --- a/deps/v8/src/base/page-allocator.cc +++ b/deps/v8/src/base/page-allocator.cc @@ -42,6 +42,80 @@ void* PageAllocator::AllocatePages(void* hint, size_t size, size_t alignment, static_cast(access)); } +class SharedMemoryMapping : public ::v8::PageAllocator::SharedMemoryMapping { + public: + explicit SharedMemoryMapping(PageAllocator* page_allocator, void* ptr, + size_t size) + : page_allocator_(page_allocator), ptr_(ptr), size_(size) {} + ~SharedMemoryMapping() override { page_allocator_->FreePages(ptr_, size_); } + void* GetMemory() const 
override { return ptr_; } + + private: + PageAllocator* page_allocator_; + void* ptr_; + size_t size_; +}; + +class SharedMemory : public ::v8::PageAllocator::SharedMemory { + public: + SharedMemory(PageAllocator* allocator, void* memory, size_t size) + : allocator_(allocator), ptr_(memory), size_(size) {} + void* GetMemory() const override { return ptr_; } + size_t GetSize() const override { return size_; } + std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapTo( + void* new_address) const override { + if (allocator_->RemapShared(ptr_, new_address, size_)) { + return std::make_unique(allocator_, new_address, + size_); + } else { + return {}; + } + } + + ~SharedMemory() override { allocator_->FreePages(ptr_, size_); } + + private: + PageAllocator* allocator_; + void* ptr_; + size_t size_; +}; + +bool PageAllocator::CanAllocateSharedPages() { +#ifdef V8_OS_LINUX + return true; +#else + return false; +#endif +} + +std::unique_ptr +PageAllocator::AllocateSharedPages(size_t size, const void* original_address) { +#ifdef V8_OS_LINUX + void* ptr = + base::OS::AllocateShared(size, base::OS::MemoryPermission::kReadWrite); + CHECK_NOT_NULL(ptr); + memcpy(ptr, original_address, size); + bool success = base::OS::SetPermissions( + ptr, size, base::OS::MemoryPermission::kReadWrite); + CHECK(success); + + auto shared_memory = + std::make_unique(this, ptr, size); + return shared_memory; +#else + return {}; +#endif +} + +void* PageAllocator::RemapShared(void* old_address, void* new_address, + size_t size) { +#ifdef V8_OS_LINUX + return base::OS::RemapShared(old_address, new_address, size); +#else + return nullptr; +#endif +} + bool PageAllocator::FreePages(void* address, size_t size) { return base::OS::Free(address, size); } diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h index 2b8ee1a5e5e740..a98f0847907ae2 100644 --- a/deps/v8/src/base/page-allocator.h +++ b/deps/v8/src/base/page-allocator.h @@ -5,6 +5,8 @@ #ifndef 
V8_BASE_PAGE_ALLOCATOR_H_ #define V8_BASE_PAGE_ALLOCATOR_H_ +#include + #include "include/v8-platform.h" #include "src/base/base-export.h" #include "src/base/compiler-specific.h" @@ -12,6 +14,8 @@ namespace v8 { namespace base { +class SharedMemory; + class V8_BASE_EXPORT PageAllocator : public NON_EXPORTED_BASE(::v8::PageAllocator) { public: @@ -29,6 +33,11 @@ class V8_BASE_EXPORT PageAllocator void* AllocatePages(void* hint, size_t size, size_t alignment, PageAllocator::Permission access) override; + bool CanAllocateSharedPages() override; + + std::unique_ptr AllocateSharedPages( + size_t size, const void* original_address) override; + bool FreePages(void* address, size_t size) override; bool ReleasePages(void* address, size_t size, size_t new_size) override; @@ -39,6 +48,10 @@ class V8_BASE_EXPORT PageAllocator bool DiscardSystemPages(void* address, size_t size) override; private: + friend class v8::base::SharedMemory; + + void* RemapShared(void* old_address, void* new_address, size_t size); + const size_t allocate_page_size_; const size_t commit_page_size_; }; diff --git a/deps/v8/src/base/platform/platform-linux.cc b/deps/v8/src/base/platform/platform-linux.cc index 26efd100df1603..5b619fb007380d 100644 --- a/deps/v8/src/base/platform/platform-linux.cc +++ b/deps/v8/src/base/platform/platform-linux.cc @@ -22,7 +22,7 @@ #include // open #include #include // index -#include // mmap & munmap +#include // mmap & munmap & mremap #include // open #include // mmap & munmap #include // sysconf @@ -144,5 +144,16 @@ void OS::SignalCodeMovingGC() { void OS::AdjustSchedulingParams() {} +void* OS::RemapShared(void* old_address, void* new_address, size_t size) { + void* result = + mremap(old_address, 0, size, MREMAP_FIXED | MREMAP_MAYMOVE, new_address); + + if (result == MAP_FAILED) { + return nullptr; + } + DCHECK(result == new_address); + return result; +} + } // namespace base } // namespace v8 diff --git a/deps/v8/src/base/platform/platform-posix.cc 
b/deps/v8/src/base/platform/platform-posix.cc index 17fc5b508394c8..14294019d90dd0 100644 --- a/deps/v8/src/base/platform/platform-posix.cc +++ b/deps/v8/src/base/platform/platform-posix.cc @@ -111,6 +111,13 @@ const int kMmapFd = VM_MAKE_TAG(255); const int kMmapFd = -1; #endif // !V8_OS_MACOSX +#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64 +// During snapshot generation in cross builds, sysconf() runs on the Intel +// host and returns host page size, while the snapshot needs to use the +// target page size. +constexpr int kAppleArmPageSize = 1 << 14; +#endif + const int kMmapFdOffset = 0; // TODO(v8:10026): Add the right permission flag to make executable pages @@ -131,8 +138,12 @@ int GetProtectionFromMemoryPermission(OS::MemoryPermission access) { UNREACHABLE(); } -int GetFlagsForMemoryPermission(OS::MemoryPermission access) { - int flags = MAP_PRIVATE | MAP_ANONYMOUS; +enum class PageType { kShared, kPrivate }; + +int GetFlagsForMemoryPermission(OS::MemoryPermission access, + PageType page_type) { + int flags = MAP_ANONYMOUS; + flags |= (page_type == PageType::kShared) ? 
MAP_SHARED : MAP_PRIVATE; if (access == OS::MemoryPermission::kNoAccess) { #if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX flags |= MAP_NORESERVE; @@ -144,9 +155,10 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access) { return flags; } -void* Allocate(void* hint, size_t size, OS::MemoryPermission access) { +void* Allocate(void* hint, size_t size, OS::MemoryPermission access, + PageType page_type) { int prot = GetProtectionFromMemoryPermission(access); - int flags = GetFlagsForMemoryPermission(access); + int flags = GetFlagsForMemoryPermission(access, page_type); void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset); if (result == MAP_FAILED) return nullptr; return result; @@ -226,12 +238,20 @@ int OS::ActivationFrameAlignment() { // static size_t OS::AllocatePageSize() { +#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64 + return kAppleArmPageSize; +#else return static_cast(sysconf(_SC_PAGESIZE)); +#endif } // static size_t OS::CommitPageSize() { +#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64 + static size_t page_size = kAppleArmPageSize; +#else static size_t page_size = getpagesize(); +#endif return page_size; } @@ -250,12 +270,10 @@ void* OS::GetRandomMmapAddr() { MutexGuard guard(rng_mutex.Pointer()); GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr)); } -#if defined(__APPLE__) -#if V8_TARGET_ARCH_ARM64 +#if defined(__APPLE__) && V8_TARGET_ARCH_ARM64 DCHECK_EQ(1 << 14, AllocatePageSize()); raw_addr = RoundDown(raw_addr, 1 << 14); #endif -#endif #if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \ defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER) // If random hint addresses interfere with address ranges hard coded in @@ -265,7 +283,7 @@ void* OS::GetRandomMmapAddr() { raw_addr &= 0x007fffff0000ULL; raw_addr += 0x7e8000000000ULL; #else -#if V8_TARGET_ARCH_X64 +#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 // Currently available CPUs have 48 bits of virtual addressing. 
Truncate // the hint address to 46 bits to give the kernel a fighting chance of // fulfilling our placement request. @@ -338,7 +356,7 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, // Add the maximum misalignment so we are guaranteed an aligned base address. size_t request_size = size + (alignment - page_size); request_size = RoundUp(request_size, OS::AllocatePageSize()); - void* result = base::Allocate(hint, request_size, access); + void* result = base::Allocate(hint, request_size, access, PageType::kPrivate); if (result == nullptr) return nullptr; // Unmap memory allocated before the aligned base address. @@ -363,6 +381,12 @@ void* OS::Allocate(void* hint, size_t size, size_t alignment, return static_cast(aligned_base); } +// static +void* OS::AllocateShared(size_t size, MemoryPermission access) { + DCHECK_EQ(0, size % AllocatePageSize()); + return base::Allocate(nullptr, size, access, PageType::kShared); +} + // static bool OS::Free(void* address, const size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % AllocatePageSize()); @@ -395,7 +419,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { // The cost is a syscall that effectively no-ops. // TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary. // https://crbug.com/823915 -#if defined(OS_MACOSX) +#if defined(V8_OS_MACOSX) if (access != OS::MemoryPermission::kNoAccess) madvise(address, size, MADV_FREE_REUSE); #endif @@ -406,7 +430,7 @@ bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) { bool OS::DiscardSystemPages(void* address, size_t size) { DCHECK_EQ(0, reinterpret_cast(address) % CommitPageSize()); DCHECK_EQ(0, size % CommitPageSize()); -#if defined(OS_MACOSX) +#if defined(V8_OS_MACOSX) // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also // marks the pages with the reusable bit, which allows both Activity Monitor // and memory-infra to correctly track the pages. 
@@ -531,7 +555,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name, OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, size_t size, void* initial) { if (FILE* file = fopen(name, "w+")) { - if (size == 0) return new PosixMemoryMappedFile(file, 0, 0); + if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0); size_t result = fwrite(initial, 1, size, file); if (result == size && !ferror(file)) { void* memory = mmap(OS::GetRandomMmapAddr(), result, diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h index af55036a006aca..d5f59d1d7a8d8a 100644 --- a/deps/v8/src/base/platform/platform.h +++ b/deps/v8/src/base/platform/platform.h @@ -274,6 +274,13 @@ class V8_BASE_EXPORT OS { size_t alignment, MemoryPermission access); + V8_WARN_UNUSED_RESULT static void* AllocateShared(size_t size, + MemoryPermission access); + + V8_WARN_UNUSED_RESULT static void* RemapShared(void* old_address, + void* new_address, + size_t size); + V8_WARN_UNUSED_RESULT static bool Free(void* address, const size_t size); V8_WARN_UNUSED_RESULT static bool Release(void* address, size_t size); diff --git a/deps/v8/src/base/platform/time.cc b/deps/v8/src/base/platform/time.cc index a12a5b0d0a7f9d..e72f90d2147965 100644 --- a/deps/v8/src/base/platform/time.cc +++ b/deps/v8/src/base/platform/time.cc @@ -327,7 +327,7 @@ class Clock final { namespace { DEFINE_LAZY_LEAKY_OBJECT_GETTER(Clock, GetClock) -} +} // namespace Time Time::Now() { return GetClock()->Now(); } diff --git a/deps/v8/src/base/platform/time.h b/deps/v8/src/base/platform/time.h index 5f69129ecbc4cc..7f1fe9d632185e 100644 --- a/deps/v8/src/base/platform/time.h +++ b/deps/v8/src/base/platform/time.h @@ -14,6 +14,7 @@ #include "src/base/base-export.h" #include "src/base/bits.h" #include "src/base/macros.h" +#include "src/base/safe_conversions.h" #if V8_OS_WIN #include "src/base/win32-headers.h" #endif @@ -36,7 +37,7 @@ class TimeTicks; namespace time_internal { 
template class TimeBase; -} +} // namespace time_internal class TimeConstants { public: @@ -90,6 +91,11 @@ class V8_BASE_EXPORT TimeDelta final { return TimeDelta(nanoseconds / TimeConstants::kNanosecondsPerMicrosecond); } + static TimeDelta FromMillisecondsD(double milliseconds) { + return FromDouble(milliseconds * + TimeConstants::kMicrosecondsPerMillisecond); + } + // Returns the maximum time delta, which should be greater than any reasonable // time delta we might compare it to. Adding or subtracting the maximum time // delta to a time or another time delta has an undefined result. @@ -201,6 +207,9 @@ class V8_BASE_EXPORT TimeDelta final { } private: + // TODO(v8:10620): constexpr requires constexpr saturated_cast. + static inline TimeDelta FromDouble(double value); + template friend class time_internal::TimeBase; // Constructs a delta given the duration in microseconds. This is private // to avoid confusion by callers with an integer constructor. Use @@ -211,6 +220,11 @@ class V8_BASE_EXPORT TimeDelta final { int64_t delta_; }; +// static +TimeDelta TimeDelta::FromDouble(double value) { + return TimeDelta(saturated_cast(value)); +} + // static constexpr TimeDelta TimeDelta::Max() { return TimeDelta(std::numeric_limits::max()); diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc index 6905b83410190e..9224dc99dc3622 100644 --- a/deps/v8/src/base/region-allocator.cc +++ b/deps/v8/src/base/region-allocator.cc @@ -3,7 +3,9 @@ // found in the LICENSE file. 
#include "src/base/region-allocator.h" + #include "src/base/bits.h" +#include "src/base/logging.h" #include "src/base/macros.h" namespace v8 { @@ -18,7 +20,8 @@ constexpr int kMaxRandomizationAttempts = 3; RegionAllocator::RegionAllocator(Address memory_region_begin, size_t memory_region_size, size_t page_size) - : whole_region_(memory_region_begin, memory_region_size, false), + : whole_region_(memory_region_begin, memory_region_size, + RegionState::kFree), region_size_in_pages_(size() / page_size), max_load_for_randomization_( static_cast(size() * kMaxLoadFactorForRandomization)), @@ -47,7 +50,7 @@ RegionAllocator::AllRegionsSet::iterator RegionAllocator::FindRegion( Address address) { if (!whole_region_.contains(address)) return all_regions_.end(); - Region key(address, 0, false); + Region key(address, 0, RegionState::kFree); AllRegionsSet::iterator iter = all_regions_.upper_bound(&key); // Regions in |all_regions_| are compared by end() values and key's end() // points exactly to the address we are querying, so the upper_bound will @@ -63,13 +66,13 @@ void RegionAllocator::FreeListAddRegion(Region* region) { } RegionAllocator::Region* RegionAllocator::FreeListFindRegion(size_t size) { - Region key(0, size, false); + Region key(0, size, RegionState::kFree); auto iter = free_regions_.lower_bound(&key); return iter == free_regions_.end() ? nullptr : *iter; } void RegionAllocator::FreeListRemoveRegion(Region* region) { - DCHECK(!region->is_used()); + DCHECK(region->is_free()); auto iter = free_regions_.find(region); DCHECK_NE(iter, free_regions_.end()); DCHECK_EQ(region, *iter); @@ -85,10 +88,11 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region, DCHECK_GT(region->size(), new_size); // Create new region and put it to the lists after the |region|. 
- bool used = region->is_used(); + DCHECK(!region->is_excluded()); + RegionState state = region->state(); Region* new_region = - new Region(region->begin() + new_size, region->size() - new_size, used); - if (!used) { + new Region(region->begin() + new_size, region->size() - new_size, state); + if (state == RegionState::kFree) { // Remove region from the free list before updating it's size. FreeListRemoveRegion(region); } @@ -96,7 +100,7 @@ RegionAllocator::Region* RegionAllocator::Split(Region* region, all_regions_.insert(new_region); - if (!used) { + if (state == RegionState::kFree) { FreeListAddRegion(region); FreeListAddRegion(new_region); } @@ -132,7 +136,7 @@ RegionAllocator::Address RegionAllocator::AllocateRegion(size_t size) { // Mark region as used. FreeListRemoveRegion(region); - region->set_is_used(true); + region->set_state(RegionState::kAllocated); return region->begin(); } @@ -146,7 +150,7 @@ RegionAllocator::Address RegionAllocator::AllocateRegion( rng->NextBytes(&random, sizeof(random)); size_t random_offset = page_size_ * (random % region_size_in_pages_); Address address = begin() + random_offset; - if (AllocateRegionAt(address, size)) { + if (AllocateRegionAt(address, size, RegionState::kAllocated)) { return address; } } @@ -155,10 +159,12 @@ RegionAllocator::Address RegionAllocator::AllocateRegion( return AllocateRegion(size); } -bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) { +bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size, + RegionState region_state) { DCHECK(IsAligned(requested_address, page_size_)); DCHECK_NE(size, 0); DCHECK(IsAligned(size, page_size_)); + DCHECK_NE(region_state, RegionState::kFree); Address requested_end = requested_address + size; DCHECK_LE(requested_end, end()); @@ -171,7 +177,7 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) { } region = *region_iter; } - if (region->is_used() || region->end() < requested_end) { + if 
(!region->is_free() || region->end() < requested_end) { return false; } // Found free region that includes the requested one. @@ -190,7 +196,7 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size) { // Mark region as used. FreeListRemoveRegion(region); - region->set_is_used(true); + region->set_state(region_state); return true; } @@ -202,7 +208,7 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) { return 0; } Region* region = *region_iter; - if (region->begin() != address || !region->is_used()) { + if (region->begin() != address || !region->is_allocated()) { return 0; } @@ -214,14 +220,14 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) { ++region_iter; } size_t size = region->size(); - region->set_is_used(false); + region->set_state(RegionState::kFree); // Merge current region with the surrounding ones if they are free. if (region->end() != whole_region_.end()) { // There must be a range after the current one. AllRegionsSet::iterator next_iter = std::next(region_iter); DCHECK_NE(next_iter, all_regions_.end()); - if (!(*next_iter)->is_used()) { + if ((*next_iter)->is_free()) { // |next| region object will be deleted during merge, remove it from // the free list. FreeListRemoveRegion(*next_iter); @@ -232,7 +238,7 @@ size_t RegionAllocator::TrimRegion(Address address, size_t new_size) { // There must be a range before the current one. AllRegionsSet::iterator prev_iter = std::prev(region_iter); DCHECK_NE(prev_iter, all_regions_.end()); - if (!(*prev_iter)->is_used()) { + if ((*prev_iter)->is_free()) { // |prev| region's size will change, we'll have to re-insert it into // the proper place of the free list. 
FreeListRemoveRegion(*prev_iter); @@ -252,7 +258,7 @@ size_t RegionAllocator::CheckRegion(Address address) { return 0; } Region* region = *region_iter; - if (region->begin() != address || !region->is_used()) { + if (region->begin() != address || region->is_free()) { return 0; } return region->size(); @@ -265,13 +271,28 @@ bool RegionAllocator::IsFree(Address address, size_t size) { return true; } Region* region = *region_iter; - return !region->is_used() && region->contains(address, size); + return region->is_free() && region->contains(address, size); +} + +namespace { +const char* RegionStateToString(RegionAllocator::RegionState state) { + switch (state) { + case RegionAllocator::RegionState::kFree: + return "free"; + case RegionAllocator::RegionState::kExcluded: + return "excluded"; + case RegionAllocator::RegionState::kAllocated: + return "used"; + default: + UNREACHABLE(); + } } +} // namespace void RegionAllocator::Region::Print(std::ostream& os) const { std::ios::fmtflags flags = os.flags(std::ios::hex | std::ios::showbase); os << "[" << begin() << ", " << end() << "), size: " << size(); - os << ", " << (is_used() ? "used" : "free"); + os << ", " << RegionStateToString(state_); os.flags(flags); } diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h index 4b1354adf50133..887f123b10c90d 100644 --- a/deps/v8/src/base/region-allocator.h +++ b/deps/v8/src/base/region-allocator.h @@ -29,6 +29,15 @@ class V8_BASE_EXPORT RegionAllocator final { static constexpr Address kAllocationFailure = static_cast
(-1); + enum class RegionState { + // The region can be allocated from. + kFree, + // The region has been carved out of the wider area and is not allocatable. + kExcluded, + // The region has been allocated and is managed by a RegionAllocator. + kAllocated, + }; + RegionAllocator(Address address, size_t size, size_t page_size); ~RegionAllocator(); @@ -43,7 +52,12 @@ class V8_BASE_EXPORT RegionAllocator final { // true. // This kind of allocation is supposed to be used during setup phase to mark // certain regions as used or for randomizing regions displacement. - bool AllocateRegionAt(Address requested_address, size_t size); + // By default regions are marked as used, but can also be allocated as + // RegionState::kExcluded to prevent the RegionAllocator from using that + // memory range, which is useful when reserving any area to remap shared + // memory into. + bool AllocateRegionAt(Address requested_address, size_t size, + RegionState region_state = RegionState::kAllocated); // Frees region at given |address|, returns the size of the region. // There must be a used region starting at given address otherwise nothing @@ -87,16 +101,20 @@ class V8_BASE_EXPORT RegionAllocator final { private: class Region : public AddressRegion { public: - Region(Address address, size_t size, bool is_used) - : AddressRegion(address, size), is_used_(is_used) {} + Region(Address address, size_t size, RegionState state) + : AddressRegion(address, size), state_(state) {} - bool is_used() const { return is_used_; } - void set_is_used(bool used) { is_used_ = used; } + bool is_free() const { return state_ == RegionState::kFree; } + bool is_allocated() const { return state_ == RegionState::kAllocated; } + bool is_excluded() const { return state_ == RegionState::kExcluded; } + void set_state(RegionState state) { state_ = state; } + + RegionState state() { return state_; } void Print(std::ostream& os) const; private: - bool is_used_; + RegionState state_; }; // The whole region. 
@@ -153,10 +171,11 @@ class V8_BASE_EXPORT RegionAllocator final { void Merge(AllRegionsSet::iterator prev_iter, AllRegionsSet::iterator next_iter); + FRIEND_TEST(RegionAllocatorTest, AllocateExcluded); FRIEND_TEST(RegionAllocatorTest, AllocateRegionRandom); - FRIEND_TEST(RegionAllocatorTest, Fragmentation); - FRIEND_TEST(RegionAllocatorTest, FindRegion); FRIEND_TEST(RegionAllocatorTest, Contains); + FRIEND_TEST(RegionAllocatorTest, FindRegion); + FRIEND_TEST(RegionAllocatorTest, Fragmentation); DISALLOW_COPY_AND_ASSIGN(RegionAllocator); }; diff --git a/deps/v8/src/base/template-utils.h b/deps/v8/src/base/template-utils.h index 8f89672e1af66e..d6d4ca32d946ca 100644 --- a/deps/v8/src/base/template-utils.h +++ b/deps/v8/src/base/template-utils.h @@ -7,6 +7,7 @@ #include #include +#include #include #include diff --git a/deps/v8/src/base/utils/random-number-generator.h b/deps/v8/src/base/utils/random-number-generator.h index 8eb8563175ab24..55e20d544f1303 100644 --- a/deps/v8/src/base/utils/random-number-generator.h +++ b/deps/v8/src/base/utils/random-number-generator.h @@ -130,9 +130,9 @@ class V8_BASE_EXPORT RandomNumberGenerator final { static uint64_t MurmurHash3(uint64_t); private: - static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d); + static const int64_t kMultiplier = 0x5'deec'e66d; static const int64_t kAddend = 0xb; - static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff); + static const int64_t kMask = 0xffff'ffff'ffff; int Next(int bits) V8_WARN_UNUSED_RESULT; diff --git a/deps/v8/src/builtins/aggregate-error.tq b/deps/v8/src/builtins/aggregate-error.tq new file mode 100644 index 00000000000000..0f4a47b3e737d4 --- /dev/null +++ b/deps/v8/src/builtins/aggregate-error.tq @@ -0,0 +1,49 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/objects/js-objects.h' + +namespace error { + +transitioning javascript builtin AggregateErrorConstructor( + js-implicit context: NativeContext, target: JSFunction, + newTarget: JSAny)(...arguments): JSAny { + // This function is implementing the spec as suggested by + // https://github.com/tc39/proposal-promise-any/pull/59 . FIXME(marja): + // change this if the PR is declined, otherwise remove the comment. + + // 1. If NewTarget is undefined, let newTarget be the active function + // object, else let newTarget be NewTarget. + // 2. Let O be ? OrdinaryCreateFromConstructor(newTarget, + // "%AggregateError.prototype%", « [[ErrorData]], [[AggregateErrors]] »). + // 3. If _message_ is not _undefined_, then + // a. Let msg be ? ToString(_message_). + // b. Let msgDesc be the PropertyDescriptor { [[Value]]: _msg_, + // [[Writable]]: *true*, [[Enumerable]]: *false*, [[Configurable]]: *true* + // c. Perform ! DefinePropertyOrThrow(_O_, *"message"*, _msgDesc_). + const message: JSAny = arguments[1]; + const obj: JSObject = + ConstructAggregateErrorHelper(context, target, newTarget, message); + + // 4. Let errorsList be ? IterableToList(errors). + const errors: JSAny = arguments[0]; + const errorsList = iterator::IterableToListWithSymbolLookup(errors); + + // 5. Perform ! DefinePropertyOrThrow(_O_, `"errors"`, Property Descriptor { + // [[Configurable]]: *true*, [[Enumerable]]: *false*, [[Writable]]: *true*, + // [[Value]]: ! CreateArrayFromList(_errorsList_) }). + SetOwnPropertyIgnoreAttributes( + obj, ErrorsStringConstant(), errorsList, + SmiConstant(PropertyAttributes::DONT_ENUM)); + + // 6. Return O. 
+ return obj; +} + +extern transitioning runtime ConstructAggregateErrorHelper( + Context, JSFunction, JSAny, Object): JSObject; + +extern transitioning runtime ConstructInternalAggregateErrorHelper( + Context, Object): JSObject; +} diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index d340fd20b0a72b..982c1abbd0971b 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -1572,13 +1572,29 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, bool with_result) { const RegisterConfiguration* config(RegisterConfiguration::Default()); int allocatable_register_count = config->num_allocatable_general_registers(); + UseScratchRegisterScope temps(masm); + Register scratch = temps.Acquire(); // Temp register is not allocatable. if (with_result) { +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin) { + __ mov(scratch, r0); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. + __ str( + r0, + MemOperand( + sp, config->num_allocatable_general_registers() * kPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize)); + } +#else // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. __ str(r0, MemOperand( sp, config->num_allocatable_general_registers() * kPointerSize + BuiltinContinuationFrameConstants::kFixedFrameSize)); +#endif } for (int i = allocatable_register_count - 1; i >= 0; --i) { int code = config->GetAllocatableGeneralCode(i); @@ -1587,13 +1603,22 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ SmiUntag(Register::from_code(code)); } } +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin && with_result) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. r0 contains the arguments count, the return value + // from LAZY is always the last argument. 
+ __ add(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + __ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2)); + // Recover arguments count. + __ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount)); + } +#endif __ ldr(fp, MemOperand( sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); - // Load builtin index (stored as a Smi) and use it to get the builtin start // address from the builtins table. - UseScratchRegisterScope temps(masm); - Register builtin = temps.Acquire(); + Register builtin = scratch; __ Pop(builtin); __ add(sp, sp, Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); @@ -2086,7 +2111,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, // Forward the arguments from the caller frame. { Label loop; +#ifdef V8_REVERSE_JSARGS + // Skips frame pointer and old receiver. + __ add(r4, r4, Operand(2 * kPointerSize)); + __ pop(r8); // Save new receiver. +#else + // Skips frame pointer. __ add(r4, r4, Operand(kPointerSize)); +#endif __ add(r0, r0, r5); __ bind(&loop); { @@ -2095,6 +2127,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, __ sub(r5, r5, Operand(1), SetCC); __ b(ne, &loop); } +#ifdef V8_REVERSE_JSARGS + __ push(r8); // Recover new receiver. +#endif } } __ b(&stack_done); @@ -3025,6 +3060,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. 
+ __ Trap(); +} + namespace { int AddressOffset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index 46ab7a61fa88d2..e34b07908bb455 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -1064,7 +1064,13 @@ static void TailCallOptimizedCodeSlot(MacroAssembler* masm, ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure); static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch"); __ LoadCodeObjectEntry(x2, optimized_code_entry); - __ Jump(x2); + + { + UseScratchRegisterScope temps(masm); + temps.Exclude(x17); + __ Mov(x17, x2); + __ Jump(x17); + } // Optimized code slot contains deoptimized code, evict it and re-enter the // closure's code. @@ -1673,7 +1679,11 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) { __ Mov(x1, Operand(x23, LSL, kSystemPointerSizeLog2)); __ Ldr(kJavaScriptCallCodeStartRegister, MemOperand(kInterpreterDispatchTableRegister, x1)); - __ Jump(kJavaScriptCallCodeStartRegister); + + UseScratchRegisterScope temps(masm); + temps.Exclude(x17); + __ Mov(x17, kJavaScriptCallCodeStartRegister); + __ Jump(x17); } void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) { @@ -1736,14 +1746,28 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, allocatable_register_count)) * kSystemPointerSize; + UseScratchRegisterScope temps(masm); + Register scratch = temps.AcquireX(); // Temp register is not allocatable. + // Set up frame pointer. __ Add(fp, sp, frame_size); if (with_result) { +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin) { + __ mov(scratch, x0); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. 
+ __ Str(x0, MemOperand( + fp, BuiltinContinuationFrameConstants::kCallerSPOffset)); + } +#else // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. __ Str(x0, MemOperand(fp, BuiltinContinuationFrameConstants::kCallerSPOffset)); +#endif } // Restore registers in pairs. @@ -1766,10 +1790,25 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, if (java_script_builtin) __ SmiUntag(kJavaScriptCallArgCountRegister); +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin && with_result) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. r0 contains the arguments count, the return value + // from LAZY is always the last argument. + __ add(x0, x0, + BuiltinContinuationFrameConstants::kCallerSPOffset / + kSystemPointerSize); + __ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2)); + // Recover argument count. + __ sub(x0, x0, + BuiltinContinuationFrameConstants::kCallerSPOffset / + kSystemPointerSize); + } +#endif + // Load builtin index (stored as a Smi) and use it to get the builtin start // address from the builtins table. - UseScratchRegisterScope temps(masm); - Register builtin = temps.AcquireX(); + Register builtin = scratch; __ Ldr( builtin, MemOperand(fp, BuiltinContinuationFrameConstants::kBuiltinIndexOffset)); @@ -3353,6 +3392,8 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { kWasmCompileLazyFuncIndexRegister.W()); __ SmiTag(kWasmCompileLazyFuncIndexRegister, kWasmCompileLazyFuncIndexRegister); + + UseScratchRegisterScope temps(masm); { HardAbortScope hard_abort(masm); // Avoid calls to Abort. FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY); @@ -3374,15 +3415,19 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) { // set the current context on the isolate. __ Mov(cp, Smi::zero()); __ CallRuntime(Runtime::kWasmCompileLazy, 2); + + // Exclude x17 from the scope, there are hardcoded uses of it below. 
+ temps.Exclude(x17); + // The entrypoint address is the return value. - __ mov(x8, kReturnRegister0); + __ Mov(x17, kReturnRegister0); // Restore registers. __ PopDRegList(fp_regs); __ PopXRegList(gp_regs); } // Finally, jump to the entrypoint. - __ Jump(x8); + __ Jump(x17); } void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) { @@ -3605,10 +3650,15 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, // underlying register is caller-saved and can be arbitrarily clobbered. __ ResetSpeculationPoisonRegister(); - // Compute the handler entry address and jump to it. - __ Mov(x10, pending_handler_entrypoint_address); - __ Ldr(x10, MemOperand(x10)); - __ Br(x10); + // Compute the handler entry address and jump to it. We use x17 here for the + // jump target, as this jump can occasionally end up at the start of + // InterpreterEnterBytecodeDispatch, which when CFI is enabled starts with + // a "BTI c". + UseScratchRegisterScope temps(masm); + temps.Exclude(x17); + __ Mov(x17, pending_handler_entrypoint_address); + __ Ldr(x17, MemOperand(x17)); + __ Br(x17); } void Builtins::Generate_DoubleToI(MacroAssembler* masm) { @@ -3678,6 +3728,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. 
+ __ Trap(); +} + namespace { // The number of register that CallApiFunctionAndReturn will need to save on diff --git a/deps/v8/src/builtins/array-join.tq b/deps/v8/src/builtins/array-join.tq index 08d0cbf894bf49..23266c4e5ad1f3 100644 --- a/deps/v8/src/builtins/array-join.tq +++ b/deps/v8/src/builtins/array-join.tq @@ -296,7 +296,7 @@ transitioning macro ArrayJoinImpl(implicit context: Context)( } case (obj: JSAny): { if (IsNullOrUndefined(obj)) continue; - next = ToString(context, obj); + next = string::ToString(context, obj); } } } @@ -414,17 +414,18 @@ const kMinJoinStackSize: constexpr int31 generates 'JSArray::kMinJoinStackSize'; macro LoadJoinStack(implicit context: Context)(): FixedArray labels IfUninitialized { - const nativeContext: NativeContext = LoadNativeContext(context); - const stack: HeapObject = UnsafeCast( - nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX]); - if (stack == Undefined) goto IfUninitialized; - assert(IsFixedArray(stack)); - return UnsafeCast(stack); + typeswitch (*NativeContextSlot(ContextSlot::ARRAY_JOIN_STACK_INDEX)) { + case (Undefined): { + goto IfUninitialized; + } + case (stack: FixedArray): { + return stack; + } + } } macro SetJoinStack(implicit context: Context)(stack: FixedArray): void { - const nativeContext: NativeContext = LoadNativeContext(context); - nativeContext[NativeContextSlot::ARRAY_JOIN_STACK_INDEX] = stack; + *NativeContextSlot(ContextSlot::ARRAY_JOIN_STACK_INDEX) = stack; } // Adds a receiver to the stack. The FixedArray will automatically grow to diff --git a/deps/v8/src/builtins/array-map.tq b/deps/v8/src/builtins/array-map.tq index 8ff3cbaccdc9ac..48c8f876810da0 100644 --- a/deps/v8/src/builtins/array-map.tq +++ b/deps/v8/src/builtins/array-map.tq @@ -3,18 +3,28 @@ // found in the LICENSE file. namespace array { + +// Continuation for lazy deopt triggered by allocation of the result array. 
+transitioning javascript builtin +ArrayMapPreLoopLazyDeoptContinuation( + js-implicit context: NativeContext, receiver: JSAny)( + callback: JSAny, thisArg: JSAny, length: JSAny, result: JSAny): JSAny { + const jsreceiver = Cast(receiver) otherwise unreachable; + const outputArray = Cast(result) otherwise unreachable; + const numberLength = Cast(length) otherwise unreachable; + + const callbackfn = Cast(callback) + otherwise ThrowTypeError(MessageTemplate::kCalledNonCallable, callback); + return ArrayMapLoopContinuation( + jsreceiver, callbackfn, thisArg, outputArray, jsreceiver, kZero, + numberLength); +} + transitioning javascript builtin ArrayMapLoopEagerDeoptContinuation( js-implicit context: NativeContext, receiver: JSAny)( callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, length: JSAny): JSAny { - // All continuation points in the optimized filter implementation are - // after the ToObject(O) call that ensures we are dealing with a - // JSReceiver. - // - // Also, this great mass of casts is necessary because the signature - // of Torque javascript builtins requires JSAny type for all parameters - // other than {context}. const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; const outputArray = Cast(array) otherwise unreachable; @@ -31,17 +41,14 @@ ArrayMapLoopLazyDeoptContinuation( js-implicit context: NativeContext, receiver: JSAny)( callback: JSAny, thisArg: JSAny, array: JSAny, initialK: JSAny, length: JSAny, result: JSAny): JSAny { - // All continuation points in the optimized filter implementation are - // after the ToObject(O) call that ensures we are dealing with a - // JSReceiver. 
const jsreceiver = Cast(receiver) otherwise unreachable; const callbackfn = Cast(callback) otherwise unreachable; const outputArray = Cast(array) otherwise unreachable; let numberK = Cast(initialK) otherwise unreachable; const numberLength = Cast(length) otherwise unreachable; - // This custom lazy deopt point is right after the callback. map() needs - // to pick up at the next step, which is setting the callback result in + // This custom lazy deopt point is right after the callback. The continuation + // needs to pick up at the next step, which is setting the callback result in // the output array. After incrementing k, we can glide into the loop // continuation builtin. diff --git a/deps/v8/src/builtins/array-slice.tq b/deps/v8/src/builtins/array-slice.tq index 147dae6f72f46b..7b82f2bda33caf 100644 --- a/deps/v8/src/builtins/array-slice.tq +++ b/deps/v8/src/builtins/array-slice.tq @@ -36,21 +36,18 @@ macro HandleFastAliasedSloppyArgumentsSlice( const sloppyElements: SloppyArgumentsElements = Cast(args.elements) otherwise Bailout; - const sloppyElementsLength: Smi = sloppyElements.length; - const parameterMapLength: Smi = - sloppyElementsLength - kSloppyArgumentsParameterMapStart; + const parameterMapLength: Smi = sloppyElements.length; // Check to make sure that the extraction will not access outside the // defined arguments const end: Smi = start + count; const unmappedElements: FixedArray = - Cast(sloppyElements.objects[kSloppyArgumentsArgumentsIndex]) + Cast(sloppyElements.arguments) otherwise Bailout; const unmappedElementsLength: Smi = unmappedElements.length; if (SmiAbove(end, unmappedElementsLength)) goto Bailout; - const argumentsContext: Context = - UnsafeCast(sloppyElements.objects[kSloppyArgumentsContextIndex]); + const argumentsContext: Context = sloppyElements.context; const arrayMap: Map = LoadJSArrayElementsMap(ElementsKind::HOLEY_ELEMENTS, context); @@ -63,10 +60,9 @@ macro HandleFastAliasedSloppyArgumentsSlice( // Fill in the part of the result 
that map to context-mapped parameters. for (let current: Smi = start; current < to; ++current) { - const e: Object = - sloppyElements.objects[current + kSloppyArgumentsParameterMapStart]; + const e: Object = sloppyElements.mapped_entries[current]; const newElement = UnsafeCast<(JSAny | TheHole)>( - e != TheHole ? argumentsContext[UnsafeCast(e)] : + e != TheHole ? argumentsContext.elements[UnsafeCast(e)] : unmappedElements.objects[current]); // It is safe to skip the write barrier here because resultElements was // allocated together with result in a folded allocation. diff --git a/deps/v8/src/builtins/array.tq b/deps/v8/src/builtins/array.tq index 92b2c520e28cce..a9b4b1235b242a 100644 --- a/deps/v8/src/builtins/array.tq +++ b/deps/v8/src/builtins/array.tq @@ -71,7 +71,7 @@ macro EnsureArrayLengthWritable(implicit context: Context)(map: Map): // guaranteed to stay the first property. const descriptors: DescriptorArray = map.instance_descriptors; const descriptor:&DescriptorEntry = - & descriptors.descriptors[kLengthDescriptorIndex]; + &descriptors.descriptors[kLengthDescriptorIndex]; assert(TaggedEqual(descriptor->key, LengthStringConstant())); const details: Smi = UnsafeCast(descriptor->details); if ((details & kAttributesReadOnlyMask) != 0) { diff --git a/deps/v8/src/builtins/base.tq b/deps/v8/src/builtins/base.tq index 1d2c4546461cd8..fc35c2a45737d9 100644 --- a/deps/v8/src/builtins/base.tq +++ b/deps/v8/src/builtins/base.tq @@ -11,6 +11,7 @@ #include 'src/objects/bigint.h' #include 'src/objects/elements-kind.h' #include 'src/objects/free-space.h' +#include 'src/objects/js-function.h' #include 'src/objects/js-generator.h' #include 'src/objects/js-promise.h' #include 'src/objects/js-regexp-string-iterator.h' @@ -78,7 +79,8 @@ type JSPrimitive = Numeric|String|Symbol|Boolean|Null|Undefined; // TheHole or FixedArray. 
type JSAny = JSReceiver|JSPrimitive; -type JSAnyNotNumber = BigInt|String|Symbol|Boolean|Null|Undefined|JSReceiver; +type JSAnyNotNumeric = String|Symbol|Boolean|Null|Undefined|JSReceiver; +type JSAnyNotNumber = BigInt|JSAnyNotNumeric; // This is the intersection of JSAny and HeapObject. type JSAnyNotSmi = JSAnyNotNumber|HeapNumber; @@ -134,6 +136,7 @@ const kDoubleHole: float64_or_hole = float64_or_hole{is_hole: true, value: 0}; // The HashTable inheritance hierarchy doesn't actually look like this in C++ // because it uses some class templates that we can't yet (and may never) // express in Torque, but this is the expected organization of instance types. +@doNotGenerateCast extern class HashTable extends FixedArray generates 'TNode'; extern class OrderedHashMap extends HashTable; extern class OrderedHashSet extends HashTable; @@ -141,7 +144,6 @@ extern class OrderedNameDictionary extends HashTable; extern class NameDictionary extends HashTable; extern class GlobalDictionary extends HashTable; extern class SimpleNumberDictionary extends HashTable; -extern class StringTable extends HashTable; extern class EphemeronHashTable extends HashTable; type ObjectHashTable extends HashTable generates 'TNode'; @@ -232,7 +234,8 @@ extern enum ElementsKind extends int32 { ... 
} -extern enum AllocationFlag constexpr 'CodeStubAssembler::AllocationFlag' { +extern enum AllocationFlag extends int32 +constexpr 'CodeStubAssembler::AllocationFlag' { kNone, kDoubleAlignment, kPretenured, @@ -264,6 +267,7 @@ extern enum MessageTemplate { kNotGeneric, kCalledNonCallable, kCalledOnNullOrUndefined, + kCannotConvertToPrimitive, kProtoObjectOrNull, kInvalidOffset, kInvalidTypedArrayLength, @@ -307,6 +311,7 @@ extern enum MessageTemplate { kProxyGetPrototypeOfNonExtensible, kProxySetPrototypeOfNonExtensible, kProxyDeletePropertyNonExtensible, + kUndefinedOrNullToObject, kWeakRefsCleanupMustBeCallable, kWasmTrapUnreachable, kWasmTrapMemOutOfBounds, @@ -320,11 +325,25 @@ extern enum MessageTemplate { kWasmTrapDataSegmentDropped, kWasmTrapElemSegmentDropped, kWasmTrapTableOutOfBounds, - kWasmTrapBrOnExnNullRef, - kWasmTrapRethrowNullRef, + kWasmTrapBrOnExnNull, + kWasmTrapRethrowNull, kWasmTrapNullDereference, kWasmTrapIllegalCast, kWasmTrapArrayOutOfBounds, + kWeakRefsRegisterTargetAndHoldingsMustNotBeSame, + kWeakRefsRegisterTargetMustBeObject, + kWeakRefsUnregisterTokenMustBeObject, + kWeakRefsWeakRefConstructorTargetMustBeObject, + ... +} + +extern enum PropertyAttributes extends int31 { + NONE, + READ_ONLY, + DONT_ENUM, + DONT_DELETE, + ALL_ATTRIBUTES_MASK, + FROZEN, ... 
} @@ -364,12 +383,6 @@ const kMaxRegularHeapObjectSize: constexpr int31 const kMaxNewSpaceFixedArrayElements: constexpr int31 generates 'FixedArray::kMaxRegularLength'; -const kSloppyArgumentsArgumentsIndex: constexpr int31 - generates 'SloppyArgumentsElements::kArgumentsIndex'; -const kSloppyArgumentsContextIndex: constexpr int31 - generates 'SloppyArgumentsElements::kContextIndex'; -const kSloppyArgumentsParameterMapStart: constexpr int31 - generates 'SloppyArgumentsElements::kParameterMapStart'; extern enum PrimitiveType { kString, kBoolean, kSymbol, kNumber } @@ -387,7 +400,9 @@ type Boolean = True|False; type NumberOrUndefined = Number|Undefined; +extern macro DefaultStringConstant(): String; extern macro EmptyStringConstant(): EmptyString; +extern macro ErrorsStringConstant(): String; extern macro FalseConstant(): False; extern macro Int32FalseConstant(): bool; extern macro Int32TrueConstant(): bool; @@ -396,11 +411,18 @@ extern macro LengthStringConstant(): String; extern macro MatchSymbolConstant(): Symbol; extern macro MessageStringConstant(): String; extern macro NanConstant(): NaN; +extern macro NameStringConstant(): String; extern macro NullConstant(): Null; +extern macro NumberStringConstant(): String; extern macro ReturnStringConstant(): String; +extern macro StringStringConstant(): String; extern macro TheHoleConstant(): TheHole; +extern macro ToPrimitiveSymbolConstant(): PublicSymbol; +extern macro ToStringStringConstant(): String; extern macro TrueConstant(): True; extern macro UndefinedConstant(): Undefined; +extern macro ValueOfStringConstant(): String; +extern macro WasmWrappedObjectSymbolConstant(): Symbol; const TheHole: TheHole = TheHoleConstant(); const Null: Null = NullConstant(); @@ -428,8 +450,6 @@ const UNSAFE_SKIP_WRITE_BARRIER: extern transitioning macro AllocateJSIteratorResult(implicit context: Context)( JSAny, Boolean): JSObject; -extern macro AllocateSyntheticFunctionContext( - NativeContext, constexpr int32): Context; extern class 
Filler extends HeapObject generates 'TNode'; @@ -454,12 +474,11 @@ extern macro SelectBooleanConstant(bool): Boolean; extern macro Print(constexpr string); extern macro Print(constexpr string, Object); extern macro Comment(constexpr string); -extern macro StaticAssert(bool); extern macro Print(Object); extern macro DebugBreak(); // ES6 7.1.4 ToInteger ( argument ) -transitioning macro ToIntegerImpl(implicit context: Context)(input: Object): +transitioning macro ToIntegerImpl(implicit context: Context)(input: JSAny): Number { let input = input; @@ -478,32 +497,38 @@ transitioning macro ToIntegerImpl(implicit context: Context)(input: Object): assert(IsNumberNormalized(result)); return result; } - case (ho: HeapObject): { - input = math::NonNumberToNumber(ho); + case (a: JSAnyNotNumber): { + input = conversion::NonNumberToNumber(a); } } } unreachable; } -transitioning builtin ToInteger(implicit context: Context)(input: Object): +transitioning builtin ToInteger(implicit context: Context)(input: JSAny): Number { return ToIntegerImpl(input); } @export -transitioning macro ToInteger_Inline(implicit context: Context)(input: Object): +transitioning macro ToInteger_Inline(implicit context: Context)(input: JSAny): Number { typeswitch (input) { case (s: Smi): { return s; } - case (ho: HeapObject): { - return ToInteger(ho); + case (JSAny): { + return ToInteger(input); } } } +extern enum BigIntHandling extends int32 +constexpr 'CodeStubAssembler::BigIntHandling' { kConvertToNumber, kThrow } + +extern transitioning macro ToNumber(implicit context: Context)( + JSAny, constexpr BigIntHandling): Number; + extern transitioning macro ToLength_Inline(implicit context: Context)(JSAny): Number; extern transitioning macro ToNumber_Inline(implicit context: Context)(JSAny): @@ -518,6 +543,8 @@ extern transitioning macro GetProperty(implicit context: Context)( JSAny, JSAny): JSAny; extern transitioning builtin SetProperty(implicit context: Context)( JSAny, JSAny, JSAny): JSAny; +extern 
transitioning builtin SetPropertyIgnoreAttributes( + implicit context: Context)(JSObject, String, JSAny, Smi): JSAny; extern transitioning builtin SetPropertyInLiteral(implicit context: Context)( JSAny, JSAny, JSAny): JSAny; extern transitioning builtin DeleteProperty(implicit context: Context)( @@ -529,6 +556,8 @@ extern transitioning macro HasProperty_Inline(implicit context: Context)( extern builtin LoadIC( Context, JSAny, JSAny, TaggedIndex, FeedbackVector): JSAny; +extern macro SetPropertyStrict(Context, Object, Object, Object): Object; + extern macro ThrowRangeError(implicit context: Context)( constexpr MessageTemplate): never; extern macro ThrowRangeError(implicit context: Context)( @@ -581,10 +610,7 @@ extern builtin ToObject(Context, JSAny): JSReceiver; extern macro ToObject_Inline(Context, JSAny): JSReceiver; extern macro IsNullOrUndefined(Object): bool; extern macro IsString(HeapObject): bool; -transitioning builtin ToString(context: Context, o: JSAny): String { - return ToStringImpl(context, o); -} -extern transitioning runtime ToStringRT(Context, JSAny): String; +extern macro IsSeqOneByteString(HeapObject): bool; extern transitioning builtin NonPrimitiveToPrimitive_String( Context, JSAny): JSPrimitive; extern transitioning builtin NonPrimitiveToPrimitive_Default( @@ -616,6 +642,18 @@ extern macro StringCharCodeAt(String, uintptr): int32; extern runtime StringCompareSequence(Context, String, String, Number): Boolean; extern macro StringFromSingleCharCode(int32): String; +extern macro NumberToString(Number): String; +extern macro StringToNumber(String): Number; +extern transitioning macro NonNumberToNumber(implicit context: Context)( + JSAnyNotNumber): Number; +extern transitioning macro NonNumberToNumeric(implicit context: Context)( + JSAnyNotNumber): Numeric; + +extern macro Equal(JSAny, JSAny, Context): Boolean; +macro Equal(implicit context: Context)(left: JSAny, right: JSAny): Boolean { + return Equal(left, right); +} + extern macro 
StrictEqual(JSAny, JSAny): Boolean; extern macro SmiLexicographicCompare(Smi, Smi): Smi; extern runtime ReThrow(Context, JSAny): never; @@ -778,6 +816,8 @@ extern operator '+' macro ConstexprInt31Add( constexpr int31, constexpr int31): constexpr int31; extern operator '*' macro ConstexprInt31Mul( constexpr int31, constexpr int31): constexpr int31; +extern operator '-' macro Int32Sub(int16, int16): int32; +extern operator '-' macro Int32Sub(uint16, uint16): int32; extern operator '-' macro Int32Sub(int32, int32): int32; extern operator '*' macro Int32Mul(int32, int32): int32; extern operator '/' macro Int32Div(int32, int32): int32; @@ -801,6 +841,7 @@ extern operator '==' macro Word32Equal(uint32, uint32): bool; extern operator '!=' macro Word32NotEqual(int32, int32): bool; extern operator '!=' macro Word32NotEqual(uint32, uint32): bool; extern operator '>>>' macro Word32Shr(uint32, uint32): uint32; +extern operator '>>' macro Word32Sar(int32, int32): int32; extern operator '<<' macro Word32Shl(int32, int32): int32; extern operator '<<' macro Word32Shl(uint32, uint32): uint32; extern operator '|' macro Word32Or(int32, int32): int32; @@ -809,11 +850,14 @@ extern operator '&' macro Word32And(bool, bool): bool; extern operator '|' macro Word32Or(bool, bool): bool; extern operator '==' macro Word32Equal(bool, bool): bool; extern operator '!=' macro Word32NotEqual(bool, bool): bool; +extern operator '|' macro ConstexprWord32Or( + constexpr int32, constexpr int32): constexpr int32; extern operator '+' macro Float64Add(float64, float64): float64; extern operator '-' macro Float64Sub(float64, float64): float64; extern operator '*' macro Float64Mul(float64, float64): float64; extern operator '/' macro Float64Div(float64, float64): float64; +extern operator '%' macro Float64Mod(float64, float64): float64; extern operator '+' macro NumberAdd(Number, Number): Number; extern operator '-' macro NumberSub(Number, Number): Number; @@ -850,6 +894,12 @@ extern operator '!' 
macro ConstexprBoolNot(constexpr bool): constexpr bool; extern operator '!' macro Word32BinaryNot(bool): bool; extern operator '!' macro IsFalse(Boolean): bool; +extern operator '==' macro +ConstexprInt31Equal( + constexpr InstanceType, constexpr InstanceType): constexpr bool; +extern operator '-' macro ConstexprUint32Sub( + constexpr InstanceType, constexpr InstanceType): constexpr int32; + extern operator '.instanceType' macro LoadInstanceType(HeapObject): InstanceType; @@ -882,6 +932,7 @@ extern macro TaggedIsNotSmi(Object): bool; extern macro TaggedIsPositiveSmi(Object): bool; extern macro IsValidPositiveSmi(intptr): bool; +extern macro IsInteger(JSAny): bool; extern macro IsInteger(HeapNumber): bool; extern macro AllocateHeapNumberWithValue(float64): HeapNumber; @@ -912,6 +963,7 @@ macro SmiTag(value: T): SmiTagged { return %RawDownCast>(SmiFromUint32(value)); } extern macro SmiToInt32(Smi): int32; +extern macro SmiToFloat64(Smi): float64; extern macro TaggedIndexToIntPtr(TaggedIndex): intptr; extern macro IntPtrToTaggedIndex(intptr): TaggedIndex; extern macro TaggedIndexToSmi(TaggedIndex): Smi; @@ -919,6 +971,7 @@ extern macro SmiToTaggedIndex(Smi): TaggedIndex; extern macro RoundIntPtrToFloat64(intptr): float64; extern macro ChangeFloat32ToFloat64(float32): float64; extern macro ChangeNumberToFloat64(Number): float64; +extern macro ChangeNumberToUint32(Number): uint32; extern macro ChangeTaggedNonSmiToInt32(implicit context: Context)(JSAnyNotSmi): int32; extern macro ChangeTaggedToFloat64(implicit context: Context)(JSAny): float64; @@ -938,6 +991,7 @@ extern macro NumberConstant(constexpr int32): Number; extern macro NumberConstant(constexpr uint32): Number; extern macro IntPtrConstant(constexpr int31): intptr; extern macro IntPtrConstant(constexpr int32): intptr; +extern macro Uint16Constant(constexpr uint16): uint16; extern macro Int32Constant(constexpr int31): int31; extern macro Int32Constant(constexpr int32): int32; extern macro 
Float64Constant(constexpr int31): float64; @@ -962,22 +1016,6 @@ extern macro BitcastWordToTagged(uintptr): Object; extern macro BitcastTaggedToWord(Tagged): intptr; extern macro BitcastTaggedToWordForTagAndSmiBits(Tagged): intptr; -macro Is( - implicit context: Context)(o: B): bool { - Cast(o) otherwise return false; - return true; -} - -macro UnsafeCast(implicit context: Context)(o: Object): - A { - assert(Is(o)); - return %RawDownCast(o); -} - -macro UnsafeConstCast(r: const &T):&T { - return %RawDownCast<&T>(r); -} - extern macro FixedArrayMapConstant(): Map; extern macro FixedDoubleArrayMapConstant(): Map; extern macro FixedCOWArrayMapConstant(): Map; @@ -986,7 +1024,6 @@ extern macro EmptyFixedArrayConstant(): EmptyFixedArray; extern macro PromiseCapabilityMapConstant(): Map; extern macro OneByteStringMapConstant(): Map; extern macro StringMapConstant(): Map; -extern macro SloppyArgumentsElementsMapConstant(): Map; const kFixedArrayMap: Map = FixedArrayMapConstant(); const kFixedDoubleArrayMap: Map = FixedDoubleArrayMapConstant(); @@ -998,7 +1035,6 @@ const kPromiseCapabilityMap: Map = PromiseCapabilityMapConstant(); const kOneByteStringMap: Map = OneByteStringMapConstant(); // The map of a non-internalized internal SeqTwoByteString. 
const kStringMap: Map = StringMapConstant(); -const kSloppyArgumentsElementsMap: Map = SloppyArgumentsElementsMapConstant(); extern macro IsPrototypeInitialArrayPrototype(implicit context: Context)(Map): bool; @@ -1070,60 +1106,50 @@ macro AllowNonNumberElements(kind: ElementsKind): ElementsKind { } macro GetObjectFunction(implicit context: Context)(): JSFunction { - return UnsafeCast( - LoadNativeContext(context)[NativeContextSlot::OBJECT_FUNCTION_INDEX]); + return *NativeContextSlot(ContextSlot::OBJECT_FUNCTION_INDEX); } macro GetArrayFunction(implicit context: Context)(): JSFunction { - return UnsafeCast( - LoadNativeContext(context)[NativeContextSlot::ARRAY_FUNCTION_INDEX]); + return *NativeContextSlot(ContextSlot::ARRAY_FUNCTION_INDEX); } macro GetArrayBufferFunction(implicit context: Context)(): Constructor { - return UnsafeCast( - LoadNativeContext(context)[NativeContextSlot::ARRAY_BUFFER_FUN_INDEX]); + return *NativeContextSlot(ContextSlot::ARRAY_BUFFER_FUN_INDEX); } macro GetArrayBufferNoInitFunction(implicit context: Context)(): JSFunction { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::ARRAY_BUFFER_NOINIT_FUN_INDEX]); + return *NativeContextSlot(ContextSlot::ARRAY_BUFFER_NOINIT_FUN_INDEX); } macro GetFastPackedElementsJSArrayMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX); } macro GetFastPackedSmiElementsJSArrayMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX]); + return *NativeContextSlot( + ContextSlot::JS_ARRAY_PACKED_SMI_ELEMENTS_MAP_INDEX); } macro GetProxyRevocableResultMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::PROXY_REVOCABLE_RESULT_MAP_INDEX]); + return 
*NativeContextSlot(ContextSlot::PROXY_REVOCABLE_RESULT_MAP_INDEX); } macro GetIteratorResultMap(implicit context: Context)(): Map { - return UnsafeCast( - LoadNativeContext(context)[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::ITERATOR_RESULT_MAP_INDEX); } macro GetInitialStringIteratorMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::INITIAL_STRING_ITERATOR_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::INITIAL_STRING_ITERATOR_MAP_INDEX); } macro GetReflectApply(implicit context: Context)(): Callable { - return UnsafeCast( - LoadNativeContext(context)[NativeContextSlot::REFLECT_APPLY_INDEX]); + return *NativeContextSlot(ContextSlot::REFLECT_APPLY_INDEX); } macro GetRegExpLastMatchInfo(implicit context: Context)(): RegExpMatchInfo { - return %RawDownCast(LoadNativeContext( - context)[NativeContextSlot::REGEXP_LAST_MATCH_INFO_INDEX]); + return *NativeContextSlot(ContextSlot::REGEXP_LAST_MATCH_INFO_INDEX); } macro GetStrictArgumentsMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::STRICT_ARGUMENTS_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::STRICT_ARGUMENTS_MAP_INDEX); } macro GetSloppyArgumentsMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::SLOPPY_ARGUMENTS_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::SLOPPY_ARGUMENTS_MAP_INDEX); } macro GetFastAliasedArgumentsMap(implicit context: Context)(): Map { - return UnsafeCast(LoadNativeContext( - context)[NativeContextSlot::FAST_ALIASED_ARGUMENTS_MAP_INDEX]); + return *NativeContextSlot(ContextSlot::FAST_ALIASED_ARGUMENTS_MAP_INDEX); +} +macro GetWeakCellMap(implicit context: Context)(): Map { + return %GetClassMapConstant(); } // Call(Context, Target, Receiver, ...Args) @@ -1371,7 +1397,6 @@ transitioning macro GetMethod(implicit context: Context)( 
MessageTemplate::kPropertyNotFunction, value, symbol, o); } -extern macro NumberToString(Number): String; extern macro IsOneByteStringInstanceType(InstanceType): bool; // After converting an index to an integer, calculate a relative index: @@ -1514,6 +1539,9 @@ macro IsFastJSArrayForReadWithNoCustomIteration(context: Context, o: Object): extern transitioning runtime CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, JSAny); +extern transitioning runtime SetOwnPropertyIgnoreAttributes( + implicit context: Context)(JSObject, String, JSAny, Smi); + namespace runtime { extern runtime GetDerivedMap(Context, JSFunction, JSReceiver): Map; @@ -1573,37 +1601,8 @@ transitioning builtin FastCreateDataProperty(implicit context: Context)( return Undefined; } -@export -transitioning macro ToStringImpl(context: Context, o: JSAny): String { - let result: JSAny = o; - while (true) { - typeswitch (result) { - case (num: Number): { - return NumberToString(num); - } - case (str: String): { - return str; - } - case (oddball: Oddball): { - return oddball.to_string; - } - case (JSReceiver): { - result = NonPrimitiveToPrimitive_String(context, result); - continue; - } - case (Symbol): { - ThrowTypeError(MessageTemplate::kSymbolToString); - } - case (JSAny): { - return ToStringRT(context, o); - } - } - } - unreachable; -} - macro VerifiedUnreachable(): never { - StaticAssert(false); + static_assert(false); unreachable; } diff --git a/deps/v8/src/builtins/bigint.tq b/deps/v8/src/builtins/bigint.tq index d52de7f84eab50..409301dcc9814f 100644 --- a/deps/v8/src/builtins/bigint.tq +++ b/deps/v8/src/builtins/bigint.tq @@ -13,6 +13,7 @@ type BigInt extends BigIntBase; @noVerifier @hasSameInstanceTypeAsParent +@doNotGenerateCast extern class MutableBigInt extends BigIntBase generates 'TNode' { } diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc index 734b9b634a0b30..134baeb96e84c9 100644 --- 
a/deps/v8/src/builtins/builtins-array-gen.cc +++ b/deps/v8/src/builtins/builtins-array-gen.cc @@ -438,7 +438,6 @@ TF_BUILTIN(ArrayPrototypePush, CodeStubAssembler) { } TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) { - ParameterMode mode = OptimalParameterMode(); TNode context = CAST(Parameter(Descriptor::kContext)); TNode array = CAST(Parameter(Descriptor::kSource)); TNode begin = SmiToBInt(CAST(Parameter(Descriptor::kBegin))); @@ -446,7 +445,7 @@ TF_BUILTIN(ExtractFastJSArray, ArrayBuiltinsAssembler) { CSA_ASSERT(this, Word32BinaryNot(IsNoElementsProtectorCellInvalid())); - Return(ExtractFastJSArray(context, array, begin, count, mode)); + Return(ExtractFastJSArray(context, array, begin, count)); } TF_BUILTIN(CloneFastJSArray, ArrayBuiltinsAssembler) { @@ -477,7 +476,7 @@ TF_BUILTIN(CloneFastJSArrayFillingHoles, ArrayBuiltinsAssembler) { LoadElementsKind(array))), Word32BinaryNot(IsNoElementsProtectorCellInvalid()))); - Return(CloneFastJSArray(context, array, {}, + Return(CloneFastJSArray(context, array, base::nullopt, HoleConversionMode::kConvertToUndefined)); } @@ -937,8 +936,8 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles( Label continue_loop(this); GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); - TNode element_k = LoadFixedDoubleArrayElement( - elements, index_var.value(), MachineType::Float64()); + TNode element_k = + LoadFixedDoubleArrayElement(elements, index_var.value()); Branch(Float64Equal(element_k, search_num.value()), &return_found, &continue_loop); BIND(&continue_loop); @@ -952,8 +951,8 @@ void ArrayIncludesIndexofAssembler::GeneratePackedDoubles( Label continue_loop(this); GotoIfNot(UintPtrLessThan(index_var.value(), array_length_untagged), &return_not_found); - TNode element_k = LoadFixedDoubleArrayElement( - elements, index_var.value(), MachineType::Float64()); + TNode element_k = + LoadFixedDoubleArrayElement(elements, index_var.value()); BranchIfFloat64IsNaN(element_k, 
&return_found, &continue_loop); BIND(&continue_loop); Increment(&index_var); @@ -1011,8 +1010,8 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles( // No need for hole checking here; the following Float64Equal will // return 'not equal' for holes anyway. - TNode element_k = LoadFixedDoubleArrayElement( - elements, index_var.value(), MachineType::Float64()); + TNode element_k = + LoadFixedDoubleArrayElement(elements, index_var.value()); Branch(Float64Equal(element_k, search_num.value()), &return_found, &continue_loop); @@ -1030,8 +1029,7 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles( // Load double value or continue if it's the hole NaN. TNode element_k = LoadFixedDoubleArrayElement( - elements, index_var.value(), MachineType::Float64(), 0, - INTPTR_PARAMETERS, &continue_loop); + elements, index_var.value(), &continue_loop); BranchIfFloat64IsNaN(element_k, &return_found, &continue_loop); BIND(&continue_loop); @@ -1046,9 +1044,8 @@ void ArrayIncludesIndexofAssembler::GenerateHoleyDoubles( &return_not_found); // Check if the element is a double hole, but don't load it. 
- LoadFixedDoubleArrayElement(elements, index_var.value(), - MachineType::None(), 0, INTPTR_PARAMETERS, - &return_found); + LoadFixedDoubleArrayElement(elements, index_var.value(), &return_found, + MachineType::None()); Increment(&index_var); Goto(&hole_loop); @@ -1153,7 +1150,7 @@ TF_BUILTIN(ArrayIndexOfHoleyDoubles, ArrayIncludesIndexofAssembler) { // ES #sec-array.prototype.values TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Return(CreateArrayIterator(context, ToObject_Inline(context, receiver), IterationKind::kValues)); @@ -1161,7 +1158,7 @@ TF_BUILTIN(ArrayPrototypeValues, CodeStubAssembler) { // ES #sec-array.prototype.entries TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Return(CreateArrayIterator(context, ToObject_Inline(context, receiver), IterationKind::kEntries)); @@ -1169,7 +1166,7 @@ TF_BUILTIN(ArrayPrototypeEntries, CodeStubAssembler) { // ES #sec-array.prototype.keys TF_BUILTIN(ArrayPrototypeKeys, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); + TNode context = CAST(Parameter(Descriptor::kContext)); TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Return(CreateArrayIterator(context, ToObject_Inline(context, receiver), IterationKind::kKeys)); @@ -1665,7 +1662,8 @@ void ArrayBuiltinsAssembler::TailCallArrayConstructorStub( void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument( TNode context, TNode target, TNode argc, - AllocationSiteOverrideMode mode, TNode allocation_site) { + AllocationSiteOverrideMode mode, + base::Optional> allocation_site) { if (mode == DISABLE_ALLOCATION_SITES) { Callable callable = CodeFactory::ArrayNoArgumentConstructor( isolate(), 
GetInitialFastElementsKind(), mode); @@ -1674,7 +1672,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument( argc); } else { DCHECK_EQ(mode, DONT_OVERRIDE); - TNode elements_kind = LoadElementsKind(allocation_site); + DCHECK(allocation_site); + TNode elements_kind = LoadElementsKind(*allocation_site); // TODO(ishell): Compute the builtin index dynamically instead of // iterating over all expected elements kinds. @@ -1688,7 +1687,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument( Callable callable = CodeFactory::ArrayNoArgumentConstructor(isolate(), kind, mode); - TailCallArrayConstructorStub(callable, context, target, allocation_site, + TailCallArrayConstructorStub(callable, context, target, *allocation_site, argc); BIND(&next); @@ -1701,7 +1700,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchNoArgument( void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( TNode context, TNode target, TNode argc, - AllocationSiteOverrideMode mode, TNode allocation_site) { + AllocationSiteOverrideMode mode, + base::Optional> allocation_site) { if (mode == DISABLE_ALLOCATION_SITES) { ElementsKind initial = GetInitialFastElementsKind(); ElementsKind holey_initial = GetHoleyElementsKind(initial); @@ -1712,7 +1712,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( argc); } else { DCHECK_EQ(mode, DONT_OVERRIDE); - TNode transition_info = LoadTransitionInfo(allocation_site); + DCHECK(allocation_site); + TNode transition_info = LoadTransitionInfo(*allocation_site); // Least significant bit in fast array elements kind means holeyness. STATIC_ASSERT(PACKED_SMI_ELEMENTS == 0); @@ -1735,7 +1736,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( // Make elements kind holey and update elements kind in the type info. 
var_elements_kind = Word32Or(var_elements_kind.value(), Int32Constant(1)); StoreObjectFieldNoWriteBarrier( - allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset, + *allocation_site, AllocationSite::kTransitionInfoOrBoilerplateOffset, SmiOr(transition_info, SmiConstant(fast_elements_kind_holey_mask))); Goto(&normal_sequence); } @@ -1756,7 +1757,7 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( Callable callable = CodeFactory::ArraySingleArgumentConstructor(isolate(), kind, mode); - TailCallArrayConstructorStub(callable, context, target, allocation_site, + TailCallArrayConstructorStub(callable, context, target, *allocation_site, argc); BIND(&next); @@ -1769,7 +1770,8 @@ void ArrayBuiltinsAssembler::CreateArrayDispatchSingleArgument( void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub( TNode context, TNode target, TNode argc, - AllocationSiteOverrideMode mode, TNode allocation_site) { + AllocationSiteOverrideMode mode, + base::Optional> allocation_site) { Label check_one_case(this), fallthrough(this); GotoIfNot(Word32Equal(argc, Int32Constant(0)), &check_one_case); CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site); @@ -1848,13 +1850,8 @@ void ArrayBuiltinsAssembler::GenerateConstructor( SmiConstant(AbortReason::kAllocatingNonEmptyPackedArray); TailCallRuntime(Runtime::kAbort, context, reason); } else { - int element_size = - IsDoubleElementsKind(elements_kind) ? 
kDoubleSize : kTaggedSize; - int max_fast_elements = - (kMaxRegularHeapObjectSize - FixedArray::kHeaderSize - - JSArray::kHeaderSize - AllocationMemento::kSize) / - element_size; - Branch(SmiAboveOrEqual(array_size_smi, SmiConstant(max_fast_elements)), + Branch(SmiAboveOrEqual(array_size_smi, + SmiConstant(JSArray::kInitialMaxFastElementArray)), &call_runtime, &small_smi_size); } @@ -1862,8 +1859,9 @@ void ArrayBuiltinsAssembler::GenerateConstructor( { TNode array = AllocateJSArray( elements_kind, array_map, array_size_smi, array_size_smi, - mode == DONT_TRACK_ALLOCATION_SITE ? TNode() - : CAST(allocation_site)); + mode == DONT_TRACK_ALLOCATION_SITE + ? base::Optional>(base::nullopt) + : CAST(allocation_site)); Return(array); } } @@ -1878,13 +1876,14 @@ void ArrayBuiltinsAssembler::GenerateConstructor( void ArrayBuiltinsAssembler::GenerateArrayNoArgumentConstructor( ElementsKind kind, AllocationSiteOverrideMode mode) { using Descriptor = ArrayNoArgumentConstructorDescriptor; - TNode native_context = CAST(LoadObjectField( - Parameter(Descriptor::kFunction), JSFunction::kContextOffset)); + TNode native_context = LoadObjectField( + CAST(Parameter(Descriptor::kFunction)), JSFunction::kContextOffset); bool track_allocation_site = AllocationSite::ShouldTrack(kind) && mode != DISABLE_ALLOCATION_SITES; - TNode allocation_site = - track_allocation_site ? CAST(Parameter(Descriptor::kAllocationSite)) - : TNode(); + base::Optional> allocation_site = + track_allocation_site + ? 
CAST(Parameter(Descriptor::kAllocationSite)) + : base::Optional>(base::nullopt); TNode array_map = LoadJSArrayElementsMap(kind, native_context); TNode array = AllocateJSArray( kind, array_map, IntPtrConstant(JSArray::kPreallocatedArrayElements), diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h index 088af90665da5a..96833d9dea24c1 100644 --- a/deps/v8/src/builtins/builtins-array-gen.h +++ b/deps/v8/src/builtins/builtins-array-gen.h @@ -72,20 +72,20 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler { TNode target, TNode allocation_site_or_undefined, TNode argc); - void GenerateDispatchToArrayStub(TNode context, - TNode target, TNode argc, - AllocationSiteOverrideMode mode, - TNode allocation_site = {}); + void GenerateDispatchToArrayStub( + TNode context, TNode target, TNode argc, + AllocationSiteOverrideMode mode, + base::Optional> allocation_site = base::nullopt); void CreateArrayDispatchNoArgument( TNode context, TNode target, TNode argc, AllocationSiteOverrideMode mode, - TNode allocation_site = {}); + base::Optional> allocation_site); void CreateArrayDispatchSingleArgument( TNode context, TNode target, TNode argc, AllocationSiteOverrideMode mode, - TNode allocation_site = {}); + base::Optional> allocation_site); void GenerateConstructor(TNode context, TNode array_function, diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc index b138515af653a2..73e5605ccc445c 100644 --- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc +++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc @@ -16,6 +16,10 @@ namespace internal { namespace { class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { public: + // The 'next' and 'return' take an optional value parameter, and the 'throw' + // method take an optional reason parameter. 
+ static const int kValueOrReasonArg = 0; + explicit AsyncFromSyncBuiltinsAssembler(compiler::CodeAssemblerState* state) : AsyncBuiltinsAssembler(state) {} @@ -31,8 +35,8 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { using SyncIteratorNodeGenerator = std::function(TNode)>; void Generate_AsyncFromSyncIteratorMethod( - const TNode context, const TNode iterator, - const TNode sent_value, + CodeStubArguments* args, const TNode context, + const TNode iterator, const TNode sent_value, const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, @@ -40,9 +44,9 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { base::Optional> initial_exception_value = base::nullopt); void Generate_AsyncFromSyncIteratorMethod( - const TNode context, const TNode iterator, - const TNode sent_value, Handle name, - const UndefinedMethodHandler& if_method_undefined, + CodeStubArguments* args, const TNode context, + const TNode iterator, const TNode sent_value, + Handle name, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type = Label::kDeferred, base::Optional> initial_exception_value = base::nullopt) { @@ -50,7 +54,7 @@ class AsyncFromSyncBuiltinsAssembler : public AsyncBuiltinsAssembler { return GetProperty(context, sync_iterator, name); }; return Generate_AsyncFromSyncIteratorMethod( - context, iterator, sent_value, get_method, if_method_undefined, + args, context, iterator, sent_value, get_method, if_method_undefined, operation_name, reject_label_type, initial_exception_value); } @@ -97,8 +101,9 @@ void AsyncFromSyncBuiltinsAssembler::ThrowIfNotAsyncFromSyncIterator( } void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( - const TNode context, const TNode iterator, - const TNode sent_value, const SyncIteratorNodeGenerator& get_method, + CodeStubArguments* args, const TNode context, + const TNode 
iterator, const TNode sent_value, + const SyncIteratorNodeGenerator& get_method, const UndefinedMethodHandler& if_method_undefined, const char* operation_name, Label::Type reject_label_type, base::Optional> initial_exception_value) { @@ -122,22 +127,37 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( if (if_method_undefined) { Label if_isnotundefined(this); - GotoIfNot(IsUndefined(method), &if_isnotundefined); + GotoIfNot(IsNullOrUndefined(method), &if_isnotundefined); if_method_undefined(native_context, promise, &reject_promise); BIND(&if_isnotundefined); } - TNode iter_result; + TVARIABLE(Object, iter_result); { + Label has_sent_value(this), no_sent_value(this), merge(this); ScopedExceptionHandler handler(this, &reject_promise, &var_exception); - iter_result = Call(context, method, sync_iterator, sent_value); + Branch( + IntPtrGreaterThan(args->GetLength(), IntPtrConstant(kValueOrReasonArg)), + &has_sent_value, &no_sent_value); + BIND(&has_sent_value); + { + iter_result = Call(context, method, sync_iterator, sent_value); + Goto(&merge); + } + BIND(&no_sent_value); + { + iter_result = Call(context, method, sync_iterator); + Goto(&merge); + } + BIND(&merge); } TNode value; TNode done; - std::tie(value, done) = LoadIteratorResult( - context, native_context, iter_result, &reject_promise, &var_exception); + std::tie(value, done) = + LoadIteratorResult(context, native_context, iter_result.value(), + &reject_promise, &var_exception); const TNode promise_fun = CAST(LoadContextElement(native_context, Context::PROMISE_FUNCTION_INDEX)); @@ -160,15 +180,16 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod( // Perform ! PerformPromiseThen(valueWrapper, // onFulfilled, undefined, promiseCapability). 
- Return(CallBuiltin(Builtins::kPerformPromiseThen, context, value_wrapper, - on_fulfilled, UndefinedConstant(), promise)); + args->PopAndReturn(CallBuiltin(Builtins::kPerformPromiseThen, context, + value_wrapper, on_fulfilled, + UndefinedConstant(), promise)); BIND(&reject_promise); { const TNode exception = var_exception.value(); CallBuiltin(Builtins::kRejectPromise, context, promise, exception, TrueConstant()); - Return(promise); + args->PopAndReturn(promise); } } @@ -252,8 +273,12 @@ AsyncFromSyncBuiltinsAssembler::LoadIteratorResult( // https://tc39.github.io/proposal-async-iteration/ // Section #sec-%asyncfromsynciteratorprototype%.next TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { - const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); - const TNode value = CAST(Parameter(Descriptor::kValue)); + TNode argc = ChangeInt32ToIntPtr( + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); + + const TNode iterator = args.GetReceiver(); + const TNode value = args.GetOptionalArgumentValue(kValueOrReasonArg); const TNode context = CAST(Parameter(Descriptor::kContext)); auto get_method = [=](const TNode unused) { @@ -261,7 +286,7 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { JSAsyncFromSyncIterator::kNextOffset); }; Generate_AsyncFromSyncIteratorMethod( - context, iterator, value, get_method, UndefinedMethodHandler(), + &args, context, iterator, value, get_method, UndefinedMethodHandler(), "[Async-from-Sync Iterator].prototype.next"); } @@ -269,11 +294,16 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeNext, AsyncFromSyncBuiltinsAssembler) { // Section #sec-%asyncfromsynciteratorprototype%.return TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, AsyncFromSyncBuiltinsAssembler) { - const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); - const TNode value = CAST(Parameter(Descriptor::kValue)); + TNode argc = ChangeInt32ToIntPtr( + 
UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); + + const TNode iterator = args.GetReceiver(); + const TNode value = args.GetOptionalArgumentValue(kValueOrReasonArg); const TNode context = CAST(Parameter(Descriptor::kContext)); - auto if_return_undefined = [=](const TNode native_context, + auto if_return_undefined = [=, &args]( + const TNode native_context, const TNode promise, Label* if_exception) { // If return is undefined, then @@ -285,20 +315,24 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeReturn, // IfAbruptRejectPromise(nextDone, promiseCapability). // Return promiseCapability.[[Promise]]. CallBuiltin(Builtins::kResolvePromise, context, promise, iter_result); - Return(promise); + args.PopAndReturn(promise); }; Generate_AsyncFromSyncIteratorMethod( - context, iterator, value, factory()->return_string(), if_return_undefined, - "[Async-from-Sync Iterator].prototype.return"); + &args, context, iterator, value, factory()->return_string(), + if_return_undefined, "[Async-from-Sync Iterator].prototype.return"); } // https://tc39.github.io/proposal-async-iteration/ // Section #sec-%asyncfromsynciteratorprototype%.throw TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow, AsyncFromSyncBuiltinsAssembler) { - const TNode iterator = CAST(Parameter(Descriptor::kReceiver)); - const TNode reason = CAST(Parameter(Descriptor::kReason)); + TNode argc = ChangeInt32ToIntPtr( + UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); + CodeStubArguments args(this, argc); + + const TNode iterator = args.GetReceiver(); + const TNode reason = args.GetOptionalArgumentValue(kValueOrReasonArg); const TNode context = CAST(Parameter(Descriptor::kContext)); auto if_throw_undefined = [=](const TNode native_context, @@ -306,9 +340,9 @@ TF_BUILTIN(AsyncFromSyncIteratorPrototypeThrow, Label* if_exception) { Goto(if_exception); }; Generate_AsyncFromSyncIteratorMethod( - context, iterator, reason, factory()->throw_string(), 
if_throw_undefined, - "[Async-from-Sync Iterator].prototype.throw", Label::kNonDeferred, - reason); + &args, context, iterator, reason, factory()->throw_string(), + if_throw_undefined, "[Async-from-Sync Iterator].prototype.throw", + Label::kNonDeferred, reason); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc index d457e0331490f8..f7919b78f47993 100644 --- a/deps/v8/src/builtins/builtins-call-gen.cc +++ b/deps/v8/src/builtins/builtins-call-gen.cc @@ -64,6 +64,49 @@ void Builtins::Generate_CallFunctionForwardVarargs(MacroAssembler* masm) { masm->isolate()->builtins()->CallFunction()); } +TF_BUILTIN(Call_ReceiverIsNullOrUndefined_WithFeedback, + CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kFunction)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + CollectCallFeedback(target, context, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot))); + TailCallBuiltin(Builtins::kCall_ReceiverIsNullOrUndefined, context, target, + argc); +} + +TF_BUILTIN(Call_ReceiverIsNotNullOrUndefined_WithFeedback, + CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kFunction)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + CollectCallFeedback(target, context, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot))); + TailCallBuiltin(Builtins::kCall_ReceiverIsNotNullOrUndefined, context, target, + argc); +} + +TF_BUILTIN(Call_ReceiverIsAny_WithFeedback, 
CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kFunction)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + CollectCallFeedback(target, context, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot))); + TailCallBuiltin(Builtins::kCall_ReceiverIsAny, context, target, argc); +} + void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike( TNode target, base::Optional> new_target, TNode arguments_list, TNode context) { @@ -387,6 +430,19 @@ TF_BUILTIN(CallWithArrayLike, CallOrConstructBuiltinsAssembler) { CallOrConstructWithArrayLike(target, new_target, arguments_list, context); } +TF_BUILTIN(CallWithArrayLike_WithFeedback, CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kTarget)); + base::Optional> new_target = base::nullopt; + TNode arguments_list = CAST(Parameter(Descriptor::kArgumentsList)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + CollectCallFeedback(target, context, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot))); + CallOrConstructWithArrayLike(target, new_target, arguments_list, context); +} + TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) { TNode target = CAST(Parameter(Descriptor::kTarget)); base::Optional> new_target = base::nullopt; @@ -397,6 +453,21 @@ TF_BUILTIN(CallWithSpread, CallOrConstructBuiltinsAssembler) { CallOrConstructWithSpread(target, new_target, spread, args_count, context); } +TF_BUILTIN(CallWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kTarget)); + base::Optional> new_target = 
base::nullopt; + TNode spread = CAST(Parameter(Descriptor::kSpread)); + TNode args_count = + UncheckedCast(Parameter(Descriptor::kArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + CollectCallFeedback(target, context, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot))); + CallOrConstructWithSpread(target, new_target, spread, args_count, context); +} + TNode CallOrConstructBuiltinsAssembler::GetCompatibleReceiver( TNode receiver, TNode signature, TNode context) { @@ -535,7 +606,7 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate( TNode function_template_info_flags = LoadAndUntagObjectField( function_template_info, FunctionTemplateInfo::kFlagOffset); Branch(IsSetWord(function_template_info_flags, - 1 << FunctionTemplateInfo::kAcceptAnyReceiver), + 1 << FunctionTemplateInfo::AcceptAnyReceiverBit::kShift), &receiver_done, &receiver_needs_access_check); BIND(&receiver_needs_access_check); diff --git a/deps/v8/src/builtins/builtins-collections-gen.cc b/deps/v8/src/builtins/builtins-collections-gen.cc index 2f0e5a756026b4..db708c633ccded 100644 --- a/deps/v8/src/builtins/builtins-collections-gen.cc +++ b/deps/v8/src/builtins/builtins-collections-gen.cc @@ -413,8 +413,7 @@ TNode BaseCollectionsAssembler::AllocateJSCollectionSlow( TNode context, TNode constructor, TNode new_target) { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - return constructor_assembler.EmitFastNewObject(context, constructor, - new_target); + return constructor_assembler.FastNewObject(context, constructor, new_target); } void BaseCollectionsAssembler::GenerateConstructor( @@ -568,8 +567,7 @@ TNode BaseCollectionsAssembler::LoadAndNormalizeFixedDoubleArrayElement( TVARIABLE(Object, entry); Label if_hole(this, Label::kDeferred), next(this); TNode element = - 
LoadFixedDoubleArrayElement(CAST(elements), index, MachineType::Float64(), - 0, INTPTR_PARAMETERS, &if_hole); + LoadFixedDoubleArrayElement(CAST(elements), index, &if_hole); { // not hole entry = AllocateHeapNumberWithValue(element); Goto(&next); @@ -761,7 +759,7 @@ void CollectionsBuiltinsAssembler::FindOrderedHashTableEntry( const TNode number_of_buckets = SmiUntag(CAST(UnsafeLoadFixedArrayElement( table, CollectionType::NumberOfBucketsIndex()))); - const TNode bucket = + const TNode bucket = WordAnd(hash, IntPtrSub(number_of_buckets, IntPtrConstant(1))); const TNode first_entry = SmiUntag(CAST(UnsafeLoadFixedArrayElement( table, bucket, CollectionType::HashTableStartIndex() * kTaggedSize))); @@ -2511,7 +2509,7 @@ TNode WeakCollectionsBuiltinsAssembler::AllocateTable( AllocateFixedArray(HOLEY_ELEMENTS, length, kAllowLargeObjectAllocation)); TNode map = - HeapConstant(EphemeronHashTableShape::GetMap(ReadOnlyRoots(isolate()))); + HeapConstant(EphemeronHashTable::GetMap(ReadOnlyRoots(isolate()))); StoreMapNoWriteBarrier(table, map); StoreFixedArrayElement(table, EphemeronHashTable::kNumberOfElementsIndex, SmiConstant(0), SKIP_WRITE_BARRIER); @@ -2695,7 +2693,7 @@ TNode WeakCollectionsBuiltinsAssembler::ShouldShrink( TNode WeakCollectionsBuiltinsAssembler::ValueIndexFromKeyIndex( TNode key_index) { return IntPtrAdd(key_index, - IntPtrConstant(EphemeronHashTableShape::kEntryValueIndex - + IntPtrConstant(EphemeronHashTable::ShapeT::kEntryValueIndex - EphemeronHashTable::kEntryKeyIndex)); } diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc index c706ce9306c3dc..c313d773f38b46 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.cc +++ b/deps/v8/src/builtins/builtins-constructor-gen.cc @@ -36,6 +36,31 @@ void Builtins::Generate_ConstructFunctionForwardVarargs(MacroAssembler* masm) { BUILTIN_CODE(masm->isolate(), ConstructFunction)); } +TF_BUILTIN(Construct_WithFeedback, CallOrConstructBuiltinsAssembler) 
{ + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); + TNode argc = + UncheckedCast(Parameter(Descriptor::kActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + + TVARIABLE(AllocationSite, allocation_site); + Label if_construct_generic(this), if_construct_array(this); + CollectConstructFeedback(context, target, new_target, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot)), + &if_construct_generic, &if_construct_array, + &allocation_site); + + BIND(&if_construct_generic); + TailCallBuiltin(Builtins::kConstruct, context, target, new_target, argc); + + BIND(&if_construct_array); + TailCallBuiltin(Builtins::kArrayConstructorImpl, context, target, new_target, + argc, allocation_site.value()); +} + TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) { TNode target = CAST(Parameter(Descriptor::kTarget)); TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); @@ -44,6 +69,30 @@ TF_BUILTIN(ConstructWithArrayLike, CallOrConstructBuiltinsAssembler) { CallOrConstructWithArrayLike(target, new_target, arguments_list, context); } +TF_BUILTIN(ConstructWithArrayLike_WithFeedback, + CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); + TNode arguments_list = CAST(Parameter(Descriptor::kArgumentsList)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + + TVARIABLE(AllocationSite, allocation_site); + Label if_construct_generic(this), if_construct_array(this); + CollectConstructFeedback(context, target, new_target, maybe_feedback_vector, + 
Unsigned(ChangeInt32ToIntPtr(slot)), + &if_construct_generic, &if_construct_array, + &allocation_site); + + BIND(&if_construct_array); + Goto(&if_construct_generic); // Not implemented. + + BIND(&if_construct_generic); + CallOrConstructWithArrayLike(target, new_target, arguments_list, context); +} + TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) { TNode target = CAST(Parameter(Descriptor::kTarget)); TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); @@ -54,6 +103,31 @@ TF_BUILTIN(ConstructWithSpread, CallOrConstructBuiltinsAssembler) { CallOrConstructWithSpread(target, new_target, spread, args_count, context); } +TF_BUILTIN(ConstructWithSpread_WithFeedback, CallOrConstructBuiltinsAssembler) { + TNode target = CAST(Parameter(Descriptor::kTarget)); + TNode new_target = CAST(Parameter(Descriptor::kNewTarget)); + TNode spread = CAST(Parameter(Descriptor::kSpread)); + TNode args_count = + UncheckedCast(Parameter(Descriptor::kActualArgumentsCount)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + + TVARIABLE(AllocationSite, allocation_site); + Label if_construct_generic(this), if_construct_array(this); + CollectConstructFeedback(context, target, new_target, maybe_feedback_vector, + Unsigned(ChangeInt32ToIntPtr(slot)), + &if_construct_generic, &if_construct_array, + &allocation_site); + + BIND(&if_construct_array); + Goto(&if_construct_generic); // Not implemented. 
+ + BIND(&if_construct_generic); + CallOrConstructWithSpread(target, new_target, spread, args_count, context); +} + using Node = compiler::Node; TF_BUILTIN(FastNewClosure, ConstructorBuiltinsAssembler) { @@ -152,20 +226,20 @@ TF_BUILTIN(FastNewObject, ConstructorBuiltinsAssembler) { Label call_runtime(this); TNode result = - EmitFastNewObject(context, target, new_target, &call_runtime); + FastNewObject(context, target, new_target, &call_runtime); Return(result); BIND(&call_runtime); TailCallRuntime(Runtime::kNewObject, context, target, new_target); } -TNode ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode ConstructorBuiltinsAssembler::FastNewObject( TNode context, TNode target, TNode new_target) { TVARIABLE(JSObject, var_obj); Label call_runtime(this), end(this); - var_obj = EmitFastNewObject(context, target, new_target, &call_runtime); + var_obj = FastNewObject(context, target, new_target, &call_runtime); Goto(&end); BIND(&call_runtime); @@ -176,7 +250,7 @@ TNode ConstructorBuiltinsAssembler::EmitFastNewObject( return var_obj.value(); } -TNode ConstructorBuiltinsAssembler::EmitFastNewObject( +TNode ConstructorBuiltinsAssembler::FastNewObject( TNode context, TNode target, TNode new_target, Label* call_runtime) { // Verify that the new target is a JSFunction. 
@@ -218,7 +292,7 @@ TNode ConstructorBuiltinsAssembler::EmitFastNewObject( kNone, kWithSlackTracking); } -TNode ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( +TNode ConstructorBuiltinsAssembler::FastNewFunctionContext( TNode scope_info, TNode slots, TNode context, ScopeType scope_type) { TNode slots_intptr = Signed(ChangeUint32ToWord(slots)); @@ -267,23 +341,7 @@ TNode ConstructorBuiltinsAssembler::EmitFastNewFunctionContext( return function_context; } -TF_BUILTIN(FastNewFunctionContextEval, ConstructorBuiltinsAssembler) { - TNode scope_info = CAST(Parameter(Descriptor::kScopeInfo)); - TNode slots = UncheckedCast(Parameter(Descriptor::kSlots)); - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(EmitFastNewFunctionContext(scope_info, slots, context, - ScopeType::EVAL_SCOPE)); -} - -TF_BUILTIN(FastNewFunctionContextFunction, ConstructorBuiltinsAssembler) { - TNode scope_info = CAST(Parameter(Descriptor::kScopeInfo)); - TNode slots = UncheckedCast(Parameter(Descriptor::kSlots)); - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(EmitFastNewFunctionContext(scope_info, slots, context, - ScopeType::FUNCTION_SCOPE)); -} - -TNode ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( +TNode ConstructorBuiltinsAssembler::CreateRegExpLiteral( TNode maybe_feedback_vector, TNode slot, TNode pattern, TNode flags, TNode context) { Label call_runtime(this, Label::kDeferred), end(this); @@ -319,22 +377,10 @@ TNode ConstructorBuiltinsAssembler::EmitCreateRegExpLiteral( return result.value(); } -TF_BUILTIN(CreateRegExpLiteral, ConstructorBuiltinsAssembler) { - TNode maybe_feedback_vector = - CAST(Parameter(Descriptor::kFeedbackVector)); - TNode slot = CAST(Parameter(Descriptor::kSlot)); - TNode pattern = CAST(Parameter(Descriptor::kPattern)); - TNode flags = CAST(Parameter(Descriptor::kFlags)); - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode result = EmitCreateRegExpLiteral(maybe_feedback_vector, slot, - pattern, flags, 
context); - Return(result); -} - -TNode ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( +TNode ConstructorBuiltinsAssembler::CreateShallowArrayLiteral( TNode feedback_vector, TNode slot, - TNode context, Label* call_runtime, - AllocationSiteMode allocation_site_mode) { + TNode context, AllocationSiteMode allocation_site_mode, + Label* call_runtime) { Label zero_capacity(this), cow_elements(this), fast_elements(this), return_result(this); @@ -352,29 +398,7 @@ TNode ConstructorBuiltinsAssembler::EmitCreateShallowArrayLiteral( } } -TF_BUILTIN(CreateShallowArrayLiteral, ConstructorBuiltinsAssembler) { - TNode feedback_vector = - CAST(Parameter(Descriptor::kFeedbackVector)); - TNode slot = CAST(Parameter(Descriptor::kSlot)); - TNode constant_elements = - CAST(Parameter(Descriptor::kConstantElements)); - TNode context = CAST(Parameter(Descriptor::kContext)); - Label call_runtime(this, Label::kDeferred); - Return(EmitCreateShallowArrayLiteral(feedback_vector, slot, context, - &call_runtime, - DONT_TRACK_ALLOCATION_SITE)); - - BIND(&call_runtime); - { - Comment("call runtime"); - int const flags = - AggregateLiteral::kDisableMementos | AggregateLiteral::kIsShallow; - Return(CallRuntime(Runtime::kCreateArrayLiteral, context, feedback_vector, - slot, constant_elements, SmiConstant(flags))); - } -} - -TNode ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( +TNode ConstructorBuiltinsAssembler::CreateEmptyArrayLiteral( TNode feedback_vector, TNode slot, TNode context) { // Array literals always have a valid AllocationSite to properly track @@ -418,17 +442,7 @@ TNode ConstructorBuiltinsAssembler::EmitCreateEmptyArrayLiteral( return result; } -TF_BUILTIN(CreateEmptyArrayLiteral, ConstructorBuiltinsAssembler) { - TNode feedback_vector = - CAST(Parameter(Descriptor::kFeedbackVector)); - TNode slot = CAST(Parameter(Descriptor::kSlot)); - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode result = - EmitCreateEmptyArrayLiteral(feedback_vector, slot, 
context); - Return(result); -} - -TNode ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( +TNode ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( TNode feedback_vector, TNode slot, Label* call_runtime) { TNode maybe_allocation_site = @@ -605,26 +619,8 @@ TNode ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral( return copy; } -TF_BUILTIN(CreateShallowObjectLiteral, ConstructorBuiltinsAssembler) { - Label call_runtime(this); - TNode feedback_vector = - CAST(Parameter(Descriptor::kFeedbackVector)); - TNode slot = CAST(Parameter(Descriptor::kSlot)); - TNode copy = - EmitCreateShallowObjectLiteral(feedback_vector, slot, &call_runtime); - Return(copy); - - BIND(&call_runtime); - TNode object_boilerplate_description = - CAST(Parameter(Descriptor::kObjectBoilerplateDescription)); - TNode flags = CAST(Parameter(Descriptor::kFlags)); - TNode context = CAST(Parameter(Descriptor::kContext)); - TailCallRuntime(Runtime::kCreateObjectLiteral, context, feedback_vector, slot, - object_boilerplate_description, flags); -} - // Used by the CreateEmptyObjectLiteral bytecode and the Object constructor. 
-TNode ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( +TNode ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral( TNode context) { TNode native_context = LoadNativeContext(context); TNode object_function = @@ -641,110 +637,5 @@ TNode ConstructorBuiltinsAssembler::EmitCreateEmptyObjectLiteral( return result; } -// ES #sec-object-constructor -TF_BUILTIN(ObjectConstructor, ConstructorBuiltinsAssembler) { - int const kValueArg = 0; - TNode argc = ChangeInt32ToIntPtr( - UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); - CodeStubArguments args(this, argc); - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - - TVARIABLE(Object, var_result); - Label if_subclass(this, Label::kDeferred), if_notsubclass(this), - return_result(this); - GotoIf(IsUndefined(new_target), &if_notsubclass); - TNode target = CAST(Parameter(Descriptor::kJSTarget)); - Branch(TaggedEqual(new_target, target), &if_notsubclass, &if_subclass); - - BIND(&if_subclass); - { - var_result = - CallBuiltin(Builtins::kFastNewObject, context, target, new_target); - Goto(&return_result); - } - - BIND(&if_notsubclass); - { - Label if_newobject(this, Label::kDeferred), if_toobject(this); - - TNode value_index = IntPtrConstant(kValueArg); - GotoIf(UintPtrGreaterThanOrEqual(value_index, argc), &if_newobject); - TNode value = args.AtIndex(value_index); - GotoIf(IsNull(value), &if_newobject); - Branch(IsUndefined(value), &if_newobject, &if_toobject); - - BIND(&if_newobject); - { - var_result = EmitCreateEmptyObjectLiteral(context); - Goto(&return_result); - } - - BIND(&if_toobject); - { - var_result = CallBuiltin(Builtins::kToObject, context, value); - Goto(&return_result); - } - } - - BIND(&return_result); - args.PopAndReturn(var_result.value()); -} - -// ES #sec-number-constructor -TF_BUILTIN(NumberConstructor, ConstructorBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode argc = 
ChangeInt32ToIntPtr( - UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount))); - CodeStubArguments args(this, argc); - - // 1. If no arguments were passed to this function invocation, let n be +0. - TVARIABLE(Number, var_n, SmiConstant(0)); - Label if_nloaded(this, &var_n); - GotoIf(IntPtrEqual(argc, IntPtrConstant(0)), &if_nloaded); - - // 2. Else, - // a. Let prim be ? ToNumeric(value). - // b. If Type(prim) is BigInt, let n be the Number value for prim. - // c. Otherwise, let n be prim. - TNode value = args.AtIndex(0); - var_n = ToNumber(context, value, BigIntHandling::kConvertToNumber); - Goto(&if_nloaded); - - BIND(&if_nloaded); - { - // 3. If NewTarget is undefined, return n. - TNode n_value = var_n.value(); - TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - Label return_n(this), constructnumber(this, Label::kDeferred); - Branch(IsUndefined(new_target), &return_n, &constructnumber); - - BIND(&return_n); - { args.PopAndReturn(n_value); } - - BIND(&constructnumber); - { - // 4. Let O be ? OrdinaryCreateFromConstructor(NewTarget, - // "%NumberPrototype%", « [[NumberData]] »). - // 5. Set O.[[NumberData]] to n. - // 6. Return O. - - // We are not using Parameter(Descriptor::kJSTarget) and loading the value - // from the current frame here in order to reduce register pressure on the - // fast path. 
- TNode target = LoadTargetFromFrame(); - TNode result = CAST( - CallBuiltin(Builtins::kFastNewObject, context, target, new_target)); - StoreObjectField(result, JSPrimitiveWrapper::kValueOffset, n_value); - args.PopAndReturn(result); - } - } -} - -TF_BUILTIN(GenericLazyDeoptContinuation, ConstructorBuiltinsAssembler) { - TNode result = CAST(Parameter(Descriptor::kResult)); - Return(result); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-constructor-gen.h b/deps/v8/src/builtins/builtins-constructor-gen.h index d6a698ddf64059..2ed83229eba4e2 100644 --- a/deps/v8/src/builtins/builtins-constructor-gen.h +++ b/deps/v8/src/builtins/builtins-constructor-gen.h @@ -15,37 +15,38 @@ class ConstructorBuiltinsAssembler : public CodeStubAssembler { explicit ConstructorBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - TNode EmitFastNewFunctionContext(TNode scope_info, - TNode slots, - TNode context, - ScopeType scope_type); + TNode FastNewFunctionContext(TNode scope_info, + TNode slots, + TNode context, + ScopeType scope_type); - TNode EmitCreateRegExpLiteral( - TNode maybe_feedback_vector, TNode slot, - TNode pattern, TNode flags, TNode context); + TNode CreateRegExpLiteral(TNode maybe_feedback_vector, + TNode slot, + TNode pattern, TNode flags, + TNode context); - TNode EmitCreateShallowArrayLiteral( + TNode CreateShallowArrayLiteral( TNode feedback_vector, TNode slot, - TNode context, Label* call_runtime, - AllocationSiteMode allocation_site_mode); + TNode context, AllocationSiteMode allocation_site_mode, + Label* call_runtime); - TNode EmitCreateEmptyArrayLiteral( - TNode feedback_vector, TNode slot, - TNode context); + TNode CreateEmptyArrayLiteral(TNode feedback_vector, + TNode slot, + TNode context); - TNode EmitCreateShallowObjectLiteral( + TNode CreateShallowObjectLiteral( TNode feedback_vector, TNode slot, Label* call_runtime); - TNode EmitCreateEmptyObjectLiteral(TNode context); + TNode 
CreateEmptyObjectLiteral(TNode context); - TNode EmitFastNewObject(TNode context, - TNode target, - TNode new_target); + TNode FastNewObject(TNode context, + TNode target, + TNode new_target); - TNode EmitFastNewObject(TNode context, - TNode target, - TNode new_target, - Label* call_runtime); + TNode FastNewObject(TNode context, + TNode target, + TNode new_target, + Label* call_runtime); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-conversion-gen.cc b/deps/v8/src/builtins/builtins-conversion-gen.cc index e524f39b5fddcf..54fa752969e505 100644 --- a/deps/v8/src/builtins/builtins-conversion-gen.cc +++ b/deps/v8/src/builtins/builtins-conversion-gen.cc @@ -12,181 +12,6 @@ namespace v8 { namespace internal { -class ConversionBuiltinsAssembler : public CodeStubAssembler { - public: - explicit ConversionBuiltinsAssembler(compiler::CodeAssemblerState* state) - : CodeStubAssembler(state) {} - - protected: - void Generate_NonPrimitiveToPrimitive(TNode context, - TNode input, - ToPrimitiveHint hint); - - void Generate_OrdinaryToPrimitive(TNode context, TNode input, - OrdinaryToPrimitiveHint hint); -}; - -// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] ) -void ConversionBuiltinsAssembler::Generate_NonPrimitiveToPrimitive( - TNode context, TNode input, ToPrimitiveHint hint) { - // Lookup the @@toPrimitive property on the {input}. - TNode exotic_to_prim = - GetProperty(context, input, factory()->to_primitive_symbol()); - - // Check if {exotic_to_prim} is neither null nor undefined. - Label ordinary_to_primitive(this); - GotoIf(IsNullOrUndefined(exotic_to_prim), &ordinary_to_primitive); - { - // Invoke the {exotic_to_prim} method on the {input} with a string - // representation of the {hint}. - TNode hint_string = - HeapConstant(factory()->ToPrimitiveHintString(hint)); - TNode result = Call(context, exotic_to_prim, input, hint_string); - - // Verify that the {result} is actually a primitive. 
- Label if_resultisprimitive(this), - if_resultisnotprimitive(this, Label::kDeferred); - GotoIf(TaggedIsSmi(result), &if_resultisprimitive); - TNode result_instance_type = LoadInstanceType(CAST(result)); - Branch(IsPrimitiveInstanceType(result_instance_type), &if_resultisprimitive, - &if_resultisnotprimitive); - - BIND(&if_resultisprimitive); - { - // Just return the {result}. - Return(result); - } - - BIND(&if_resultisnotprimitive); - { - // Somehow the @@toPrimitive method on {input} didn't yield a primitive. - ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive); - } - } - - // Convert using the OrdinaryToPrimitive algorithm instead. - BIND(&ordinary_to_primitive); - { - Callable callable = CodeFactory::OrdinaryToPrimitive( - isolate(), (hint == ToPrimitiveHint::kString) - ? OrdinaryToPrimitiveHint::kString - : OrdinaryToPrimitiveHint::kNumber); - TailCallStub(callable, context, input); - } -} - -TF_BUILTIN(NonPrimitiveToPrimitive_Default, ConversionBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kDefault); -} - -TF_BUILTIN(NonPrimitiveToPrimitive_Number, ConversionBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kNumber); -} - -TF_BUILTIN(NonPrimitiveToPrimitive_String, ConversionBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Generate_NonPrimitiveToPrimitive(context, input, ToPrimitiveHint::kString); -} - -TF_BUILTIN(StringToNumber, CodeStubAssembler) { - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Return(StringToNumber(input)); -} - -TF_BUILTIN(ToName, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode 
input = CAST(Parameter(Descriptor::kArgument)); - - TVARIABLE(Object, var_input, input); - Label loop(this, &var_input); - Goto(&loop); - BIND(&loop); - { - // Load the current {input} value. - TNode input = var_input.value(); - - // Dispatch based on the type of the {input.} - Label if_inputisbigint(this), if_inputisname(this), if_inputisnumber(this), - if_inputisoddball(this), if_inputisreceiver(this, Label::kDeferred); - GotoIf(TaggedIsSmi(input), &if_inputisnumber); - TNode input_instance_type = LoadInstanceType(CAST(input)); - STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); - GotoIf(IsNameInstanceType(input_instance_type), &if_inputisname); - GotoIf(IsJSReceiverInstanceType(input_instance_type), &if_inputisreceiver); - GotoIf(IsHeapNumberInstanceType(input_instance_type), &if_inputisnumber); - Branch(IsBigIntInstanceType(input_instance_type), &if_inputisbigint, - &if_inputisoddball); - - BIND(&if_inputisbigint); - { - // We don't have a fast-path for BigInt currently, so just - // tail call to the %ToString runtime function here for now. - TailCallRuntime(Runtime::kToStringRT, context, input); - } - - BIND(&if_inputisname); - { - // The {input} is already a Name. - Return(input); - } - - BIND(&if_inputisnumber); - { - // Convert the String {input} to a Number. - TailCallBuiltin(Builtins::kNumberToString, context, input); - } - - BIND(&if_inputisoddball); - { - // Just return the {input}'s string representation. - CSA_ASSERT(this, IsOddballInstanceType(input_instance_type)); - Return(LoadObjectField(CAST(input), Oddball::kToStringOffset)); - } - - BIND(&if_inputisreceiver); - { - // Convert the JSReceiver {input} to a primitive first, - // and then run the loop again with the new {input}, - // which is then a primitive value. 
- var_input = CallBuiltin(Builtins::kNonPrimitiveToPrimitive_String, - context, input); - Goto(&loop); - } - } -} - -TF_BUILTIN(NonNumberToNumber, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Return(NonNumberToNumber(context, input)); -} - -TF_BUILTIN(NonNumberToNumeric, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Return(NonNumberToNumeric(context, input)); -} - -TF_BUILTIN(ToNumeric, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Return(Select( - IsNumber(input), [=] { return CAST(input); }, - [=] { return NonNumberToNumeric(context, CAST(input)); })); -} - // ES6 section 7.1.3 ToNumber ( argument ) TF_BUILTIN(ToNumber, CodeStubAssembler) { // TODO(solanes, v8:6949): Changing this to a TNode crashes with the @@ -206,93 +31,6 @@ TF_BUILTIN(ToNumberConvertBigInt, CodeStubAssembler) { Return(ToNumber(context, input, BigIntHandling::kConvertToNumber)); } -// ES section #sec-tostring-applied-to-the-number-type -TF_BUILTIN(NumberToString, CodeStubAssembler) { - TNode input = CAST(Parameter(Descriptor::kArgument)); - - Return(NumberToString(input)); -} - -// 7.1.1.1 OrdinaryToPrimitive ( O, hint ) -void ConversionBuiltinsAssembler::Generate_OrdinaryToPrimitive( - TNode context, TNode input, OrdinaryToPrimitiveHint hint) { - TVARIABLE(Object, var_result); - Label return_result(this, &var_result); - - Handle method_names[2]; - switch (hint) { - case OrdinaryToPrimitiveHint::kNumber: - method_names[0] = factory()->valueOf_string(); - method_names[1] = factory()->toString_string(); - break; - case OrdinaryToPrimitiveHint::kString: - method_names[0] = factory()->toString_string(); - method_names[1] = factory()->valueOf_string(); - break; - } - for (Handle name : method_names) { - // Lookup the {name} on 
the {input}. - TNode method = GetProperty(context, input, name); - - // Check if the {method} is callable. - Label if_methodiscallable(this), - if_methodisnotcallable(this, Label::kDeferred); - GotoIf(TaggedIsSmi(method), &if_methodisnotcallable); - TNode method_map = LoadMap(CAST(method)); - Branch(IsCallableMap(method_map), &if_methodiscallable, - &if_methodisnotcallable); - - BIND(&if_methodiscallable); - { - // Call the {method} on the {input}. - TNode result = Call(context, method, input); - var_result = result; - - // Return the {result} if it is a primitive. - GotoIf(TaggedIsSmi(result), &return_result); - TNode result_instance_type = LoadInstanceType(CAST(result)); - GotoIf(IsPrimitiveInstanceType(result_instance_type), &return_result); - } - - // Just continue with the next {name} if the {method} is not callable. - Goto(&if_methodisnotcallable); - BIND(&if_methodisnotcallable); - } - - ThrowTypeError(context, MessageTemplate::kCannotConvertToPrimitive); - - BIND(&return_result); - Return(var_result.value()); -} - -TF_BUILTIN(OrdinaryToPrimitive_Number, ConversionBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - Generate_OrdinaryToPrimitive(context, input, - OrdinaryToPrimitiveHint::kNumber); -} - -TF_BUILTIN(OrdinaryToPrimitive_String, ConversionBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kArgument)); - Generate_OrdinaryToPrimitive(context, input, - OrdinaryToPrimitiveHint::kString); -} - -// ES6 section 7.1.2 ToBoolean ( argument ) -TF_BUILTIN(ToBoolean, CodeStubAssembler) { - TNode value = CAST(Parameter(Descriptor::kArgument)); - - Label return_true(this), return_false(this); - BranchIfToBooleanIsTrue(value, &return_true, &return_false); - - BIND(&return_true); - Return(TrueConstant()); - - BIND(&return_false); - Return(FalseConstant()); -} - // ES6 section 7.1.2 ToBoolean ( argument ) // 
Requires parameter on stack so that it can be used as a continuation from a // LAZY deopt. @@ -309,130 +47,6 @@ TF_BUILTIN(ToBooleanLazyDeoptContinuation, CodeStubAssembler) { Return(FalseConstant()); } -TF_BUILTIN(ToLength, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - - // We might need to loop once for ToNumber conversion. - TVARIABLE(Object, var_len, CAST(Parameter(Descriptor::kArgument))); - Label loop(this, &var_len); - Goto(&loop); - BIND(&loop); - { - // Shared entry points. - Label return_len(this), return_two53minus1(this, Label::kDeferred), - return_zero(this, Label::kDeferred); - - // Load the current {len} value. - TNode len = var_len.value(); - - // Check if {len} is a positive Smi. - GotoIf(TaggedIsPositiveSmi(len), &return_len); - - // Check if {len} is a (negative) Smi. - GotoIf(TaggedIsSmi(len), &return_zero); - - // Check if {len} is a HeapNumber. - TNode len_heap_object = CAST(len); - Label if_lenisheapnumber(this), - if_lenisnotheapnumber(this, Label::kDeferred); - Branch(IsHeapNumber(len_heap_object), &if_lenisheapnumber, - &if_lenisnotheapnumber); - - BIND(&if_lenisheapnumber); - { - // Load the floating-point value of {len}. - TNode len_value = LoadHeapNumberValue(len_heap_object); - - // Check if {len} is not greater than zero. - GotoIfNot(Float64GreaterThan(len_value, Float64Constant(0.0)), - &return_zero); - - // Check if {len} is greater than or equal to 2^53-1. - GotoIf(Float64GreaterThanOrEqual(len_value, - Float64Constant(kMaxSafeInteger)), - &return_two53minus1); - - // Round the {len} towards -Infinity. - TNode value = Float64Floor(len_value); - TNode result = ChangeFloat64ToTagged(value); - Return(result); - } - - BIND(&if_lenisnotheapnumber); - { - // Need to convert {len} to a Number first. 
- var_len = CallBuiltin(Builtins::kNonNumberToNumber, context, len); - Goto(&loop); - } - - BIND(&return_len); - Return(var_len.value()); - - BIND(&return_two53minus1); - Return(NumberConstant(kMaxSafeInteger)); - - BIND(&return_zero); - Return(SmiConstant(0)); - } -} - -// ES6 section 7.1.13 ToObject (argument) -TF_BUILTIN(ToObject, CodeStubAssembler) { - Label if_smi(this, Label::kDeferred), if_jsreceiver(this), - if_noconstructor(this, Label::kDeferred), - if_wrapjs_primitive_wrapper(this); - - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode object = CAST(Parameter(Descriptor::kArgument)); - - TVARIABLE(IntPtrT, constructor_function_index_var); - - GotoIf(TaggedIsSmi(object), &if_smi); - - TNode map = LoadMap(CAST(object)); - TNode instance_type = LoadMapInstanceType(map); - GotoIf(IsJSReceiverInstanceType(instance_type), &if_jsreceiver); - - TNode constructor_function_index = - LoadMapConstructorFunctionIndex(map); - GotoIf(WordEqual(constructor_function_index, - IntPtrConstant(Map::kNoConstructorFunctionIndex)), - &if_noconstructor); - constructor_function_index_var = constructor_function_index; - Goto(&if_wrapjs_primitive_wrapper); - - BIND(&if_smi); - constructor_function_index_var = - IntPtrConstant(Context::NUMBER_FUNCTION_INDEX); - Goto(&if_wrapjs_primitive_wrapper); - - BIND(&if_wrapjs_primitive_wrapper); - TNode native_context = LoadNativeContext(context); - TNode constructor = CAST(LoadContextElement( - native_context, constructor_function_index_var.value())); - TNode initial_map = LoadObjectField( - constructor, JSFunction::kPrototypeOrInitialMapOffset); - TNode js_primitive_wrapper = - Allocate(JSPrimitiveWrapper::kHeaderSize); - StoreMapNoWriteBarrier(js_primitive_wrapper, initial_map); - StoreObjectFieldRoot(js_primitive_wrapper, - JSPrimitiveWrapper::kPropertiesOrHashOffset, - RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(js_primitive_wrapper, - JSPrimitiveWrapper::kElementsOffset, - RootIndex::kEmptyFixedArray); - 
StoreObjectField(js_primitive_wrapper, JSPrimitiveWrapper::kValueOffset, - object); - Return(js_primitive_wrapper); - - BIND(&if_noconstructor); - ThrowTypeError(context, MessageTemplate::kUndefinedOrNullToObject, - "ToObject"); - - BIND(&if_jsreceiver); - Return(object); -} - // ES6 section 12.5.5 typeof operator TF_BUILTIN(Typeof, CodeStubAssembler) { TNode object = CAST(Parameter(Descriptor::kObject)); diff --git a/deps/v8/src/builtins/builtins-date-gen.cc b/deps/v8/src/builtins/builtins-date-gen.cc index a32003303508cf..05fcc53f12b6a9 100644 --- a/deps/v8/src/builtins/builtins-date-gen.cc +++ b/deps/v8/src/builtins/builtins-date-gen.cc @@ -197,7 +197,7 @@ TF_BUILTIN(DatePrototypeToPrimitive, CodeStubAssembler) { hint_is_invalid(this, Label::kDeferred); // Fast cases for internalized strings. - TNode number_string = numberStringConstant(); + TNode number_string = NumberStringConstant(); GotoIf(TaggedEqual(hint, number_string), &hint_is_number); TNode default_string = DefaultStringConstant(); GotoIf(TaggedEqual(hint, default_string), &hint_is_string); diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h index 84ddf55f6f47d3..a6e020ada3f419 100644 --- a/deps/v8/src/builtins/builtins-definitions.h +++ b/deps/v8/src/builtins/builtins-definitions.h @@ -51,12 +51,19 @@ namespace internal { ASM(Call_ReceiverIsNullOrUndefined, CallTrampoline) \ ASM(Call_ReceiverIsNotNullOrUndefined, CallTrampoline) \ ASM(Call_ReceiverIsAny, CallTrampoline) \ + TFC(Call_ReceiverIsNullOrUndefined_WithFeedback, \ + CallTrampoline_WithFeedback) \ + TFC(Call_ReceiverIsNotNullOrUndefined_WithFeedback, \ + CallTrampoline_WithFeedback) \ + TFC(Call_ReceiverIsAny_WithFeedback, CallTrampoline_WithFeedback) \ \ /* ES6 section 9.5.12[[Call]] ( thisArgument, argumentsList ) */ \ TFC(CallProxy, CallTrampoline) \ ASM(CallVarargs, CallVarargs) \ TFC(CallWithSpread, CallWithSpread) \ + TFC(CallWithSpread_WithFeedback, CallWithSpread_WithFeedback) \ 
TFC(CallWithArrayLike, CallWithArrayLike) \ + TFC(CallWithArrayLike_WithFeedback, CallWithArrayLike_WithFeedback) \ ASM(CallForwardVarargs, CallForwardVarargs) \ ASM(CallFunctionForwardVarargs, CallForwardVarargs) \ /* Call an API callback via a {FunctionTemplateInfo}, doing appropriate */ \ @@ -76,20 +83,17 @@ namespace internal { ASM(Construct, JSTrampoline) \ ASM(ConstructVarargs, ConstructVarargs) \ TFC(ConstructWithSpread, ConstructWithSpread) \ + TFC(ConstructWithSpread_WithFeedback, ConstructWithSpread_WithFeedback) \ TFC(ConstructWithArrayLike, ConstructWithArrayLike) \ + TFC(ConstructWithArrayLike_WithFeedback, \ + ConstructWithArrayLike_WithFeedback) \ ASM(ConstructForwardVarargs, ConstructForwardVarargs) \ ASM(ConstructFunctionForwardVarargs, ConstructForwardVarargs) \ + TFC(Construct_WithFeedback, Construct_WithFeedback) \ ASM(JSConstructStubGeneric, Dummy) \ ASM(JSBuiltinsConstructStub, Dummy) \ TFC(FastNewObject, FastNewObject) \ TFS(FastNewClosure, kSharedFunctionInfo, kFeedbackCell) \ - TFC(FastNewFunctionContextEval, FastNewFunctionContext) \ - TFC(FastNewFunctionContextFunction, FastNewFunctionContext) \ - TFS(CreateRegExpLiteral, kFeedbackVector, kSlot, kPattern, kFlags) \ - TFS(CreateEmptyArrayLiteral, kFeedbackVector, kSlot) \ - TFS(CreateShallowArrayLiteral, kFeedbackVector, kSlot, kConstantElements) \ - TFS(CreateShallowObjectLiteral, kFeedbackVector, kSlot, \ - kObjectBoilerplateDescription, kFlags) \ /* ES6 section 9.5.14 [[Construct]] ( argumentsList, newTarget) */ \ TFC(ConstructProxy, JSTrampoline) \ \ @@ -177,7 +181,6 @@ namespace internal { TFS(CopyFastSmiOrObjectElements, kObject) \ TFC(GrowFastDoubleElements, GrowArrayElements) \ TFC(GrowFastSmiOrObjectElements, GrowArrayElements) \ - TFC(NewArgumentsElements, NewArgumentsElements) \ \ /* Debugger */ \ TFJ(DebugBreakTrampoline, kDontAdaptArgumentsSentinel) \ @@ -185,22 +188,8 @@ namespace internal { ASM(HandleDebuggerStatement, ContextOnly) \ \ /* Type conversions */ \ - 
TFC(ToObject, TypeConversion) \ - TFC(ToBoolean, TypeConversion) \ - TFC(OrdinaryToPrimitive_Number, TypeConversion) \ - TFC(OrdinaryToPrimitive_String, TypeConversion) \ - TFC(NonPrimitiveToPrimitive_Default, TypeConversion) \ - TFC(NonPrimitiveToPrimitive_Number, TypeConversion) \ - TFC(NonPrimitiveToPrimitive_String, TypeConversion) \ - TFC(StringToNumber, TypeConversion) \ - TFC(ToName, TypeConversion) \ - TFC(NonNumberToNumber, TypeConversion) \ - TFC(NonNumberToNumeric, TypeConversion) \ TFC(ToNumber, TypeConversion) \ TFC(ToNumberConvertBigInt, TypeConversion) \ - TFC(ToNumeric, TypeConversion) \ - TFC(NumberToString, TypeConversion) \ - TFC(ToLength, TypeConversion) \ TFC(Typeof, Typeof) \ TFC(GetSuperConstructor, Typeof) \ TFC(BigIntToI64, BigIntToI64) \ @@ -503,11 +492,7 @@ namespace internal { CPP(FunctionConstructor) \ ASM(FunctionPrototypeApply, JSTrampoline) \ CPP(FunctionPrototypeBind) \ - /* ES6 #sec-function.prototype.bind */ \ - TFJ(FastFunctionPrototypeBind, kDontAdaptArgumentsSentinel) \ ASM(FunctionPrototypeCall, JSTrampoline) \ - /* ES6 #sec-function.prototype-@@hasinstance */ \ - TFJ(FunctionPrototypeHasInstance, 1, kReceiver, kV) \ /* ES6 #sec-function.prototype.tostring */ \ CPP(FunctionPrototypeToString) \ \ @@ -605,57 +590,44 @@ namespace internal { TFJ(MapIteratorPrototypeNext, 0, kReceiver) \ TFS(MapIteratorToList, kSource) \ \ - /* Number */ \ - TFC(AllocateHeapNumber, AllocateHeapNumber) \ /* ES #sec-number-constructor */ \ - TFJ(NumberConstructor, kDontAdaptArgumentsSentinel) \ - /* ES6 #sec-number.isfinite */ \ - TFJ(NumberIsFinite, 1, kReceiver, kNumber) \ - /* ES6 #sec-number.isinteger */ \ - TFJ(NumberIsInteger, 1, kReceiver, kNumber) \ - /* ES6 #sec-number.isnan */ \ - TFJ(NumberIsNaN, 1, kReceiver, kNumber) \ - /* ES6 #sec-number.issafeinteger */ \ - TFJ(NumberIsSafeInteger, 1, kReceiver, kNumber) \ - /* ES6 #sec-number.parsefloat */ \ - TFJ(NumberParseFloat, 1, kReceiver, kString) \ - /* ES6 #sec-number.parseint */ \ - 
TFJ(NumberParseInt, 2, kReceiver, kString, kRadix) \ - TFS(ParseInt, kString, kRadix) \ CPP(NumberPrototypeToExponential) \ CPP(NumberPrototypeToFixed) \ CPP(NumberPrototypeToLocaleString) \ CPP(NumberPrototypeToPrecision) \ - /* ES6 #sec-number.prototype.valueof */ \ - TFJ(NumberPrototypeValueOf, 0, kReceiver) \ - TFC(Add, BinaryOp) \ - TFC(Subtract, BinaryOp) \ - TFC(Multiply, BinaryOp) \ - TFC(Divide, BinaryOp) \ - TFC(Modulus, BinaryOp) \ - TFC(Exponentiate, BinaryOp) \ - TFC(BitwiseAnd, BinaryOp) \ - TFC(BitwiseOr, BinaryOp) \ - TFC(BitwiseXor, BinaryOp) \ - TFC(ShiftLeft, BinaryOp) \ - TFC(ShiftRight, BinaryOp) \ - TFC(ShiftRightLogical, BinaryOp) \ - TFC(LessThan, Compare) \ - TFC(LessThanOrEqual, Compare) \ - TFC(GreaterThan, Compare) \ - TFC(GreaterThanOrEqual, Compare) \ - TFC(Equal, Compare) \ TFC(SameValue, Compare) \ TFC(SameValueNumbersOnly, Compare) \ - TFC(StrictEqual, Compare) \ - TFS(BitwiseNot, kValue) \ - TFS(Decrement, kValue) \ - TFS(Increment, kValue) \ - TFS(Negate, kValue) \ + \ + /* Binary ops with feedback collection */ \ + TFC(Add_WithFeedback, BinaryOp_WithFeedback) \ + TFC(Subtract_WithFeedback, BinaryOp_WithFeedback) \ + TFC(Multiply_WithFeedback, BinaryOp_WithFeedback) \ + TFC(Divide_WithFeedback, BinaryOp_WithFeedback) \ + TFC(Modulus_WithFeedback, BinaryOp_WithFeedback) \ + TFC(Exponentiate_WithFeedback, BinaryOp_WithFeedback) \ + TFC(BitwiseAnd_WithFeedback, BinaryOp_WithFeedback) \ + TFC(BitwiseOr_WithFeedback, BinaryOp_WithFeedback) \ + TFC(BitwiseXor_WithFeedback, BinaryOp_WithFeedback) \ + TFC(ShiftLeft_WithFeedback, BinaryOp_WithFeedback) \ + TFC(ShiftRight_WithFeedback, BinaryOp_WithFeedback) \ + TFC(ShiftRightLogical_WithFeedback, BinaryOp_WithFeedback) \ + \ + /* Compare ops with feedback collection */ \ + TFC(Equal_WithFeedback, Compare_WithFeedback) \ + TFC(StrictEqual_WithFeedback, Compare_WithFeedback) \ + TFC(LessThan_WithFeedback, Compare_WithFeedback) \ + TFC(GreaterThan_WithFeedback, Compare_WithFeedback) \ + 
TFC(LessThanOrEqual_WithFeedback, Compare_WithFeedback) \ + TFC(GreaterThanOrEqual_WithFeedback, Compare_WithFeedback) \ + \ + /* Unary ops with feedback collection */ \ + TFC(BitwiseNot_WithFeedback, UnaryOp_WithFeedback) \ + TFC(Decrement_WithFeedback, UnaryOp_WithFeedback) \ + TFC(Increment_WithFeedback, UnaryOp_WithFeedback) \ + TFC(Negate_WithFeedback, UnaryOp_WithFeedback) \ \ /* Object */ \ /* ES #sec-object-constructor */ \ - TFJ(ObjectConstructor, kDontAdaptArgumentsSentinel) \ TFJ(ObjectAssign, kDontAdaptArgumentsSentinel) \ /* ES #sec-object.create */ \ TFJ(ObjectCreate, kDontAdaptArgumentsSentinel) \ @@ -688,6 +660,7 @@ namespace internal { /* instanceof */ \ TFC(OrdinaryHasInstance, Compare) \ TFC(InstanceOf, Compare) \ + TFC(InstanceOf_WithFeedback, Compare_WithFeedback) \ \ /* for-in */ \ TFS(ForInEnumerate, kReceiver) \ @@ -727,6 +700,7 @@ namespace internal { TFS(RegExpExecAtom, kRegExp, kString, kLastIndex, kMatchInfo) \ TFS(RegExpExecInternal, kRegExp, kString, kLastIndex, kMatchInfo) \ ASM(RegExpInterpreterTrampoline, CCall) \ + ASM(RegExpExperimentalTrampoline, CCall) \ \ /* Set */ \ TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \ @@ -762,7 +736,7 @@ namespace internal { CPP(AtomicsNotify) \ CPP(AtomicsIsLockFree) \ CPP(AtomicsWait) \ - CPP(AtomicsWake) \ + CPP(AtomicsWaitAsync) \ \ /* String */ \ /* ES #sec-string.fromcodepoint */ \ @@ -804,7 +778,6 @@ namespace internal { /* TypedArray */ \ /* ES #sec-typedarray-constructors */ \ TFJ(TypedArrayBaseConstructor, 0, kReceiver) \ - TFJ(GenericLazyDeoptContinuation, 1, kReceiver, kResult) \ TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \ CPP(TypedArrayPrototypeBuffer) \ /* ES6 #sec-get-%typedarray%.prototype.bytelength */ \ @@ -813,12 +786,6 @@ namespace internal { TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \ /* ES6 #sec-get-%typedarray%.prototype.length */ \ TFJ(TypedArrayPrototypeLength, 0, kReceiver) \ - /* ES6 #sec-%typedarray%.prototype.entries */ \ - 
TFJ(TypedArrayPrototypeEntries, 0, kReceiver) \ - /* ES6 #sec-%typedarray%.prototype.keys */ \ - TFJ(TypedArrayPrototypeKeys, 0, kReceiver) \ - /* ES6 #sec-%typedarray%.prototype.values */ \ - TFJ(TypedArrayPrototypeValues, 0, kReceiver) \ /* ES6 #sec-%typedarray%.prototype.copywithin */ \ CPP(TypedArrayPrototypeCopyWithin) \ /* ES6 #sec-%typedarray%.prototype.fill */ \ @@ -837,19 +804,14 @@ namespace internal { TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \ \ /* Wasm */ \ + ASM(GenericJSToWasmWrapper, Dummy) \ ASM(WasmCompileLazy, Dummy) \ ASM(WasmDebugBreak, Dummy) \ TFC(WasmFloat32ToNumber, WasmFloat32ToNumber) \ TFC(WasmFloat64ToNumber, WasmFloat64ToNumber) \ - TFS(WasmAllocateArray, kMapIndex, kLength, kElementSize) \ - TFS(WasmAllocateStruct, kMapIndex) \ - TFC(WasmAtomicNotify, WasmAtomicNotify) \ + TFS(WasmAllocateArrayWithRtt, kMap, kLength, kElementSize) \ TFC(WasmI32AtomicWait32, WasmI32AtomicWait32) \ - TFC(WasmI32AtomicWait64, WasmI32AtomicWait64) \ TFC(WasmI64AtomicWait32, WasmI64AtomicWait32) \ - TFC(WasmI64AtomicWait64, WasmI64AtomicWait64) \ - TFC(WasmTableInit, WasmTableInit) \ - TFC(WasmTableCopy, WasmTableCopy) \ \ /* WeakMap */ \ TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \ @@ -906,11 +868,11 @@ namespace internal { /* %AsyncFromSyncIteratorPrototype% */ \ /* See tc39.github.io/proposal-async-iteration/ */ \ /* #sec-%asyncfromsynciteratorprototype%-object) */ \ - TFJ(AsyncFromSyncIteratorPrototypeNext, 1, kReceiver, kValue) \ + TFJ(AsyncFromSyncIteratorPrototypeNext, kDontAdaptArgumentsSentinel) \ /* #sec-%asyncfromsynciteratorprototype%.throw */ \ - TFJ(AsyncFromSyncIteratorPrototypeThrow, 1, kReceiver, kReason) \ + TFJ(AsyncFromSyncIteratorPrototypeThrow, kDontAdaptArgumentsSentinel) \ /* #sec-%asyncfromsynciteratorprototype%.return */ \ - TFJ(AsyncFromSyncIteratorPrototypeReturn, 1, kReceiver, kValue) \ + TFJ(AsyncFromSyncIteratorPrototypeReturn, kDontAdaptArgumentsSentinel) \ /* 
#sec-async-iterator-value-unwrap-functions */ \ TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \ \ @@ -946,11 +908,7 @@ namespace internal { CPP(Trace) \ \ /* Weak refs */ \ - CPP(FinalizationRegistryConstructor) \ - CPP(FinalizationRegistryRegister) \ CPP(FinalizationRegistryUnregister) \ - CPP(WeakRefConstructor) \ - CPP(WeakRefDeref) \ \ /* Async modules */ \ TFJ(AsyncModuleEvaluate, kDontAdaptArgumentsSentinel) \ @@ -1066,16 +1024,12 @@ namespace internal { CPP(SegmenterPrototypeSegment) \ /* ecma402 #sec-Intl.Segmenter.supportedLocalesOf */ \ CPP(SegmenterSupportedLocalesOf) \ - /* ecma402 #sec-segment-iterator-prototype-breakType */ \ - CPP(SegmentIteratorPrototypeBreakType) \ - /* ecma402 #sec-segment-iterator-prototype-following */ \ - CPP(SegmentIteratorPrototypeFollowing) \ - /* ecma402 #sec-segment-iterator-prototype-preceding */ \ - CPP(SegmentIteratorPrototypePreceding) \ - /* ecma402 #sec-segment-iterator-prototype-index */ \ - CPP(SegmentIteratorPrototypeIndex) \ /* ecma402 #sec-segment-iterator-prototype-next */ \ CPP(SegmentIteratorPrototypeNext) \ + /* ecma402 #sec-%segmentsprototype%.containing */ \ + CPP(SegmentsPrototypeContaining) \ + /* ecma402 #sec-%segmentsprototype%-@@iterator */ \ + CPP(SegmentsPrototypeIterator) \ /* ES #sec-string.prototype.normalize */ \ CPP(StringPrototypeNormalizeIntl) \ /* ecma402 #sup-string.prototype.tolocalelowercase */ \ diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h index 174b89795f5467..c2eb44debea354 100644 --- a/deps/v8/src/builtins/builtins-descriptors.h +++ b/deps/v8/src/builtins/builtins-descriptors.h @@ -13,34 +13,7 @@ namespace v8 { namespace internal { -#define REVERSE_0(a) a, -#define REVERSE_1(a, b) b, a, -#define REVERSE_2(a, b, c) c, b, a, -#define REVERSE_3(a, b, c, d) d, c, b, a, -#define REVERSE_4(a, b, c, d, e) e, d, c, b, a, -#define REVERSE_5(a, b, c, d, e, f) f, e, d, c, b, a, -#define REVERSE_6(a, b, c, d, e, f, g) g, f, e, d, c, 
b, a, -#define REVERSE_7(a, b, c, d, e, f, g, h) h, g, f, e, d, c, b, a, -#define REVERSE_8(a, b, c, d, e, f, g, h, i) i, h, g, f, e, d, c, b, a, -#define REVERSE_kDontAdaptArgumentsSentinel(...) -#define REVERSE(N, ...) REVERSE_##N(__VA_ARGS__) - // Define interface descriptors for builtins with JS linkage. -#ifdef V8_REVERSE_JSARGS -#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \ - struct Builtin_##Name##_InterfaceDescriptor { \ - enum ParameterIndices { \ - kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \ - REVERSE_##Argc(__VA_ARGS__) kJSNewTarget, \ - kJSActualArgumentsCount, \ - kContext, \ - kParameterCount, \ - }; \ - static_assert((Argc) == static_cast(kParameterCount - 4), \ - "Inconsistent set of arguments"); \ - static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \ - }; -#else #define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \ struct Builtin_##Name##_InterfaceDescriptor { \ enum ParameterIndices { \ @@ -55,7 +28,6 @@ namespace internal { "Inconsistent set of arguments"); \ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \ }; -#endif // Define interface descriptors for builtins with StubCall linkage. #define DEFINE_TFC_INTERFACE_DESCRIPTOR(Name, InterfaceDescriptor) \ diff --git a/deps/v8/src/builtins/builtins-function-gen.cc b/deps/v8/src/builtins/builtins-function-gen.cc deleted file mode 100644 index 1d48ee84d1c492..00000000000000 --- a/deps/v8/src/builtins/builtins-function-gen.cc +++ /dev/null @@ -1,202 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include "src/builtins/builtins-utils-gen.h" -#include "src/builtins/builtins.h" -#include "src/codegen/code-stub-assembler.h" -#include "src/execution/frame-constants.h" -#include "src/objects/api-callbacks.h" -#include "src/objects/descriptor-array.h" - -namespace v8 { -namespace internal { - -TF_BUILTIN(FastFunctionPrototypeBind, CodeStubAssembler) { - Label slow(this); - - TNode argc = - UncheckedCast(Parameter(Descriptor::kJSActualArgumentsCount)); - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode new_target = CAST(Parameter(Descriptor::kJSNewTarget)); - - CodeStubArguments args(this, argc); - - // Check that receiver has instance type of JS_FUNCTION_TYPE - TNode receiver = args.GetReceiver(); - GotoIf(TaggedIsSmi(receiver), &slow); - - TNode receiver_map = LoadMap(CAST(receiver)); - { - TNode instance_type = LoadMapInstanceType(receiver_map); - GotoIfNot( - Word32Or(InstanceTypeEqual(instance_type, JS_FUNCTION_TYPE), - InstanceTypeEqual(instance_type, JS_BOUND_FUNCTION_TYPE)), - &slow); - } - - // Disallow binding of slow-mode functions. We need to figure out whether the - // length and name property are in the original state. - Comment("Disallow binding of slow-mode functions"); - GotoIf(IsDictionaryMap(receiver_map), &slow); - - // Check whether the length and name properties are still present as - // AccessorInfo objects. In that case, their value can be recomputed even if - // the actual value on the object changes. - Comment("Check descriptor array length"); - // Minimum descriptor array length required for fast path. - const int min_nof_descriptors = i::Max(JSFunction::kLengthDescriptorIndex, - JSFunction::kNameDescriptorIndex) + - 1; - TNode nof_descriptors = LoadNumberOfOwnDescriptors(receiver_map); - GotoIf(Int32LessThan(nof_descriptors, Int32Constant(min_nof_descriptors)), - &slow); - - // Check whether the length and name properties are still present as - // AccessorInfo objects. 
In that case, their value can be recomputed even if - // the actual value on the object changes. - Comment("Check name and length properties"); - { - TNode descriptors = LoadMapDescriptors(receiver_map); - const int length_index = JSFunction::kLengthDescriptorIndex; - TNode maybe_length = - LoadKeyByDescriptorEntry(descriptors, length_index); - GotoIf(TaggedNotEqual(maybe_length, LengthStringConstant()), &slow); - - TNode maybe_length_accessor = - LoadValueByDescriptorEntry(descriptors, length_index); - GotoIf(TaggedIsSmi(maybe_length_accessor), &slow); - TNode length_value_map = LoadMap(CAST(maybe_length_accessor)); - GotoIfNot(IsAccessorInfoMap(length_value_map), &slow); - - const int name_index = JSFunction::kNameDescriptorIndex; - TNode maybe_name = LoadKeyByDescriptorEntry(descriptors, name_index); - GotoIf(TaggedNotEqual(maybe_name, NameStringConstant()), &slow); - - TNode maybe_name_accessor = - LoadValueByDescriptorEntry(descriptors, name_index); - GotoIf(TaggedIsSmi(maybe_name_accessor), &slow); - TNode name_value_map = LoadMap(CAST(maybe_name_accessor)); - GotoIfNot(IsAccessorInfoMap(name_value_map), &slow); - } - - // Choose the right bound function map based on whether the target is - // constructable. - Comment("Choose the right bound function map"); - TVARIABLE(Map, bound_function_map); - { - Label with_constructor(this); - TNode native_context = LoadNativeContext(context); - - Label map_done(this, &bound_function_map); - GotoIf(IsConstructorMap(receiver_map), &with_constructor); - - bound_function_map = CAST(LoadContextElement( - native_context, Context::BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX)); - Goto(&map_done); - - BIND(&with_constructor); - bound_function_map = CAST(LoadContextElement( - native_context, Context::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX)); - Goto(&map_done); - - BIND(&map_done); - } - - // Verify that __proto__ matches that of a the target bound function. 
- Comment("Verify that __proto__ matches target bound function"); - TNode prototype = LoadMapPrototype(receiver_map); - TNode expected_prototype = - LoadMapPrototype(bound_function_map.value()); - GotoIf(TaggedNotEqual(prototype, expected_prototype), &slow); - - // Allocate the arguments array. - Comment("Allocate the arguments array"); - TVARIABLE(FixedArray, argument_array); - { - Label empty_arguments(this); - Label arguments_done(this, &argument_array); - GotoIf(Uint32LessThanOrEqual(argc, Int32Constant(1)), &empty_arguments); - TNode elements_length = - Signed(ChangeUint32ToWord(Unsigned(Int32Sub(argc, Int32Constant(1))))); - argument_array = CAST(AllocateFixedArray(PACKED_ELEMENTS, elements_length, - kAllowLargeObjectAllocation)); - TVARIABLE(IntPtrT, index, IntPtrConstant(0)); - VariableList foreach_vars({&index}, zone()); - args.ForEach( - foreach_vars, - [&](TNode arg) { - StoreFixedArrayElement(argument_array.value(), index.value(), arg); - Increment(&index); - }, - IntPtrConstant(1)); - Goto(&arguments_done); - - BIND(&empty_arguments); - argument_array = EmptyFixedArrayConstant(); - Goto(&arguments_done); - - BIND(&arguments_done); - } - - // Determine bound receiver. - Comment("Determine bound receiver"); - TVARIABLE(Object, bound_receiver); - { - Label has_receiver(this); - Label receiver_done(this, &bound_receiver); - GotoIf(Word32NotEqual(argc, Int32Constant(0)), &has_receiver); - bound_receiver = UndefinedConstant(); - Goto(&receiver_done); - - BIND(&has_receiver); - bound_receiver = args.AtIndex(0); - Goto(&receiver_done); - - BIND(&receiver_done); - } - - // Allocate the resulting bound function. 
- Comment("Allocate the resulting bound function"); - { - TNode bound_function = Allocate(JSBoundFunction::kHeaderSize); - StoreMapNoWriteBarrier(bound_function, bound_function_map.value()); - StoreObjectFieldNoWriteBarrier( - bound_function, JSBoundFunction::kBoundTargetFunctionOffset, receiver); - StoreObjectFieldNoWriteBarrier(bound_function, - JSBoundFunction::kBoundThisOffset, - bound_receiver.value()); - StoreObjectFieldNoWriteBarrier(bound_function, - JSBoundFunction::kBoundArgumentsOffset, - argument_array.value()); - TNode empty_fixed_array = EmptyFixedArrayConstant(); - StoreObjectFieldNoWriteBarrier( - bound_function, JSObject::kPropertiesOrHashOffset, empty_fixed_array); - StoreObjectFieldNoWriteBarrier(bound_function, JSObject::kElementsOffset, - empty_fixed_array); - - args.PopAndReturn(bound_function); - } - - BIND(&slow); - { - // We are not using Parameter(Descriptor::kJSTarget) and loading the value - // from the current frame here in order to reduce register pressure on the - // fast path. 
- TNode target = LoadTargetFromFrame(); - TailCallBuiltin(Builtins::kFunctionPrototypeBind, context, target, - new_target, argc); - } -} - -// ES6 #sec-function.prototype-@@hasinstance -TF_BUILTIN(FunctionPrototypeHasInstance, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode f = CAST(Parameter(Descriptor::kReceiver)); - TNode v = CAST(Parameter(Descriptor::kV)); - TNode result = OrdinaryHasInstance(context, f, v); - Return(result); -} - -} // namespace internal -} // namespace v8 diff --git a/deps/v8/src/builtins/builtins-handler-gen.cc b/deps/v8/src/builtins/builtins-handler-gen.cc index 0325ddab7c9745..3f4a53a3461bd4 100644 --- a/deps/v8/src/builtins/builtins-handler-gen.cc +++ b/deps/v8/src/builtins/builtins-handler-gen.cc @@ -9,6 +9,7 @@ #include "src/ic/ic.h" #include "src/ic/keyed-store-generic.h" #include "src/objects/objects-inl.h" +#include "torque-generated/exported-macros-assembler-tq.h" namespace v8 { namespace internal { @@ -78,37 +79,11 @@ TNode HandlerBuiltinsAssembler::EmitKeyedSloppyArguments( TNode receiver, TNode tagged_key, base::Optional> value, Label* bailout, ArgumentsAccessMode access_mode) { - // Mapped arguments are actual arguments. Unmapped arguments are values added - // to the arguments object after it was created for the call. Mapped arguments - // are stored in the context at indexes given by elements[key + 2]. Unmapped - // arguments are stored as regular indexed properties in the arguments array, - // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed - // look at argument object construction. - // - // The sloppy arguments elements array has a special format: - // - // 0: context - // 1: unmapped arguments array - // 2: mapped_index0, - // 3: mapped_index1, - // ... - // - // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments). 
- // If key + 2 >= elements.length then attempt to look in the unmapped - // arguments array (given by elements[1]) and return the value at key, missing - // to the runtime if the unmapped arguments array is not a fixed array or if - // key >= unmapped_arguments_array.length. - // - // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value - // in the unmapped arguments array, as described above. Otherwise, t is a Smi - // index into the context array given at elements[0]. Return the value at - // context[t]. - GotoIfNot(TaggedIsSmi(tagged_key), bailout); TNode key = SmiUntag(CAST(tagged_key)); GotoIf(IntPtrLessThan(key, IntPtrConstant(0)), bailout); - TNode elements = CAST(LoadElements(receiver)); + TNode elements = CAST(LoadElements(receiver)); TNode elements_length = LoadAndUntagFixedArrayBaseLength(elements); TVARIABLE(Object, var_result); @@ -119,20 +94,18 @@ TNode HandlerBuiltinsAssembler::EmitKeyedSloppyArguments( access_mode == ArgumentsAccessMode::kHas); } Label if_mapped(this), if_unmapped(this), end(this, &var_result); - TNode intptr_two = IntPtrConstant(2); - TNode adjusted_length = IntPtrSub(elements_length, intptr_two); - GotoIf(UintPtrGreaterThanOrEqual(key, adjusted_length), &if_unmapped); + GotoIf(UintPtrGreaterThanOrEqual(key, elements_length), &if_unmapped); TNode mapped_index = - LoadFixedArrayElement(elements, IntPtrAdd(key, intptr_two)); + LoadSloppyArgumentsElementsMappedEntries(elements, key); Branch(TaggedEqual(mapped_index, TheHoleConstant()), &if_unmapped, &if_mapped); BIND(&if_mapped); { TNode mapped_index_intptr = SmiUntag(CAST(mapped_index)); - TNode the_context = CAST(LoadFixedArrayElement(elements, 0)); + TNode the_context = LoadSloppyArgumentsElementsContext(elements); if (access_mode == ArgumentsAccessMode::kLoad) { TNode result = LoadContextElement(the_context, mapped_index_intptr); @@ -151,7 +124,7 @@ TNode HandlerBuiltinsAssembler::EmitKeyedSloppyArguments( BIND(&if_unmapped); { TNode backing_store_ho = - 
CAST(LoadFixedArrayElement(elements, 1)); + LoadSloppyArgumentsElementsArguments(elements); GotoIf(TaggedNotEqual(LoadMap(backing_store_ho), FixedArrayMapConstant()), bailout); TNode backing_store = CAST(backing_store_ho); diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc index 61f03b3f993380..d8a5463288dda4 100644 --- a/deps/v8/src/builtins/builtins-internal-gen.cc +++ b/deps/v8/src/builtins/builtins-internal-gen.cc @@ -71,103 +71,6 @@ TF_BUILTIN(GrowFastSmiOrObjectElements, CodeStubAssembler) { key); } -TF_BUILTIN(NewArgumentsElements, CodeStubAssembler) { - TNode frame = UncheckedCast(Parameter(Descriptor::kFrame)); - TNode length = SmiToIntPtr(Parameter(Descriptor::kLength)); - TNode mapped_count = - SmiToIntPtr(Parameter(Descriptor::kMappedCount)); - - // Check if we can allocate in new space. - ElementsKind kind = PACKED_ELEMENTS; - int max_elements = FixedArray::GetMaxLengthForNewSpaceAllocation(kind); - Label if_newspace(this), if_oldspace(this, Label::kDeferred); - Branch(IntPtrLessThan(length, IntPtrConstant(max_elements)), &if_newspace, - &if_oldspace); - - BIND(&if_newspace); - { - // Prefer EmptyFixedArray in case of non-positive {length} (the {length} - // can be negative here for rest parameters). - Label if_empty(this), if_notempty(this); - Branch(IntPtrLessThanOrEqual(length, IntPtrConstant(0)), &if_empty, - &if_notempty); - - BIND(&if_empty); - Return(EmptyFixedArrayConstant()); - - BIND(&if_notempty); - { - // Allocate a FixedArray in new space. - TNode result = CAST(AllocateFixedArray(kind, length)); - - // The elements might be used to back mapped arguments. In that case fill - // the mapped elements (i.e. the first {mapped_count}) with the hole, but - // make sure not to overshoot the {length} if some arguments are missing. - TNode number_of_holes = IntPtrMin(mapped_count, length); - TNode the_hole = TheHoleConstant(); - - // Fill the first elements up to {number_of_holes} with the hole. 
- TVARIABLE(IntPtrT, var_index, IntPtrConstant(0)); - Label loop1(this, &var_index), done_loop1(this); - Goto(&loop1); - BIND(&loop1); - { - // Load the current {index}. - TNode index = var_index.value(); - - // Check if we are done. - GotoIf(IntPtrEqual(index, number_of_holes), &done_loop1); - - // Store the hole into the {result}. - StoreFixedArrayElement(result, index, the_hole, SKIP_WRITE_BARRIER); - - // Continue with next {index}. - var_index = IntPtrAdd(index, IntPtrConstant(1)); - Goto(&loop1); - } - BIND(&done_loop1); - - // Compute the effective {offset} into the {frame}. - TNode offset = IntPtrAdd(length, IntPtrConstant(1)); - - // Copy the parameters from {frame} (starting at {offset}) to {result}. - Label loop2(this, &var_index), done_loop2(this); - Goto(&loop2); - BIND(&loop2); - { - // Load the current {index}. - TNode index = var_index.value(); - - // Check if we are done. - GotoIf(IntPtrEqual(index, length), &done_loop2); - - // Load the parameter at the given {index}. - TNode value = BitcastWordToTagged( - Load(MachineType::Pointer(), frame, - TimesSystemPointerSize(IntPtrSub(offset, index)))); - - // Store the {value} into the {result}. - StoreFixedArrayElement(result, index, value, SKIP_WRITE_BARRIER); - - // Continue with next {index}. - var_index = IntPtrAdd(index, IntPtrConstant(1)); - Goto(&loop2); - } - BIND(&done_loop2); - - Return(result); - } - } - - BIND(&if_oldspace); - { - // Allocate in old space (or large object space). 
- TailCallRuntime(Runtime::kNewArgumentsElements, NoContextConstant(), - BitcastWordToTagged(frame), SmiFromIntPtr(length), - SmiFromIntPtr(mapped_count)); - } -} - TF_BUILTIN(ReturnReceiver, CodeStubAssembler) { TNode receiver = CAST(Parameter(Descriptor::kReceiver)); Return(receiver); @@ -221,9 +124,9 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { TNode IsPageFlagSet(TNode object, int mask) { TNode page = PageFromAddress(object); - TNode flags = - UncheckedCast(Load(MachineType::Pointer(), page, - IntPtrConstant(MemoryChunk::kFlagsOffset))); + TNode flags = UncheckedCast( + Load(MachineType::Pointer(), page, + IntPtrConstant(BasicMemoryChunk::kFlagsOffset))); return WordNotEqual(WordAnd(flags, IntPtrConstant(mask)), IntPtrConstant(0)); } @@ -243,7 +146,7 @@ class RecordWriteCodeStubAssembler : public CodeStubAssembler { TNode* mask) { TNode page = PageFromAddress(object); TNode bitmap = - Load(page, IntPtrConstant(MemoryChunk::kMarkBitmapOffset)); + IntPtrAdd(page, IntPtrConstant(MemoryChunk::kMarkingBitmapOffset)); { // Temp variable to calculate cell offset in bitmap. 
@@ -488,15 +391,12 @@ TF_BUILTIN(RecordWrite, RecordWriteCodeStubAssembler) { BIND(&call_incremental_wb); { TNode function = ExternalConstant( - ExternalReference::incremental_marking_record_write_function()); - TNode isolate_constant = - ExternalConstant(ExternalReference::isolate_address(isolate())); + ExternalReference::write_barrier_marking_from_code_function()); TNode fp_mode = UncheckedCast(Parameter(Descriptor::kFPMode)); TNode object = BitcastTaggedToWord(Parameter(Descriptor::kObject)); - CallCFunction3WithCallerSavedRegistersMode( - function, object, slot, isolate_constant, fp_mode, &exit); + CallCFunction2WithCallerSavedRegistersMode( + function, object, slot, fp_mode, &exit); } } diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc index 7c8cde70dd89bd..d5cdc16db358db 100644 --- a/deps/v8/src/builtins/builtins-intl.cc +++ b/deps/v8/src/builtins/builtins-intl.cc @@ -28,10 +28,10 @@ #include "src/objects/js-relative-time-format-inl.h" #include "src/objects/js-segment-iterator-inl.h" #include "src/objects/js-segmenter-inl.h" +#include "src/objects/js-segments-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/property-descriptor.h" #include "src/objects/smi.h" - #include "unicode/brkiter.h" namespace v8 { @@ -176,13 +176,13 @@ V8_WARN_UNUSED_RESULT Object DateTimeFormatRange( Handle dtf = Handle::cast(date_format_holder); - // 4. If startDate is undefined or endDate is undefined, throw a RangeError + // 4. If startDate is undefined or endDate is undefined, throw a TypeError // exception. Handle start_date = args.atOrUndefined(isolate, 1); Handle end_date = args.atOrUndefined(isolate, 2); if (start_date->IsUndefined(isolate) || end_date->IsUndefined(isolate)) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewRangeError(MessageTemplate::kInvalidTimeValue)); + isolate, NewTypeError(MessageTemplate::kInvalidTimeValue)); } // 5. Let x be ? ToNumber(startDate). 
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, start_date, @@ -968,32 +968,9 @@ BUILTIN(CollatorInternalCompare) { return *Intl::CompareStrings(isolate, *icu_collator, string_x, string_y); } -// ecma402 #sec-segment-iterator-prototype-breakType -BUILTIN(SegmentIteratorPrototypeBreakType) { - const char* const method = "get %SegmentIteratorPrototype%.breakType"; - HandleScope scope(isolate); - - CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method); - return *segment_iterator->BreakType(); -} - -// ecma402 #sec-segment-iterator-prototype-following -BUILTIN(SegmentIteratorPrototypeFollowing) { - const char* const method = "%SegmentIteratorPrototype%.following"; - HandleScope scope(isolate); - CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method); - - Handle from = args.atOrUndefined(isolate, 1); - - Maybe success = - JSSegmentIterator::Following(isolate, segment_iterator, from); - MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception()); - return *isolate->factory()->ToBoolean(success.FromJust()); -} - -// ecma402 #sec-segment-iterator-prototype-next +// ecma402 #sec-%segmentiteratorprototype%.next BUILTIN(SegmentIteratorPrototypeNext) { - const char* const method = "%SegmentIteratorPrototype%.next"; + const char* const method = "%SegmentIterator.prototype%.next"; HandleScope scope(isolate); CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method); @@ -1001,29 +978,7 @@ BUILTIN(SegmentIteratorPrototypeNext) { JSSegmentIterator::Next(isolate, segment_iterator)); } -// ecma402 #sec-segment-iterator-prototype-preceding -BUILTIN(SegmentIteratorPrototypePreceding) { - const char* const method = "%SegmentIteratorPrototype%.preceding"; - HandleScope scope(isolate); - CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method); - - Handle from = args.atOrUndefined(isolate, 1); - - Maybe success = - JSSegmentIterator::Preceding(isolate, segment_iterator, from); - MAYBE_RETURN(success, ReadOnlyRoots(isolate).exception()); - return 
*isolate->factory()->ToBoolean(success.FromJust()); -} - -// ecma402 #sec-segment-iterator-prototype-index -BUILTIN(SegmentIteratorPrototypeIndex) { - const char* const method = "get %SegmentIteratorPrototype%.index"; - HandleScope scope(isolate); - - CHECK_RECEIVER(JSSegmentIterator, segment_iterator, method); - return *JSSegmentIterator::Index(isolate, segment_iterator); -} - +// ecma402 #sec-intl.segmenter BUILTIN(SegmenterConstructor) { HandleScope scope(isolate); @@ -1032,6 +987,7 @@ BUILTIN(SegmenterConstructor) { "Intl.Segmenter"); } +// ecma402 #sec-intl.segmenter.supportedlocalesof BUILTIN(SegmenterSupportedLocalesOf) { HandleScope scope(isolate); Handle locales = args.atOrUndefined(isolate, 1); @@ -1043,30 +999,54 @@ BUILTIN(SegmenterSupportedLocalesOf) { JSSegmenter::GetAvailableLocales(), locales, options)); } +// ecma402 #sec-intl.segmenter.prototype.resolvedoptions BUILTIN(SegmenterPrototypeResolvedOptions) { HandleScope scope(isolate); - CHECK_RECEIVER(JSSegmenter, segmenter_holder, + CHECK_RECEIVER(JSSegmenter, segmenter, "Intl.Segmenter.prototype.resolvedOptions"); - return *JSSegmenter::ResolvedOptions(isolate, segmenter_holder); + return *JSSegmenter::ResolvedOptions(isolate, segmenter); } -// ecma402 #sec-Intl.Segmenter.prototype.segment +// ecma402 #sec-intl.segmenter.prototype.segment BUILTIN(SegmenterPrototypeSegment) { HandleScope scope(isolate); - CHECK_RECEIVER(JSSegmenter, segmenter_holder, - "Intl.Segmenter.prototype.segment"); + CHECK_RECEIVER(JSSegmenter, segmenter, "Intl.Segmenter.prototype.segment"); Handle input_text = args.atOrUndefined(isolate, 1); // 3. Let string be ? ToString(string). - Handle text; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, text, + Handle string; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, string, Object::ToString(isolate, input_text)); - // 4. Return ? CreateSegmentIterator(segment, string). + // 4. Return ? CreateSegmentsObject(segmenter, string). 
+ RETURN_RESULT_OR_FAILURE(isolate, + JSSegments::Create(isolate, segmenter, string)); +} + +// ecma402 #sec-%segmentsprototype%.containing +BUILTIN(SegmentsPrototypeContaining) { + const char* const method = "%Segments.prototype%.containing"; + HandleScope scope(isolate); + CHECK_RECEIVER(JSSegments, segments, method); + Handle index = args.atOrUndefined(isolate, 1); + + // 6. Let n be ? ToInteger(index). + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, index, + Object::ToInteger(isolate, index)); + double const n = index->Number(); + + RETURN_RESULT_OR_FAILURE( + isolate, JSSegments::Containing(isolate, segments, static_cast(n))); +} + +// ecma402 #sec-%segmentsprototype%-@@iterator +BUILTIN(SegmentsPrototypeIterator) { + const char* const method = "%SegmentIsPrototype%[@@iterator]"; + HandleScope scope(isolate); + CHECK_RECEIVER(JSSegments, segments, method); RETURN_RESULT_OR_FAILURE( isolate, - JSSegmentIterator::Create( - isolate, segmenter_holder->icu_break_iterator().raw()->clone(), - segmenter_holder->granularity(), text)); + JSSegmentIterator::Create(isolate, segments->icu_break_iterator().raw(), + segments->granularity())); } BUILTIN(V8BreakIteratorConstructor) { diff --git a/deps/v8/src/builtins/builtins-number-gen.cc b/deps/v8/src/builtins/builtins-number-gen.cc index 4513d73abc77b7..4e8bcae60be2a8 100644 --- a/deps/v8/src/builtins/builtins-number-gen.cc +++ b/deps/v8/src/builtins/builtins-number-gen.cc @@ -6,6 +6,7 @@ #include "src/builtins/builtins.h" #include "src/codegen/code-stub-assembler.h" #include "src/ic/binary-op-assembler.h" +#include "src/ic/unary-op-assembler.h" namespace v8 { namespace internal { @@ -13,983 +14,108 @@ namespace internal { // ----------------------------------------------------------------------------- // ES6 section 20.1 Number Objects -class NumberBuiltinsAssembler : public CodeStubAssembler { - public: - explicit NumberBuiltinsAssembler(compiler::CodeAssemblerState* state) - : CodeStubAssembler(state) {} - - protected: - 
template - void EmitBitwiseOp(Operation op) { - TNode left = CAST(Parameter(Descriptor::kLeft)); - TNode right = CAST(Parameter(Descriptor::kRight)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - BinaryOpAssembler binop_asm(state()); - Return(binop_asm.Generate_BitwiseBinaryOp(op, left, right, context)); - } - - template - void RelationalComparisonBuiltin(Operation op) { - TNode lhs = CAST(Parameter(Descriptor::kLeft)); - TNode rhs = CAST(Parameter(Descriptor::kRight)); - TNode context = CAST(Parameter(Descriptor::kContext)); - - Return(RelationalComparison(op, lhs, rhs, context)); - } - - template - void UnaryOp(TVariable* var_input, Label* do_smi, Label* do_double, - TVariable* var_input_double, Label* do_bigint); - - template - void BinaryOp(Label* smis, TVariable* var_left, - TVariable* var_right, Label* doubles, - TVariable* var_left_double, - TVariable* var_right_double, Label* bigints); -}; - -// ES6 #sec-number.isfinite -TF_BUILTIN(NumberIsFinite, CodeStubAssembler) { - TNode number = CAST(Parameter(Descriptor::kNumber)); - - Label return_true(this), return_false(this); - - // Check if {number} is a Smi. - GotoIf(TaggedIsSmi(number), &return_true); - - // Check if {number} is a HeapNumber. - TNode number_heap_object = CAST(number); - GotoIfNot(IsHeapNumber(number_heap_object), &return_false); - - // Check if {number} contains a finite, non-NaN value. 
- TNode number_value = LoadHeapNumberValue(number_heap_object); - BranchIfFloat64IsNaN(Float64Sub(number_value, number_value), &return_false, - &return_true); - - BIND(&return_true); - Return(TrueConstant()); - - BIND(&return_false); - Return(FalseConstant()); -} - -TF_BUILTIN(AllocateHeapNumber, CodeStubAssembler) { - TNode result = AllocateHeapNumber(); - Return(result); -} - -// ES6 #sec-number.isinteger -TF_BUILTIN(NumberIsInteger, CodeStubAssembler) { - TNode number = CAST(Parameter(Descriptor::kNumber)); - Return(SelectBooleanConstant(IsInteger(number))); -} - -// ES6 #sec-number.isnan -TF_BUILTIN(NumberIsNaN, CodeStubAssembler) { - TNode number = CAST(Parameter(Descriptor::kNumber)); - - Label return_true(this), return_false(this); - - // Check if {number} is a Smi. - GotoIf(TaggedIsSmi(number), &return_false); - - // Check if {number} is a HeapNumber. - TNode number_heap_object = CAST(number); - GotoIfNot(IsHeapNumber(number_heap_object), &return_false); - - // Check if {number} contains a NaN value. - TNode number_value = LoadHeapNumberValue(number_heap_object); - BranchIfFloat64IsNaN(number_value, &return_true, &return_false); - - BIND(&return_true); - Return(TrueConstant()); - - BIND(&return_false); - Return(FalseConstant()); -} - -// ES6 #sec-number.issafeinteger -TF_BUILTIN(NumberIsSafeInteger, CodeStubAssembler) { - TNode number = CAST(Parameter(Descriptor::kNumber)); - Return(SelectBooleanConstant(IsSafeInteger(number))); -} - -// ES6 #sec-number.parsefloat -TF_BUILTIN(NumberParseFloat, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - - // We might need to loop once for ToString conversion. - TVARIABLE(Object, var_input, CAST(Parameter(Descriptor::kString))); - Label loop(this, &var_input); - Goto(&loop); - BIND(&loop); - { - // Load the current {input} value. - TNode input = var_input.value(); - - // Check if the {input} is a HeapObject or a Smi. 
- Label if_inputissmi(this), if_inputisnotsmi(this); - Branch(TaggedIsSmi(input), &if_inputissmi, &if_inputisnotsmi); - - BIND(&if_inputissmi); - { - // The {input} is already a Number, no need to do anything. - Return(input); - } - - BIND(&if_inputisnotsmi); - { - // The {input} is a HeapObject, check if it's already a String. - TNode input_heap_object = CAST(input); - Label if_inputisstring(this), if_inputisnotstring(this); - TNode input_map = LoadMap(input_heap_object); - TNode input_instance_type = LoadMapInstanceType(input_map); - Branch(IsStringInstanceType(input_instance_type), &if_inputisstring, - &if_inputisnotstring); - - BIND(&if_inputisstring); - { - // The {input} is already a String, check if {input} contains - // a cached array index. - Label if_inputcached(this), if_inputnotcached(this); - TNode input_hash = LoadNameHashField(CAST(input)); - Branch(IsClearWord32(input_hash, - Name::kDoesNotContainCachedArrayIndexMask), - &if_inputcached, &if_inputnotcached); - - BIND(&if_inputcached); - { - // Just return the {input}s cached array index. - TNode input_array_index = - DecodeWordFromWord32(input_hash); - Return(SmiTag(Signed(input_array_index))); - } - - BIND(&if_inputnotcached); - { - // Need to fall back to the runtime to convert {input} to double. - Return(CallRuntime(Runtime::kStringParseFloat, context, input)); - } - } - - BIND(&if_inputisnotstring); - { - // The {input} is neither a String nor a Smi, check for HeapNumber. - Label if_inputisnumber(this), - if_inputisnotnumber(this, Label::kDeferred); - Branch(IsHeapNumberMap(input_map), &if_inputisnumber, - &if_inputisnotnumber); - - BIND(&if_inputisnumber); - { - // The {input} is already a Number, take care of -0. 
- Label if_inputiszero(this), if_inputisnotzero(this); - TNode input_value = LoadHeapNumberValue(input_heap_object); - Branch(Float64Equal(input_value, Float64Constant(0.0)), - &if_inputiszero, &if_inputisnotzero); - - BIND(&if_inputiszero); - Return(SmiConstant(0)); - - BIND(&if_inputisnotzero); - Return(input); - } - - BIND(&if_inputisnotnumber); - { - // Need to convert the {input} to String first. - // TODO(bmeurer): This could be more efficient if necessary. - var_input = CallBuiltin(Builtins::kToString, context, input); - Goto(&loop); - } - } - } - } -} - -// ES6 #sec-number.parseint -TF_BUILTIN(ParseInt, CodeStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kString)); - TNode radix = CAST(Parameter(Descriptor::kRadix)); - - // Check if {radix} is treated as 10 (i.e. undefined, 0 or 10). - Label if_radix10(this), if_generic(this, Label::kDeferred); - GotoIf(IsUndefined(radix), &if_radix10); - GotoIf(TaggedEqual(radix, SmiConstant(10)), &if_radix10); - GotoIf(TaggedEqual(radix, SmiConstant(0)), &if_radix10); - Goto(&if_generic); - - BIND(&if_radix10); - { - // Check if we can avoid the ToString conversion on {input}. - Label if_inputissmi(this), if_inputisheapnumber(this), - if_inputisstring(this); - GotoIf(TaggedIsSmi(input), &if_inputissmi); - TNode input_map = LoadMap(CAST(input)); - GotoIf(IsHeapNumberMap(input_map), &if_inputisheapnumber); - TNode input_instance_type = LoadMapInstanceType(input_map); - Branch(IsStringInstanceType(input_instance_type), &if_inputisstring, - &if_generic); - - BIND(&if_inputissmi); - { - // Just return the {input}. - Return(input); - } - - BIND(&if_inputisheapnumber); - { - // Check if the {input} value is in Signed32 range. 
- Label if_inputissigned32(this); - TNode input_value = LoadHeapNumberValue(CAST(input)); - TNode input_value32 = - Signed(TruncateFloat64ToWord32(input_value)); - GotoIf(Float64Equal(input_value, ChangeInt32ToFloat64(input_value32)), - &if_inputissigned32); - - // Check if the absolute {input} value is in the [1,1<<31[ range. - // Take the generic path for the range [0,1[ because the result - // could be -0. - TNode input_value_abs = Float64Abs(input_value); - - GotoIfNot(Float64LessThan(input_value_abs, Float64Constant(1u << 31)), - &if_generic); - Branch(Float64LessThanOrEqual(Float64Constant(1), input_value_abs), - &if_inputissigned32, &if_generic); - - // Return the truncated int32 value, and return the tagged result. - BIND(&if_inputissigned32); - TNode result = ChangeInt32ToTagged(input_value32); - Return(result); - } - - BIND(&if_inputisstring); - { - // Check if the String {input} has a cached array index. - TNode input_hash = LoadNameHashField(CAST(input)); - GotoIf(IsSetWord32(input_hash, Name::kDoesNotContainCachedArrayIndexMask), - &if_generic); - - // Return the cached array index as result. 
- TNode input_index = - DecodeWordFromWord32(input_hash); - TNode result = SmiTag(Signed(input_index)); - Return(result); - } - } - - BIND(&if_generic); - { - TNode result = - CallRuntime(Runtime::kStringParseInt, context, input, radix); - Return(result); - } -} - -// ES6 #sec-number.parseint -TF_BUILTIN(NumberParseInt, CodeStubAssembler) { +#define DEF_BINOP(Name, Generator) \ + TF_BUILTIN(Name, CodeStubAssembler) { \ + TNode lhs = CAST(Parameter(Descriptor::kLeft)); \ + TNode rhs = CAST(Parameter(Descriptor::kRight)); \ + TNode context = CAST(Parameter(Descriptor::kContext)); \ + TNode maybe_feedback_vector = \ + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \ + TNode slot = \ + UncheckedCast(Parameter(Descriptor::kSlot)); \ + \ + BinaryOpAssembler binop_asm(state()); \ + TNode result = binop_asm.Generator(context, lhs, rhs, slot, \ + maybe_feedback_vector, false); \ + \ + Return(result); \ + } +DEF_BINOP(Add_WithFeedback, Generate_AddWithFeedback) +DEF_BINOP(Subtract_WithFeedback, Generate_SubtractWithFeedback) +DEF_BINOP(Multiply_WithFeedback, Generate_MultiplyWithFeedback) +DEF_BINOP(Divide_WithFeedback, Generate_DivideWithFeedback) +DEF_BINOP(Modulus_WithFeedback, Generate_ModulusWithFeedback) +DEF_BINOP(Exponentiate_WithFeedback, Generate_ExponentiateWithFeedback) +DEF_BINOP(BitwiseOr_WithFeedback, Generate_BitwiseOrWithFeedback) +DEF_BINOP(BitwiseXor_WithFeedback, Generate_BitwiseXorWithFeedback) +DEF_BINOP(BitwiseAnd_WithFeedback, Generate_BitwiseAndWithFeedback) +DEF_BINOP(ShiftLeft_WithFeedback, Generate_ShiftLeftWithFeedback) +DEF_BINOP(ShiftRight_WithFeedback, Generate_ShiftRightWithFeedback) +DEF_BINOP(ShiftRightLogical_WithFeedback, + Generate_ShiftRightLogicalWithFeedback) +#undef DEF_BINOP + +#define DEF_UNOP(Name, Generator) \ + TF_BUILTIN(Name, CodeStubAssembler) { \ + TNode value = CAST(Parameter(Descriptor::kValue)); \ + TNode context = CAST(Parameter(Descriptor::kContext)); \ + TNode maybe_feedback_vector = \ + 
CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \ + TNode slot = \ + UncheckedCast(Parameter(Descriptor::kSlot)); \ + \ + UnaryOpAssembler a(state()); \ + TNode result = \ + a.Generator(context, value, slot, maybe_feedback_vector); \ + \ + Return(result); \ + } +DEF_UNOP(BitwiseNot_WithFeedback, Generate_BitwiseNotWithFeedback) +DEF_UNOP(Decrement_WithFeedback, Generate_DecrementWithFeedback) +DEF_UNOP(Increment_WithFeedback, Generate_IncrementWithFeedback) +DEF_UNOP(Negate_WithFeedback, Generate_NegateWithFeedback) +#undef DEF_UNOP + +#define DEF_COMPARE(Name) \ + TF_BUILTIN(Name##_WithFeedback, CodeStubAssembler) { \ + TNode lhs = CAST(Parameter(Descriptor::kLeft)); \ + TNode rhs = CAST(Parameter(Descriptor::kRight)); \ + TNode context = CAST(Parameter(Descriptor::kContext)); \ + TNode maybe_feedback_vector = \ + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); \ + TNode slot = \ + UncheckedCast(Parameter(Descriptor::kSlot)); \ + \ + TVARIABLE(Smi, var_type_feedback); \ + TNode result = RelationalComparison(Operation::k##Name, lhs, rhs, \ + context, &var_type_feedback); \ + UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot); \ + \ + Return(result); \ + } +DEF_COMPARE(LessThan) +DEF_COMPARE(LessThanOrEqual) +DEF_COMPARE(GreaterThan) +DEF_COMPARE(GreaterThanOrEqual) +#undef DEF_COMPARE + +TF_BUILTIN(Equal_WithFeedback, CodeStubAssembler) { + TNode lhs = CAST(Parameter(Descriptor::kLeft)); + TNode rhs = CAST(Parameter(Descriptor::kRight)); TNode context = CAST(Parameter(Descriptor::kContext)); - TNode input = CAST(Parameter(Descriptor::kString)); - TNode radix = CAST(Parameter(Descriptor::kRadix)); - Return(CallBuiltin(Builtins::kParseInt, context, input, radix)); -} + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); -// ES6 #sec-number.prototype.valueof -TF_BUILTIN(NumberPrototypeValueOf, CodeStubAssembler) { - TNode context = 
CAST(Parameter(Descriptor::kContext)); - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); + TVARIABLE(Smi, var_type_feedback); + TNode result = Equal(lhs, rhs, context, &var_type_feedback); + UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot); - TNode result = ToThisValue(context, receiver, PrimitiveType::kNumber, - "Number.prototype.valueOf"); Return(result); } -class AddStubAssembler : public CodeStubAssembler { - public: - explicit AddStubAssembler(compiler::CodeAssemblerState* state) - : CodeStubAssembler(state) {} - - protected: - TNode ConvertReceiver(TNode js_receiver, - TNode context) { - // Call ToPrimitive explicitly without hint (whereas ToNumber - // would pass a "number" hint). - Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); - return CallStub(callable, context, js_receiver); - } - - void ConvertNonReceiverAndLoop(TVariable* var_value, Label* loop, - TNode context) { - *var_value = - CallBuiltin(Builtins::kNonNumberToNumeric, context, var_value->value()); - Goto(loop); - } - - void ConvertAndLoop(TVariable* var_value, - TNode instance_type, Label* loop, - TNode context) { - Label is_not_receiver(this, Label::kDeferred); - GotoIfNot(IsJSReceiverInstanceType(instance_type), &is_not_receiver); - - *var_value = ConvertReceiver(CAST(var_value->value()), context); - Goto(loop); - - BIND(&is_not_receiver); - ConvertNonReceiverAndLoop(var_value, loop, context); - } -}; - -TF_BUILTIN(Add, AddStubAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TVARIABLE(Object, var_left, CAST(Parameter(Descriptor::kLeft))); - TVARIABLE(Object, var_right, CAST(Parameter(Descriptor::kRight))); - - // Shared entry for floating point addition. - Label do_double_add(this); - TVARIABLE(Float64T, var_left_double); - TVARIABLE(Float64T, var_right_double); - - // We might need to loop several times due to ToPrimitive, ToString and/or - // ToNumeric conversions. 
- Label loop(this, {&var_left, &var_right}), - string_add_convert_left(this, Label::kDeferred), - string_add_convert_right(this, Label::kDeferred), - do_bigint_add(this, Label::kDeferred); - Goto(&loop); - BIND(&loop); - { - TNode left = var_left.value(); - TNode right = var_right.value(); - - Label if_left_smi(this), if_left_heapobject(this); - Branch(TaggedIsSmi(left), &if_left_smi, &if_left_heapobject); - - BIND(&if_left_smi); - { - Label if_right_smi(this), if_right_heapobject(this); - Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject); - - BIND(&if_right_smi); - { - Label if_overflow(this); - TNode left_smi = CAST(left); - TNode right_smi = CAST(right); - TNode result = TrySmiAdd(left_smi, right_smi, &if_overflow); - Return(result); - - BIND(&if_overflow); - { - var_left_double = SmiToFloat64(left_smi); - var_right_double = SmiToFloat64(right_smi); - Goto(&do_double_add); - } - } // if_right_smi - - BIND(&if_right_heapobject); - { - TNode right_heap_object = CAST(right); - TNode right_map = LoadMap(right_heap_object); - - Label if_right_not_number(this, Label::kDeferred); - GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); - - // {right} is a HeapNumber. 
- var_left_double = SmiToFloat64(CAST(left)); - var_right_double = LoadHeapNumberValue(right_heap_object); - Goto(&do_double_add); - - BIND(&if_right_not_number); - { - TNode right_instance_type = LoadMapInstanceType(right_map); - GotoIf(IsStringInstanceType(right_instance_type), - &string_add_convert_left); - GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add); - ConvertAndLoop(&var_right, right_instance_type, &loop, context); - } - } // if_right_heapobject - } // if_left_smi - - BIND(&if_left_heapobject); - { - TNode left_heap_object = CAST(left); - TNode left_map = LoadMap(left_heap_object); - Label if_right_smi(this), if_right_heapobject(this); - Branch(TaggedIsSmi(right), &if_right_smi, &if_right_heapobject); - - BIND(&if_right_smi); - { - Label if_left_not_number(this, Label::kDeferred); - GotoIfNot(IsHeapNumberMap(left_map), &if_left_not_number); - - // {left} is a HeapNumber, {right} is a Smi. - var_left_double = LoadHeapNumberValue(left_heap_object); - var_right_double = SmiToFloat64(CAST(right)); - Goto(&do_double_add); - - BIND(&if_left_not_number); - { - TNode left_instance_type = LoadMapInstanceType(left_map); - GotoIf(IsStringInstanceType(left_instance_type), - &string_add_convert_right); - GotoIf(IsBigIntInstanceType(left_instance_type), &do_bigint_add); - // {left} is neither a Numeric nor a String, and {right} is a Smi. - ConvertAndLoop(&var_left, left_instance_type, &loop, context); - } - } // if_right_smi - - BIND(&if_right_heapobject); - { - TNode right_heap_object = CAST(right); - TNode right_map = LoadMap(right_heap_object); - - Label if_left_number(this), if_left_not_number(this, Label::kDeferred); - Branch(IsHeapNumberMap(left_map), &if_left_number, &if_left_not_number); - - BIND(&if_left_number); - { - Label if_right_not_number(this, Label::kDeferred); - GotoIfNot(IsHeapNumberMap(right_map), &if_right_not_number); - - // Both {left} and {right} are HeapNumbers. 
- var_left_double = LoadHeapNumberValue(CAST(left)); - var_right_double = LoadHeapNumberValue(right_heap_object); - Goto(&do_double_add); - - BIND(&if_right_not_number); - { - TNode right_instance_type = LoadMapInstanceType(right_map); - GotoIf(IsStringInstanceType(right_instance_type), - &string_add_convert_left); - GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add); - // {left} is a HeapNumber, {right} is neither Number nor String. - ConvertAndLoop(&var_right, right_instance_type, &loop, context); - } - } // if_left_number - - BIND(&if_left_not_number); - { - Label if_left_bigint(this); - TNode left_instance_type = LoadMapInstanceType(left_map); - GotoIf(IsStringInstanceType(left_instance_type), - &string_add_convert_right); - TNode right_instance_type = LoadMapInstanceType(right_map); - GotoIf(IsStringInstanceType(right_instance_type), - &string_add_convert_left); - GotoIf(IsBigIntInstanceType(left_instance_type), &if_left_bigint); - Label if_left_not_receiver(this, Label::kDeferred); - Label if_right_not_receiver(this, Label::kDeferred); - GotoIfNot(IsJSReceiverInstanceType(left_instance_type), - &if_left_not_receiver); - // {left} is a JSReceiver, convert it first. - var_left = ConvertReceiver(CAST(var_left.value()), context); - Goto(&loop); - - BIND(&if_left_bigint); - { - // {right} is a HeapObject, but not a String. Jump to - // {do_bigint_add} if {right} is already a Numeric. - GotoIf(IsBigIntInstanceType(right_instance_type), &do_bigint_add); - GotoIf(IsHeapNumberMap(right_map), &do_bigint_add); - ConvertAndLoop(&var_right, right_instance_type, &loop, context); - } - - BIND(&if_left_not_receiver); - GotoIfNot(IsJSReceiverInstanceType(right_instance_type), - &if_right_not_receiver); - // {left} is a Primitive, but {right} is a JSReceiver, so convert - // {right} with priority. - var_right = ConvertReceiver(CAST(var_right.value()), context); - Goto(&loop); - - BIND(&if_right_not_receiver); - // Neither {left} nor {right} are JSReceivers. 
- ConvertNonReceiverAndLoop(&var_left, &loop, context); - } - } // if_right_heapobject - } // if_left_heapobject - } - BIND(&string_add_convert_left); - { - // Convert {left} to a String and concatenate it with the String {right}. - TailCallBuiltin(Builtins::kStringAddConvertLeft, context, var_left.value(), - var_right.value()); - } - - BIND(&string_add_convert_right); - { - // Convert {right} to a String and concatenate it with the String {left}. - TailCallBuiltin(Builtins::kStringAddConvertRight, context, var_left.value(), - var_right.value()); - } - - BIND(&do_bigint_add); - { - TailCallBuiltin(Builtins::kBigIntAdd, context, var_left.value(), - var_right.value()); - } - - BIND(&do_double_add); - { - TNode value = - Float64Add(var_left_double.value(), var_right_double.value()); - Return(AllocateHeapNumberWithValue(value)); - } -} - -template -void NumberBuiltinsAssembler::UnaryOp(TVariable* var_input, - Label* do_smi, Label* do_double, - TVariable* var_input_double, - Label* do_bigint) { - TNode context = CAST(Parameter(Descriptor::kContext)); - *var_input = CAST(Parameter(Descriptor::kValue)); - - // We might need to loop for ToNumeric conversion. 
- Label loop(this, {var_input}); - Goto(&loop); - BIND(&loop); - TNode input = var_input->value(); - - Label not_number(this); - GotoIf(TaggedIsSmi(input), do_smi); - TNode input_heap_object = CAST(input); - GotoIfNot(IsHeapNumber(input_heap_object), ¬_number); - if (var_input_double != nullptr) { - *var_input_double = LoadHeapNumberValue(input_heap_object); - } - Goto(do_double); - - BIND(¬_number); - GotoIf(IsBigInt(input_heap_object), do_bigint); - *var_input = CallBuiltin(Builtins::kNonNumberToNumeric, context, input); - Goto(&loop); -} - -template -void NumberBuiltinsAssembler::BinaryOp(Label* smis, TVariable* var_left, - TVariable* var_right, - Label* doubles, - TVariable* var_left_double, - TVariable* var_right_double, - Label* bigints) { - DCHECK_EQ(var_left_double == nullptr, var_right_double == nullptr); - - TNode context = CAST(Parameter(Descriptor::kContext)); - *var_left = CAST(Parameter(Descriptor::kLeft)); - *var_right = CAST(Parameter(Descriptor::kRight)); - - // We might need to loop for ToNumeric conversions. - Label loop(this, {var_left, var_right}); - Goto(&loop); - BIND(&loop); - - Label left_not_smi(this), right_not_smi(this); - Label left_not_number(this), right_not_number(this); - GotoIfNot(TaggedIsSmi(var_left->value()), &left_not_smi); - GotoIf(TaggedIsSmi(var_right->value()), smis); - - // At this point, var_left is a Smi but var_right is not. 
- TNode var_left_smi = CAST(var_left->value()); - TNode var_right_heap_object = CAST(var_right->value()); - GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); - if (var_left_double != nullptr) { - *var_left_double = SmiToFloat64(var_left_smi); - *var_right_double = LoadHeapNumberValue(var_right_heap_object); - } - Goto(doubles); - - BIND(&left_not_smi); - { - TNode var_left_heap_object = CAST(var_left->value()); - GotoIfNot(IsHeapNumber(var_left_heap_object), &left_not_number); - GotoIfNot(TaggedIsSmi(var_right->value()), &right_not_smi); - - // At this point, var_left is a HeapNumber and var_right is a Smi. - if (var_left_double != nullptr) { - *var_left_double = LoadHeapNumberValue(var_left_heap_object); - *var_right_double = SmiToFloat64(CAST(var_right->value())); - } - Goto(doubles); - } - - BIND(&right_not_smi); - { - TNode var_right_heap_object = CAST(var_right->value()); - GotoIfNot(IsHeapNumber(var_right_heap_object), &right_not_number); - if (var_left_double != nullptr) { - *var_left_double = LoadHeapNumberValue(CAST(var_left->value())); - *var_right_double = LoadHeapNumberValue(var_right_heap_object); - } - Goto(doubles); - } - - BIND(&left_not_number); - { - Label left_bigint(this); - GotoIf(IsBigInt(CAST(var_left->value())), &left_bigint); - *var_left = - CallBuiltin(Builtins::kNonNumberToNumeric, context, var_left->value()); - Goto(&loop); - - BIND(&left_bigint); - { - // Jump to {bigints} if {var_right} is already a Numeric. 
- GotoIf(TaggedIsSmi(var_right->value()), bigints); - TNode var_right_heap_object = CAST(var_right->value()); - GotoIf(IsBigInt(var_right_heap_object), bigints); - GotoIf(IsHeapNumber(var_right_heap_object), bigints); - *var_right = CallBuiltin(Builtins::kNonNumberToNumeric, context, - var_right->value()); - Goto(&loop); - } - } - - BIND(&right_not_number); - { - GotoIf(IsBigInt(CAST(var_right->value())), bigints); - *var_right = - CallBuiltin(Builtins::kNonNumberToNumeric, context, var_right->value()); - Goto(&loop); - } -} - -TF_BUILTIN(Subtract, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_left); - TVARIABLE(Object, var_right); - TVARIABLE(Float64T, var_left_double); - TVARIABLE(Float64T, var_right_double); - Label do_smi_sub(this), do_double_sub(this), do_bigint_sub(this); - - BinaryOp(&do_smi_sub, &var_left, &var_right, &do_double_sub, - &var_left_double, &var_right_double, &do_bigint_sub); - - BIND(&do_smi_sub); - { - Label if_overflow(this); - TNode var_left_smi = CAST(var_left.value()); - TNode var_right_smi = CAST(var_right.value()); - TNode result = TrySmiSub(var_left_smi, var_right_smi, &if_overflow); - Return(result); - - BIND(&if_overflow); - { - var_left_double = SmiToFloat64(var_left_smi); - var_right_double = SmiToFloat64(var_right_smi); - Goto(&do_double_sub); - } - } - - BIND(&do_double_sub); - { - TNode value = - Float64Sub(var_left_double.value(), var_right_double.value()); - Return(AllocateHeapNumberWithValue(value)); - } - - BIND(&do_bigint_sub); - { - TNode context = CAST(Parameter(Descriptor::kContext)); - TailCallBuiltin(Builtins::kBigIntSubtract, context, var_left.value(), - var_right.value()); - } -} - -TF_BUILTIN(BitwiseNot, NumberBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TVARIABLE(Object, var_input); - Label do_number(this), do_bigint(this); - - UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); - - BIND(&do_number); - { - TailCallBuiltin(Builtins::kBitwiseXor, context, 
var_input.value(), - SmiConstant(-1)); - } - - BIND(&do_bigint); - { - Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), - SmiConstant(Operation::kBitwiseNot))); - } -} - -TF_BUILTIN(Decrement, NumberBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TVARIABLE(Object, var_input); - Label do_number(this), do_bigint(this); - - UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); - - BIND(&do_number); - { - TailCallBuiltin(Builtins::kSubtract, context, var_input.value(), - SmiConstant(1)); - } - - BIND(&do_bigint); - { - Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), - SmiConstant(Operation::kDecrement))); - } -} - -TF_BUILTIN(Increment, NumberBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TVARIABLE(Object, var_input); - Label do_number(this), do_bigint(this); - - UnaryOp(&var_input, &do_number, &do_number, nullptr, &do_bigint); - - BIND(&do_number); - { - TailCallBuiltin(Builtins::kAdd, context, var_input.value(), SmiConstant(1)); - } - - BIND(&do_bigint); - { - Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), - SmiConstant(Operation::kIncrement))); - } -} - -TF_BUILTIN(Negate, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_input); - TVARIABLE(Float64T, var_input_double); - Label do_smi(this), do_double(this), do_bigint(this); - - UnaryOp(&var_input, &do_smi, &do_double, &var_input_double, - &do_bigint); - - BIND(&do_smi); - { Return(SmiMul(CAST(var_input.value()), SmiConstant(-1))); } - - BIND(&do_double); - { - TNode value = - Float64Mul(var_input_double.value(), Float64Constant(-1)); - Return(AllocateHeapNumberWithValue(value)); - } - - BIND(&do_bigint); - { - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(CallRuntime(Runtime::kBigIntUnaryOp, context, var_input.value(), - SmiConstant(Operation::kNegate))); - } -} - -TF_BUILTIN(Multiply, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_left); - 
TVARIABLE(Object, var_right); - TVARIABLE(Float64T, var_left_double); - TVARIABLE(Float64T, var_right_double); - Label do_smi_mul(this), do_double_mul(this), do_bigint_mul(this); - - BinaryOp(&do_smi_mul, &var_left, &var_right, &do_double_mul, - &var_left_double, &var_right_double, &do_bigint_mul); - - BIND(&do_smi_mul); - // The result is not necessarily a smi, in case of overflow. - Return(SmiMul(CAST(var_left.value()), CAST(var_right.value()))); - - BIND(&do_double_mul); - TNode value = - Float64Mul(var_left_double.value(), var_right_double.value()); - Return(AllocateHeapNumberWithValue(value)); - - BIND(&do_bigint_mul); - { - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kMultiply))); - } -} - -TF_BUILTIN(Divide, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_left); - TVARIABLE(Object, var_right); - TVARIABLE(Float64T, var_left_double); - TVARIABLE(Float64T, var_right_double); - Label do_smi_div(this), do_double_div(this), do_bigint_div(this); - - BinaryOp(&do_smi_div, &var_left, &var_right, &do_double_div, - &var_left_double, &var_right_double, &do_bigint_div); - - BIND(&do_smi_div); - { - // TODO(jkummerow): Consider just always doing a double division. - Label bailout(this); - TNode dividend = CAST(var_left.value()); - TNode divisor = CAST(var_right.value()); - - // Do floating point division if {divisor} is zero. - GotoIf(SmiEqual(divisor, SmiConstant(0)), &bailout); - - // Do floating point division if {dividend} is zero and {divisor} is - // negative. 
- Label dividend_is_zero(this), dividend_is_not_zero(this); - Branch(SmiEqual(dividend, SmiConstant(0)), ÷nd_is_zero, - ÷nd_is_not_zero); - - BIND(÷nd_is_zero); - { - GotoIf(SmiLessThan(divisor, SmiConstant(0)), &bailout); - Goto(÷nd_is_not_zero); - } - BIND(÷nd_is_not_zero); - - TNode untagged_divisor = SmiToInt32(divisor); - TNode untagged_dividend = SmiToInt32(dividend); - - // Do floating point division if {dividend} is kMinInt (or kMinInt - 1 - // if the Smi size is 31) and {divisor} is -1. - Label divisor_is_minus_one(this), divisor_is_not_minus_one(this); - Branch(Word32Equal(untagged_divisor, Int32Constant(-1)), - &divisor_is_minus_one, &divisor_is_not_minus_one); - - BIND(&divisor_is_minus_one); - { - GotoIf(Word32Equal( - untagged_dividend, - Int32Constant(kSmiValueSize == 32 ? kMinInt : (kMinInt >> 1))), - &bailout); - Goto(&divisor_is_not_minus_one); - } - BIND(&divisor_is_not_minus_one); - - // TODO(epertoso): consider adding a machine instruction that returns - // both the result and the remainder. - TNode untagged_result = - Int32Div(untagged_dividend, untagged_divisor); - TNode truncated = Int32Mul(untagged_result, untagged_divisor); - // Do floating point division if the remainder is not 0. - GotoIf(Word32NotEqual(untagged_dividend, truncated), &bailout); - Return(SmiFromInt32(untagged_result)); - - // Bailout: convert {dividend} and {divisor} to double and do double - // division. 
- BIND(&bailout); - { - var_left_double = SmiToFloat64(dividend); - var_right_double = SmiToFloat64(divisor); - Goto(&do_double_div); - } - } - - BIND(&do_double_div); - { - TNode value = - Float64Div(var_left_double.value(), var_right_double.value()); - Return(AllocateHeapNumberWithValue(value)); - } - - BIND(&do_bigint_div); - { - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kDivide))); - } -} - -TF_BUILTIN(Modulus, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_left); - TVARIABLE(Object, var_right); - TVARIABLE(Float64T, var_left_double); - TVARIABLE(Float64T, var_right_double); - Label do_smi_mod(this), do_double_mod(this), do_bigint_mod(this); - - BinaryOp(&do_smi_mod, &var_left, &var_right, &do_double_mod, - &var_left_double, &var_right_double, &do_bigint_mod); - - BIND(&do_smi_mod); - Return(SmiMod(CAST(var_left.value()), CAST(var_right.value()))); - - BIND(&do_double_mod); - TNode value = - Float64Mod(var_left_double.value(), var_right_double.value()); - Return(AllocateHeapNumberWithValue(value)); - - BIND(&do_bigint_mod); - { - TNode context = CAST(Parameter(Descriptor::kContext)); - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kModulus))); - } -} - -TF_BUILTIN(Exponentiate, NumberBuiltinsAssembler) { - TVARIABLE(Object, var_left); - TVARIABLE(Object, var_right); - Label do_number_exp(this), do_bigint_exp(this); - TNode context = CAST(Parameter(Descriptor::kContext)); - - BinaryOp(&do_number_exp, &var_left, &var_right, &do_number_exp, - nullptr, nullptr, &do_bigint_exp); - - BIND(&do_number_exp); - { Return(MathPowImpl(context, var_left.value(), var_right.value())); } - - BIND(&do_bigint_exp); - Return(CallRuntime(Runtime::kBigIntBinaryOp, context, var_left.value(), - var_right.value(), SmiConstant(Operation::kExponentiate))); -} - -TF_BUILTIN(ShiftLeft, 
NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kShiftLeft); -} - -TF_BUILTIN(ShiftRight, NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kShiftRight); -} - -TF_BUILTIN(ShiftRightLogical, NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kShiftRightLogical); -} - -TF_BUILTIN(BitwiseAnd, NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kBitwiseAnd); -} - -TF_BUILTIN(BitwiseOr, NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kBitwiseOr); -} - -TF_BUILTIN(BitwiseXor, NumberBuiltinsAssembler) { - EmitBitwiseOp(Operation::kBitwiseXor); -} - -TF_BUILTIN(LessThan, NumberBuiltinsAssembler) { - RelationalComparisonBuiltin(Operation::kLessThan); -} - -TF_BUILTIN(LessThanOrEqual, NumberBuiltinsAssembler) { - RelationalComparisonBuiltin(Operation::kLessThanOrEqual); -} - -TF_BUILTIN(GreaterThan, NumberBuiltinsAssembler) { - RelationalComparisonBuiltin(Operation::kGreaterThan); -} - -TF_BUILTIN(GreaterThanOrEqual, NumberBuiltinsAssembler) { - RelationalComparisonBuiltin(Operation::kGreaterThanOrEqual); -} - -TF_BUILTIN(Equal, CodeStubAssembler) { +TF_BUILTIN(StrictEqual_WithFeedback, CodeStubAssembler) { TNode lhs = CAST(Parameter(Descriptor::kLeft)); TNode rhs = CAST(Parameter(Descriptor::kRight)); - TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); - Return(Equal(lhs, rhs, context)); -} + TVARIABLE(Smi, var_type_feedback); + TNode result = StrictEqual(lhs, rhs, &var_type_feedback); + UpdateFeedback(var_type_feedback.value(), maybe_feedback_vector, slot); -TF_BUILTIN(StrictEqual, CodeStubAssembler) { - TNode lhs = CAST(Parameter(Descriptor::kLeft)); - TNode rhs = CAST(Parameter(Descriptor::kRight)); - - Return(StrictEqual(lhs, rhs)); + Return(result); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc index 
060454955817bb..60cc12ee17607b 100644 --- a/deps/v8/src/builtins/builtins-object-gen.cc +++ b/deps/v8/src/builtins/builtins-object-gen.cc @@ -314,7 +314,8 @@ TNode ObjectEntriesValuesBuiltinsAssembler::FastGetOwnValuesOrEntries( TNode array; TNode elements; std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, SmiConstant(2), {}, IntPtrConstant(2)); + PACKED_ELEMENTS, array_map, SmiConstant(2), base::nullopt, + IntPtrConstant(2)); StoreFixedArrayElement(CAST(elements), 0, next_key, SKIP_WRITE_BARRIER); StoreFixedArrayElement(CAST(elements), 1, value, SKIP_WRITE_BARRIER); value = TNode::UncheckedCast(array); @@ -482,6 +483,9 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { BIND(&if_empty_elements); Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &if_empty, &if_fast); + // TODO(solanes): These if_xxx here and below seem to be quite similar for + // ObjectKeys and for ObjectGetOwnPropertyNames. In particular, if_fast seem + // to be the exact same. BIND(&if_fast); { // The {object} has a usable enum cache, use that. 
@@ -497,12 +501,13 @@ TF_BUILTIN(ObjectKeys, ObjectBuiltinsAssembler) { TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode array_length = SmiTag(Signed(object_enum_length)); + TNode object_enum_length_intptr = Signed(object_enum_length); + TNode array_length = SmiTag(object_enum_length_intptr); std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, {}, - Signed(object_enum_length)); + PACKED_ELEMENTS, array_map, array_length, base::nullopt, + object_enum_length_intptr); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, - object_enum_length, SKIP_WRITE_BARRIER); + object_enum_length_intptr, SKIP_WRITE_BARRIER); Return(array); } @@ -577,28 +582,32 @@ TF_BUILTIN(ObjectGetOwnPropertyNames, ObjectBuiltinsAssembler) { // Check whether there are enumerable properties. Branch(WordEqual(object_enum_length, IntPtrConstant(0)), &if_empty, &if_fast); + // TODO(solanes): These if_xxx here and below seem to be quite similar for + // ObjectKeys and for ObjectGetOwnPropertyNames. In particular, if_fast seem + // to be the exact same. BIND(&if_fast); { // The {object} has a usable enum cache and all own properties are // enumerable, use that. TNode object_descriptors = LoadMapDescriptors(object_map); - TNode object_enum_cache = CAST( - LoadObjectField(object_descriptors, DescriptorArray::kEnumCacheOffset)); + TNode object_enum_cache = LoadObjectField( + object_descriptors, DescriptorArray::kEnumCacheOffset); auto object_enum_keys = LoadObjectField( object_enum_cache, EnumCache::kKeysOffset); // Allocate a JSArray and copy the elements from the {object_enum_keys}. 
+ TNode array; + TNode elements; TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, native_context); - TNode array_length = SmiTag(Signed(object_enum_length)); - TNode array; - TNode elements; + TNode object_enum_length_intptr = Signed(object_enum_length); + TNode array_length = SmiTag(object_enum_length_intptr); std::tie(array, elements) = AllocateUninitializedJSArrayWithElements( - PACKED_ELEMENTS, array_map, array_length, {}, - Signed(object_enum_length)); + PACKED_ELEMENTS, array_map, array_length, base::nullopt, + object_enum_length_intptr); CopyFixedArrayElements(PACKED_ELEMENTS, object_enum_keys, elements, - object_enum_length, SKIP_WRITE_BARRIER); + object_enum_length_intptr, SKIP_WRITE_BARRIER); Return(array); } @@ -1155,6 +1164,18 @@ TF_BUILTIN(InstanceOf, ObjectBuiltinsAssembler) { Return(InstanceOf(object, callable, context)); } +TF_BUILTIN(InstanceOf_WithFeedback, ObjectBuiltinsAssembler) { + TNode object = CAST(Parameter(Descriptor::kLeft)); + TNode callable = CAST(Parameter(Descriptor::kRight)); + TNode context = CAST(Parameter(Descriptor::kContext)); + TNode maybe_feedback_vector = + CAST(Parameter(Descriptor::kMaybeFeedbackVector)); + TNode slot = UncheckedCast(Parameter(Descriptor::kSlot)); + + CollectInstanceOfFeedback(callable, context, maybe_feedback_vector, slot); + Return(InstanceOf(object, callable, context)); +} + // ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) TF_BUILTIN(OrdinaryHasInstance, ObjectBuiltinsAssembler) { TNode constructor = CAST(Parameter(Descriptor::kLeft)); diff --git a/deps/v8/src/builtins/builtins-promise.h b/deps/v8/src/builtins/builtins-promise.h index fd938ff8418d94..a775ea20411605 100644 --- a/deps/v8/src/builtins/builtins-promise.h +++ b/deps/v8/src/builtins/builtins-promise.h @@ -35,7 +35,7 @@ class PromiseBuiltins { kPromiseAllResolveElementCapabilitySlot, // Values array from Promise.all - kPromiseAllResolveElementValuesArraySlot, + 
kPromiseAllResolveElementValuesSlot, kPromiseAllResolveElementLength }; @@ -48,7 +48,7 @@ class PromiseBuiltins { kPromiseAnyRejectElementCapabilitySlot, // errors array from Promise.any - kPromiseAnyRejectElementErrorsArraySlot, + kPromiseAnyRejectElementErrorsSlot, kPromiseAnyRejectElementLength }; diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc index f398a6c28254d0..1e7b9c7057cfe2 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.cc +++ b/deps/v8/src/builtins/builtins-proxy-gen.cc @@ -62,9 +62,10 @@ TNode ProxiesCodeStubAssembler::AllocateProxy( TNode ProxiesCodeStubAssembler::CreateProxyRevokeFunctionContext( TNode proxy, TNode native_context) { - const TNode context = - AllocateSyntheticFunctionContext(native_context, kProxyContextLength); - StoreContextElementNoWriteBarrier(context, kProxySlot, proxy); + const TNode context = AllocateSyntheticFunctionContext( + native_context, ProxyRevokeFunctionContextSlot::kProxyContextLength); + StoreContextElementNoWriteBarrier( + context, ProxyRevokeFunctionContextSlot::kProxySlot, proxy); return context; } diff --git a/deps/v8/src/builtins/builtins-proxy-gen.h b/deps/v8/src/builtins/builtins-proxy-gen.h index e0ac0824590876..837f4d30aff359 100644 --- a/deps/v8/src/builtins/builtins-proxy-gen.h +++ b/deps/v8/src/builtins/builtins-proxy-gen.h @@ -33,7 +33,6 @@ class ProxiesCodeStubAssembler : public CodeStubAssembler { void CheckDeleteTrapResult(TNode context, TNode target, TNode proxy, TNode name); - protected: enum ProxyRevokeFunctionContextSlot { kProxySlot = Context::MIN_CONTEXT_SLOTS, kProxyContextLength, diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc index b9c1b8980ea8db..dace357d56a475 100644 --- a/deps/v8/src/builtins/builtins-regexp-gen.cc +++ b/deps/v8/src/builtins/builtins-regexp-gen.cc @@ -28,7 +28,15 @@ using compiler::Node; // static void 
Builtins::Generate_RegExpInterpreterTrampoline(MacroAssembler* masm) { ExternalReference interpreter_code_entry = - ExternalReference::re_match_for_call_from_js(masm->isolate()); + ExternalReference::re_match_for_call_from_js(); + masm->Jump(interpreter_code_entry); +} + +// Tail calls the experimental regular expression engine. +// static +void Builtins::Generate_RegExpExperimentalTrampoline(MacroAssembler* masm) { + ExternalReference interpreter_code_entry = + ExternalReference::re_experimental_match_for_call_from_js(); masm->Jump(interpreter_code_entry); } @@ -89,7 +97,7 @@ TNode RegExpBuiltinsAssembler::AllocateRegExpResult( const ElementsKind elements_kind = PACKED_ELEMENTS; TNode map = CAST(LoadContextElement(LoadNativeContext(context), Context::REGEXP_RESULT_MAP_INDEX)); - TNode no_allocation_site = {}; + base::Optional> no_allocation_site = base::nullopt; TNode length_intptr = SmiUntag(length); // Note: The returned `elements` may be in young large object space, but @@ -399,9 +407,9 @@ TNode RegExpBuiltinsAssembler::RegExpExecInternal( int32_t values[] = { JSRegExp::IRREGEXP, JSRegExp::ATOM, - JSRegExp::NOT_COMPILED, + JSRegExp::EXPERIMENTAL, }; - Label* labels[] = {&next, &atom, &runtime}; + Label* labels[] = {&next, &atom, &next}; STATIC_ASSERT(arraysize(values) == arraysize(labels)); Switch(tag, &unreachable, values, labels, arraysize(values)); @@ -1104,7 +1112,7 @@ TF_BUILTIN(RegExpConstructor, RegExpBuiltinsAssembler) { BIND(&allocate_generic); { ConstructorBuiltinsAssembler constructor_assembler(this->state()); - var_regexp = CAST(constructor_assembler.EmitFastNewObject( + var_regexp = CAST(constructor_assembler.FastNewObject( context, regexp_function, CAST(var_new_target.value()))); Goto(&next); } @@ -1354,9 +1362,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( const TNode int_limit = SmiUntag(limit); const ElementsKind kind = PACKED_ELEMENTS; - const ParameterMode mode = CodeStubAssembler::INTPTR_PARAMETERS; - TNode allocation_site = 
{}; const TNode native_context = LoadNativeContext(context); TNode array_map = LoadJSArrayElementsMap(kind, native_context); @@ -1396,6 +1402,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( { TNode length = SmiConstant(1); TNode capacity = IntPtrConstant(1); + base::Optional> allocation_site = base::nullopt; var_result = AllocateJSArray(kind, array_map, capacity, length, allocation_site); @@ -1508,10 +1515,10 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( const TNode reg = var_reg.value(); const TNode from = LoadFixedArrayElement( match_indices, reg, - RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize, mode); + RegExpMatchInfo::kFirstCaptureIndex * kTaggedSize); const TNode to = CAST(LoadFixedArrayElement( match_indices, reg, - (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize, mode)); + (RegExpMatchInfo::kFirstCaptureIndex + 1) * kTaggedSize)); Label select_capture(this), select_undefined(this), store_value(this); TVARIABLE(Object, var_value); @@ -1570,6 +1577,7 @@ TNode RegExpBuiltinsAssembler::RegExpPrototypeSplitBody( { TNode length = SmiZero(); TNode capacity = IntPtrZero(); + base::Optional> allocation_site = base::nullopt; var_result = AllocateJSArray(kind, array_map, capacity, length, allocation_site); Goto(&done); diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc index 010bf965cc6a6e..26cf4fe159c3ff 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc @@ -23,10 +23,10 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { Node* base, Node* offset, Node* value, Node* value_high); - void ValidateSharedTypedArray(TNode maybe_array, - TNode context, - TNode* out_elements_kind, - TNode* out_backing_store); + TNode ValidateIntegerTypedArray( + TNode maybe_array, TNode context, + TNode* out_elements_kind, TNode* out_backing_store, + Label* detached); TNode 
ValidateAtomicAccess(TNode array, TNode index, @@ -38,7 +38,8 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { void AtomicBinopBuiltinCommon(TNode maybe_array, TNode index, TNode value, TNode context, AssemblerFunction function, - Runtime::FunctionId runtime_function); + Runtime::FunctionId runtime_function, + const char* method_name); // Create a BigInt from the result of a 64-bit atomic operation, using // projections on 32-bit platforms. @@ -46,12 +47,17 @@ class SharedArrayBufferBuiltinsAssembler : public CodeStubAssembler { TNode BigIntFromUnsigned64(Node* unsigned64); }; -// https://tc39.es/ecma262/#sec-validatesharedintegertypedarray -void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( +// https://tc39.es/ecma262/#sec-validateintegertypedarray +TNode +SharedArrayBufferBuiltinsAssembler::ValidateIntegerTypedArray( TNode maybe_array, TNode context, - TNode* out_elements_kind, TNode* out_backing_store) { + TNode* out_elements_kind, TNode* out_backing_store, + Label* detached) { Label not_float_or_clamped(this), invalid(this); + // The logic of TypedArrayBuiltinsAssembler::ValidateTypedArrayBuffer is + // inlined to avoid duplicate error branches. + // Fail if it is not a heap object. GotoIf(TaggedIsSmi(maybe_array), &invalid); @@ -60,10 +66,9 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( GotoIfNot(IsJSTypedArrayMap(map), &invalid); TNode array = CAST(maybe_array); - // Fail if the array's JSArrayBuffer is not shared. - TNode array_buffer = LoadJSArrayBufferViewBuffer(array); - TNode bitfield = LoadJSArrayBufferBitField(array_buffer); - GotoIfNot(IsSetWord32(bitfield), &invalid); + // Fail if the array's JSArrayBuffer is detached. + TNode array_buffer = GetTypedArrayBuffer(context, array); + GotoIf(IsDetachedBuffer(array_buffer), detached); // Fail if the array's element type is float32, float64 or clamped. 
STATIC_ASSERT(INT8_ELEMENTS < FLOAT32_ELEMENTS); @@ -82,7 +87,7 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( BIND(&invalid); { - ThrowTypeError(context, MessageTemplate::kNotIntegerSharedTypedArray, + ThrowTypeError(context, MessageTemplate::kNotIntegerTypedArray, maybe_array); } @@ -92,6 +97,8 @@ void SharedArrayBufferBuiltinsAssembler::ValidateSharedTypedArray( TNode backing_store = LoadJSArrayBufferBackingStorePtr(array_buffer); TNode byte_offset = LoadJSArrayBufferViewByteOffset(array); *out_backing_store = RawPtrAdd(backing_store, Signed(byte_offset)); + + return array_buffer; } // https://tc39.github.io/ecma262/#sec-validateatomicaccess @@ -100,24 +107,35 @@ TNode SharedArrayBufferBuiltinsAssembler::ValidateAtomicAccess( TNode array, TNode index, TNode context) { Label done(this), range_error(this); + // 1. Assert: typedArray is an Object that has a [[ViewedArrayBuffer]] + // internal slot. + // 2. Let length be typedArray.[[ArrayLength]]. + TNode array_length = LoadJSTypedArrayLength(array); + + // 3. Let accessIndex be ? ToIndex(requestIndex). TNode index_uintptr = ToIndex(context, index, &range_error); - TNode array_length = LoadJSTypedArrayLength(array); + // 4. Assert: accessIndex ≥ 0. + // 5. If accessIndex ≥ length, throw a RangeError exception. Branch(UintPtrLessThan(index_uintptr, array_length), &done, &range_error); BIND(&range_error); ThrowRangeError(context, MessageTemplate::kInvalidAtomicAccessIndex); + // 6. Return accessIndex. BIND(&done); return index_uintptr; } void SharedArrayBufferBuiltinsAssembler::DebugSanityCheckAtomicIndex( TNode array, TNode index) { - // In Debug mode, we re-validate the index as a sanity check because - // ToInteger above calls out to JavaScript. A SharedArrayBuffer can't be - // detached and the TypedArray length can't change either, so skipping this - // check in Release mode is safe. 
+ // In Debug mode, we re-validate the index as a sanity check because ToInteger + // above calls out to JavaScript. Atomics work on ArrayBuffers, which may be + // detached, and detachment state must be checked and throw before this + // check. The length cannot change. + // + // This function must always be called after ValidateIntegerTypedArray, which + // will ensure that LoadJSArrayBufferViewBuffer will not be null. CSA_ASSERT(this, Word32BinaryNot( IsDetachedBuffer(LoadJSArrayBufferViewBuffer(array)))); CSA_ASSERT(this, UintPtrLessThan(index, LoadJSTypedArrayLength(array))); @@ -151,14 +169,27 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { TNode index = CAST(Parameter(Descriptor::kIndex)); TNode context = CAST(Parameter(Descriptor::kContext)); + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray). + Label detached(this); TNode elements_kind; TNode backing_store; - ValidateSharedTypedArray(maybe_array, context, &elements_kind, - &backing_store); + TNode array_buffer = ValidateIntegerTypedArray( + maybe_array, context, &elements_kind, &backing_store, &detached); TNode array = CAST(maybe_array); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). TNode index_word = ValidateAtomicAccess(array, index, context); + // 3. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 4. NOTE: The above check is not redundant with the check in + // ValidateIntegerTypedArray because the call to ValidateAtomicAccess on the + // preceding line can have arbitrary side effects, which could cause the + // buffer to become detached. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + + // Steps 5-10. + // + // (Not copied from ecma262 due to the axiomatic nature of the memory model.) 
Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), i64(this), u64(this), other(this); int32_t case_values[] = { @@ -213,9 +244,16 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) { Return(BigIntFromUnsigned64(AtomicLoad(MachineType::Uint64(), backing_store, WordShl(index_word, 3)))); #endif + // This shouldn't happen, we've already validated the type. BIND(&other); Unreachable(); + + BIND(&detached); + { + ThrowTypeError(context, MessageTemplate::kDetachedOperation, + "Atomics.load"); + } } // https://tc39.es/ecma262/#sec-atomics.store @@ -225,24 +263,43 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { TNode value = CAST(Parameter(Descriptor::kValue)); TNode context = CAST(Parameter(Descriptor::kContext)); + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray). + Label detached(this); TNode elements_kind; TNode backing_store; - ValidateSharedTypedArray(maybe_array, context, &elements_kind, - &backing_store); + TNode array_buffer = ValidateIntegerTypedArray( + maybe_array, context, &elements_kind, &backing_store, &detached); TNode array = CAST(maybe_array); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). TNode index_word = ValidateAtomicAccess(array, index, context); Label u8(this), u16(this), u32(this), u64(this), other(this); + + // 3. Let arrayTypeName be typedArray.[[TypedArrayName]]. + // 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array", + // let v be ? ToBigInt(value). STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS); STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS); GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &u64); + // 5. Otherwise, let v be ? ToInteger(value). TNode value_integer = ToInteger_Inline(context, value); + + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 7. 
NOTE: The above check is not redundant with the check in + // ValidateIntegerTypedArray because the call to ToBigInt or ToInteger on the + // preceding lines can have arbitrary side effects, which could cause the + // buffer to become detached. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + TNode value_word32 = TruncateTaggedToWord32(context, value_integer); DebugSanityCheckAtomicIndex(array, index_word); + // Steps 8-13. + // + // (Not copied from ecma262 due to the axiomatic nature of the memory model.) int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS, @@ -272,8 +329,13 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { Return(CallRuntime(Runtime::kAtomicsStore64, context, array, index_number, value)); #else + // 4. If arrayTypeName is "BigUint64Array" or "BigInt64Array", + // let v be ? ToBigInt(value). TNode value_bigint = ToBigInt(context, value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TVARIABLE(UintPtrT, var_low); @@ -288,6 +350,12 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) { // This shouldn't happen, we've already validated the type. BIND(&other); Unreachable(); + + BIND(&detached); + { + ThrowTypeError(context, MessageTemplate::kDetachedOperation, + "Atomics.store"); + } } // https://tc39.es/ecma262/#sec-atomics.exchange @@ -297,15 +365,22 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { TNode value = CAST(Parameter(Descriptor::kValue)); TNode context = CAST(Parameter(Descriptor::kContext)); + // Inlines AtomicReadModifyWrite + // https://tc39.es/ecma262/#sec-atomicreadmodifywrite + + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray). 
+ Label detached(this); TNode elements_kind; TNode backing_store; - ValidateSharedTypedArray(maybe_array, context, &elements_kind, - &backing_store); + TNode array_buffer = ValidateIntegerTypedArray( + maybe_array, context, &elements_kind, &backing_store, &detached); TNode array = CAST(maybe_array); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). TNode index_word = ValidateAtomicAccess(array, index, context); #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 + USE(array_buffer); TNode index_number = ChangeUintPtrToTagged(index_word); Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number, value)); @@ -313,16 +388,30 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), i64(this), u64(this), big(this), other(this); + + // 3. Let arrayTypeName be typedArray.[[TypedArrayName]]. + // 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value). STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS); STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS); GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big); + // 5. Otherwise, let v be ? ToInteger(value). TNode value_integer = ToInteger_Inline(context, value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 7. NOTE: The above check is not redundant with the check in + // ValidateIntegerTypedArray because the call to ToBigInt or ToInteger on the + // preceding lines can have arbitrary side effects, which could cause the + // buffer to become detached. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TNode value_word32 = TruncateTaggedToWord32(context, value_integer); + // Steps 8-12. + // + // (Not copied from ecma262 due to the axiomatic nature of the memory model.) 
int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS, @@ -360,8 +449,12 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { WordShl(index_word, 2), value_word32))); BIND(&big); + // 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value). TNode value_bigint = ToBigInt(context, value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TVARIABLE(UintPtrT, var_low); @@ -389,6 +482,12 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) { BIND(&other); Unreachable(); #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 + + BIND(&detached); + { + ThrowTypeError(context, MessageTemplate::kDetachedOperation, + "Atomics.exchange"); + } } // https://tc39.es/ecma262/#sec-atomics.compareexchange @@ -399,29 +498,48 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { TNode new_value = CAST(Parameter(Descriptor::kNewValue)); TNode context = CAST(Parameter(Descriptor::kContext)); + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray). + Label detached(this); TNode elements_kind; TNode backing_store; - ValidateSharedTypedArray(maybe_array, context, &elements_kind, - &backing_store); + TNode array_buffer = ValidateIntegerTypedArray( + maybe_array, context, &elements_kind, &backing_store, &detached); TNode array = CAST(maybe_array); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). 
TNode index_word = ValidateAtomicAccess(array, index, context); #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X + USE(array_buffer); TNode index_number = ChangeUintPtrToTagged(index_word); Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array, index_number, old_value, new_value)); #else Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), i64(this), u64(this), big(this), other(this); + + // 3. Let arrayTypeName be typedArray.[[TypedArrayName]]. + // 4. If typedArray.[[ContentType]] is BigInt, then + // a. Let expected be ? ToBigInt(expectedValue). + // b. Let replacement be ? ToBigInt(replacementValue). STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS); STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS); GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big); + // 5. Else, + // a. Let expected be ? ToInteger(expectedValue). + // b. Let replacement be ? ToInteger(replacementValue). TNode old_value_integer = ToInteger_Inline(context, old_value); TNode new_value_integer = ToInteger_Inline(context, new_value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 7. NOTE: The above check is not redundant with the check in + // ValidateIntegerTypedArray because the call to ToBigInt or ToInteger on the + // preceding lines can have arbitrary side effects, which could cause the + // buffer to become detached. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TNode old_value_word32 = @@ -429,6 +547,9 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { TNode new_value_word32 = TruncateTaggedToWord32(context, new_value_integer); + // Steps 8-14. + // + // (Not copied from ecma262 due to the axiomatic nature of the memory model.) 
int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS, @@ -470,9 +591,15 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { old_value_word32, new_value_word32))); BIND(&big); + // 4. If typedArray.[[ContentType]] is BigInt, then + // a. Let expected be ? ToBigInt(expectedValue). + // b. Let replacement be ? ToBigInt(replacementValue). TNode old_value_bigint = ToBigInt(context, old_value); TNode new_value_bigint = ToBigInt(context, new_value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TVARIABLE(UintPtrT, var_old_low); @@ -505,9 +632,15 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { Unreachable(); #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X + + BIND(&detached); + { + ThrowTypeError(context, MessageTemplate::kDetachedOperation, + "Atomics.store"); + } } -#define BINOP_BUILTIN(op) \ +#define BINOP_BUILTIN(op, method_name) \ TF_BUILTIN(Atomics##op, SharedArrayBufferBuiltinsAssembler) { \ TNode array = CAST(Parameter(Descriptor::kArray)); \ TNode index = CAST(Parameter(Descriptor::kIndex)); \ @@ -515,51 +648,68 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) { TNode context = CAST(Parameter(Descriptor::kContext)); \ AtomicBinopBuiltinCommon(array, index, value, context, \ &CodeAssembler::Atomic##op, \ - Runtime::kAtomics##op); \ + Runtime::kAtomics##op, method_name); \ } // https://tc39.es/ecma262/#sec-atomics.add -BINOP_BUILTIN(Add) +BINOP_BUILTIN(Add, "Atomics.add") // https://tc39.es/ecma262/#sec-atomics.sub -BINOP_BUILTIN(Sub) +BINOP_BUILTIN(Sub, "Atomics.sub") // https://tc39.es/ecma262/#sec-atomics.and -BINOP_BUILTIN(And) +BINOP_BUILTIN(And, "Atomics.and") // 
https://tc39.es/ecma262/#sec-atomics.or -BINOP_BUILTIN(Or) +BINOP_BUILTIN(Or, "Atomics.or") // https://tc39.es/ecma262/#sec-atomics.xor -BINOP_BUILTIN(Xor) +BINOP_BUILTIN(Xor, "Atomics.xor") #undef BINOP_BUILTIN // https://tc39.es/ecma262/#sec-atomicreadmodifywrite void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( TNode maybe_array, TNode index, TNode value, TNode context, AssemblerFunction function, - Runtime::FunctionId runtime_function) { + Runtime::FunctionId runtime_function, const char* method_name) { + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray). + Label detached(this); TNode elements_kind; TNode backing_store; - ValidateSharedTypedArray(maybe_array, context, &elements_kind, - &backing_store); + TNode array_buffer = ValidateIntegerTypedArray( + maybe_array, context, &elements_kind, &backing_store, &detached); TNode array = CAST(maybe_array); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). TNode index_word = ValidateAtomicAccess(array, index, context); #if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X + USE(array_buffer); TNode index_number = ChangeUintPtrToTagged(index_word); Return(CallRuntime(runtime_function, context, array, index_number, value)); #else Label i8(this), u8(this), i16(this), u16(this), i32(this), u32(this), i64(this), u64(this), big(this), other(this); + // 3. Let arrayTypeName be typedArray.[[TypedArrayName]]. + // 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value). STATIC_ASSERT(BIGINT64_ELEMENTS > INT32_ELEMENTS); STATIC_ASSERT(BIGUINT64_ELEMENTS > INT32_ELEMENTS); GotoIf(Int32GreaterThan(elements_kind, Int32Constant(INT32_ELEMENTS)), &big); + // 5. Otherwise, let v be ? ToInteger(value). TNode value_integer = ToInteger_Inline(context, value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + // 7. 
NOTE: The above check is not redundant with the check in + // ValidateIntegerTypedArray because the call to ToBigInt or ToInteger on the + // preceding lines can have arbitrary side effects, which could cause the + // buffer to become detached. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TNode value_word32 = TruncateTaggedToWord32(context, value_integer); + // Steps 8-12. + // + // (Not copied from ecma262 due to the axiomatic nature of the memory model.) int32_t case_values[] = { INT8_ELEMENTS, UINT8_ELEMENTS, INT16_ELEMENTS, UINT16_ELEMENTS, INT32_ELEMENTS, UINT32_ELEMENTS, @@ -599,8 +749,12 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( WordShl(index_word, 2), value_word32, nullptr))); BIND(&big); + // 4. If typedArray.[[ContentType]] is BigInt, let v be ? ToBigInt(value). TNode value_bigint = ToBigInt(context, value); + // 6. If IsDetachedBuffer(buffer) is true, throw a TypeError exception. + GotoIf(IsDetachedBuffer(array_buffer), &detached); + DebugSanityCheckAtomicIndex(array, index_word); TVARIABLE(UintPtrT, var_low); @@ -629,6 +783,9 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon( Unreachable(); #endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 // || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X + + BIND(&detached); + ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name); } } // namespace internal diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc index f89bc25bdbe503..6d3a4d9e9c0d2e 100644 --- a/deps/v8/src/builtins/builtins-sharedarraybuffer.cc +++ b/deps/v8/src/builtins/builtins-sharedarraybuffer.cc @@ -20,9 +20,9 @@ namespace v8 { namespace internal { // See builtins-arraybuffer.cc for implementations of -// SharedArrayBuffer.prototye.byteLength and SharedArrayBuffer.prototype.slice +// 
SharedArrayBuffer.prototype.byteLength and SharedArrayBuffer.prototype.slice -// #sec-atomics.islockfree +// https://tc39.es/ecma262/#sec-atomics.islockfree inline bool AtomicIsLockFree(double size) { // According to the standard, 1, 2, and 4 byte atomics are supposed to be // 'lock free' on every platform. 'Lock free' means that all possible uses of @@ -39,7 +39,7 @@ inline bool AtomicIsLockFree(double size) { return size == 1 || size == 2 || size == 4 || size == 8; } -// ES #sec-atomics.islockfree +// https://tc39.es/ecma262/#sec-atomics.islockfree BUILTIN(AtomicsIsLockFree) { HandleScope scope(isolate); Handle size = args.atOrUndefined(isolate, 1); @@ -48,37 +48,45 @@ BUILTIN(AtomicsIsLockFree) { return *isolate->factory()->ToBoolean(AtomicIsLockFree(size->Number())); } -// ES #sec-validatesharedintegertypedarray -V8_WARN_UNUSED_RESULT MaybeHandle ValidateSharedIntegerTypedArray( - Isolate* isolate, Handle object, +// https://tc39.es/ecma262/#sec-validatesharedintegertypedarray +V8_WARN_UNUSED_RESULT MaybeHandle ValidateIntegerTypedArray( + Isolate* isolate, Handle object, const char* method_name, bool only_int32_and_big_int64 = false) { if (object->IsJSTypedArray()) { Handle typed_array = Handle::cast(object); - if (typed_array->GetBuffer()->is_shared()) { - if (only_int32_and_big_int64) { - if (typed_array->type() == kExternalInt32Array || - typed_array->type() == kExternalBigInt64Array) { - return typed_array; - } - } else { - if (typed_array->type() != kExternalFloat32Array && - typed_array->type() != kExternalFloat64Array && - typed_array->type() != kExternalUint8ClampedArray) - return typed_array; + + if (typed_array->WasDetached()) { + THROW_NEW_ERROR( + isolate, + NewTypeError( + MessageTemplate::kDetachedOperation, + isolate->factory()->NewStringFromAsciiChecked(method_name)), + JSTypedArray); + } + + if (only_int32_and_big_int64) { + if (typed_array->type() == kExternalInt32Array || + typed_array->type() == kExternalBigInt64Array) { + return 
typed_array; } + } else { + if (typed_array->type() != kExternalFloat32Array && + typed_array->type() != kExternalFloat64Array && + typed_array->type() != kExternalUint8ClampedArray) + return typed_array; } } THROW_NEW_ERROR( isolate, NewTypeError(only_int32_and_big_int64 - ? MessageTemplate::kNotInt32OrBigInt64SharedTypedArray - : MessageTemplate::kNotIntegerSharedTypedArray, + ? MessageTemplate::kNotInt32OrBigInt64TypedArray + : MessageTemplate::kNotIntegerTypedArray, object), JSTypedArray); } -// ES #sec-validateatomicaccess +// https://tc39.es/ecma262/#sec-validateatomicaccess // ValidateAtomicAccess( typedArray, requestIndex ) V8_WARN_UNUSED_RESULT Maybe ValidateAtomicAccess( Isolate* isolate, Handle typed_array, @@ -91,8 +99,9 @@ V8_WARN_UNUSED_RESULT Maybe ValidateAtomicAccess( Nothing()); size_t access_index; + size_t typed_array_length = typed_array->length(); if (!TryNumberToSize(*access_index_obj, &access_index) || - typed_array->WasDetached() || access_index >= typed_array->length()) { + access_index >= typed_array_length) { isolate->Throw(*isolate->factory()->NewRangeError( MessageTemplate::kInvalidAtomicAccessIndex)); return Nothing(); @@ -110,92 +119,88 @@ inline size_t GetAddress32(size_t index, size_t byte_offset) { return (index << 2) + byte_offset; } -MaybeHandle AtomicsWake(Isolate* isolate, Handle array, - Handle index, Handle count) { +} // namespace + +// ES #sec-atomics.notify +// Atomics.notify( typedArray, index, count ) +BUILTIN(AtomicsNotify) { + HandleScope scope(isolate); + Handle array = args.atOrUndefined(isolate, 1); + Handle index = args.atOrUndefined(isolate, 2); + Handle count = args.atOrUndefined(isolate, 3); + Handle sta; - ASSIGN_RETURN_ON_EXCEPTION( - isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true), - Object); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, sta, + ValidateIntegerTypedArray(isolate, array, "Atomics.notify", true)); + // 2. Let i be ? ValidateAtomicAccess(typedArray, index). 
Maybe maybe_index = ValidateAtomicAccess(isolate, sta, index); - MAYBE_RETURN_NULL(maybe_index); + if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception(); size_t i = maybe_index.FromJust(); + // 3. If count is undefined, let c be +∞. + // 4. Else, + // a. Let intCount be ? ToInteger(count). + // b. Let c be max(intCount, 0). uint32_t c; if (count->IsUndefined(isolate)) { c = kMaxUInt32; } else { - ASSIGN_RETURN_ON_EXCEPTION(isolate, count, - Object::ToInteger(isolate, count), Object); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count, + Object::ToInteger(isolate, count)); double count_double = count->Number(); - if (count_double < 0) + if (count_double < 0) { count_double = 0; - else if (count_double > kMaxUInt32) + } else if (count_double > kMaxUInt32) { count_double = kMaxUInt32; + } c = static_cast(count_double); } + // Steps 5-9 performed in FutexEmulation::Wake. + + // 10. If IsSharedArrayBuffer(buffer) is false, return 0. Handle array_buffer = sta->GetBuffer(); + size_t wake_addr; + if (V8_UNLIKELY(!sta->GetBuffer()->is_shared())) { + return Smi::FromInt(0); + } + + // Steps 11-17 performed in FutexEmulation::Wake. 
if (sta->type() == kExternalBigInt64Array) { - return Handle( - FutexEmulation::Wake(array_buffer, GetAddress64(i, sta->byte_offset()), - c), - isolate); + wake_addr = GetAddress64(i, sta->byte_offset()); } else { DCHECK(sta->type() == kExternalInt32Array); - return Handle( - FutexEmulation::Wake(array_buffer, GetAddress32(i, sta->byte_offset()), - c), - isolate); + wake_addr = GetAddress32(i, sta->byte_offset()); } + return FutexEmulation::Wake(array_buffer, wake_addr, c); } -} // namespace - -// ES #sec-atomics.wake -// Atomics.wake( typedArray, index, count ) -BUILTIN(AtomicsWake) { - HandleScope scope(isolate); - Handle array = args.atOrUndefined(isolate, 1); - Handle index = args.atOrUndefined(isolate, 2); - Handle count = args.atOrUndefined(isolate, 3); - - isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsWake); - RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count)); -} - -// ES #sec-atomics.notify -// Atomics.notify( typedArray, index, count ) -BUILTIN(AtomicsNotify) { - HandleScope scope(isolate); - Handle array = args.atOrUndefined(isolate, 1); - Handle index = args.atOrUndefined(isolate, 2); - Handle count = args.atOrUndefined(isolate, 3); - - isolate->CountUsage(v8::Isolate::UseCounterFeature::kAtomicsNotify); - RETURN_RESULT_OR_FAILURE(isolate, AtomicsWake(isolate, array, index, count)); -} - -// ES #sec-atomics.wait -// Atomics.wait( typedArray, index, value, timeout ) -BUILTIN(AtomicsWait) { - HandleScope scope(isolate); - Handle array = args.atOrUndefined(isolate, 1); - Handle index = args.atOrUndefined(isolate, 2); - Handle value = args.atOrUndefined(isolate, 3); - Handle timeout = args.atOrUndefined(isolate, 4); - +Object DoWait(Isolate* isolate, FutexEmulation::WaitMode mode, + Handle array, Handle index, Handle value, + Handle timeout) { + // 1. Let buffer be ? ValidateIntegerTypedArray(typedArray, true). 
Handle sta; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, sta, ValidateSharedIntegerTypedArray(isolate, array, true)); + isolate, sta, + ValidateIntegerTypedArray(isolate, array, "Atomics.wait", true)); + + // 2. If IsSharedArrayBuffer(buffer) is false, throw a TypeError exception. + if (V8_UNLIKELY(!sta->GetBuffer()->is_shared())) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kNotSharedTypedArray, array)); + } + // 3. Let i be ? ValidateAtomicAccess(typedArray, index). Maybe maybe_index = ValidateAtomicAccess(isolate, sta, index); if (maybe_index.IsNothing()) return ReadOnlyRoots(isolate).exception(); size_t i = maybe_index.FromJust(); - // According to the spec, we have to check value's type before - // looking at the timeout. + // 4. Let arrayTypeName be typedArray.[[TypedArrayName]]. + // 5. If arrayTypeName is "BigInt64Array", let v be ? ToBigInt64(value). + // 6. Otherwise, let v be ? ToInt32(value). if (sta->type() == kExternalBigInt64Array) { ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, value, BigInt::FromObject(isolate, value)); @@ -205,6 +210,8 @@ BUILTIN(AtomicsWait) { Object::ToInt32(isolate, value)); } + // 7. Let q be ? ToNumber(timeout). + // 8. If q is NaN, let t be +∞, else let t be max(q, 0). double timeout_number; if (timeout->IsUndefined(isolate)) { timeout_number = ReadOnlyRoots(isolate).infinity_value().Number(); @@ -218,7 +225,11 @@ BUILTIN(AtomicsWait) { timeout_number = 0; } - if (!isolate->allow_atomics_wait()) { + // 9. If mode is sync, then + // a. Let B be AgentCanSuspend(). + // b. If B is false, throw a TypeError exception. 
+ if (mode == FutexEmulation::WaitMode::kSync && + !isolate->allow_atomics_wait()) { THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed)); } @@ -227,15 +238,39 @@ BUILTIN(AtomicsWait) { if (sta->type() == kExternalBigInt64Array) { return FutexEmulation::WaitJs64( - isolate, array_buffer, GetAddress64(i, sta->byte_offset()), + isolate, mode, array_buffer, GetAddress64(i, sta->byte_offset()), Handle::cast(value)->AsInt64(), timeout_number); } else { DCHECK(sta->type() == kExternalInt32Array); - return FutexEmulation::WaitJs32(isolate, array_buffer, + return FutexEmulation::WaitJs32(isolate, mode, array_buffer, GetAddress32(i, sta->byte_offset()), NumberToInt32(*value), timeout_number); } } +// https://tc39.es/ecma262/#sec-atomics.wait +// Atomics.wait( typedArray, index, value, timeout ) +BUILTIN(AtomicsWait) { + HandleScope scope(isolate); + Handle array = args.atOrUndefined(isolate, 1); + Handle index = args.atOrUndefined(isolate, 2); + Handle value = args.atOrUndefined(isolate, 3); + Handle timeout = args.atOrUndefined(isolate, 4); + + return DoWait(isolate, FutexEmulation::WaitMode::kSync, array, index, value, + timeout); +} + +BUILTIN(AtomicsWaitAsync) { + HandleScope scope(isolate); + Handle array = args.atOrUndefined(isolate, 1); + Handle index = args.atOrUndefined(isolate, 2); + Handle value = args.atOrUndefined(isolate, 3); + Handle timeout = args.atOrUndefined(isolate, 4); + + return DoWait(isolate, FutexEmulation::WaitMode::kAsync, array, index, value, + timeout); +} + } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc index 7ccb99792eda55..9920369136ad43 100644 --- a/deps/v8/src/builtins/builtins-string-gen.cc +++ b/deps/v8/src/builtins/builtins-string-gen.cc @@ -1164,10 +1164,11 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( DescriptorIndexNameValue additional_property_to_check, const NodeFunction0& 
regexp_call, const NodeFunction1& generic_call) { Label out(this); + Label get_property_lookup(this); - // Smis definitely don't have an attached symbol. - GotoIf(TaggedIsSmi(object), &out); - TNode heap_object = CAST(object); + // Smis have to go through the GetProperty lookup in case Number.prototype or + // Object.prototype was modified. + GotoIf(TaggedIsSmi(object), &get_property_lookup); // Take the fast path for RegExps. // There's two conditions: {object} needs to be a fast regexp, and @@ -1176,6 +1177,8 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( { Label stub_call(this), slow_lookup(this); + TNode heap_object = CAST(object); + GotoIf(TaggedIsSmi(maybe_string), &slow_lookup); GotoIfNot(IsString(CAST(maybe_string)), &slow_lookup); @@ -1196,10 +1199,10 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( regexp_call(); BIND(&slow_lookup); + // Special case null and undefined to skip the property lookup. + Branch(IsNullOrUndefined(heap_object), &out, &get_property_lookup); } - GotoIf(IsNullOrUndefined(heap_object), &out); - // Fall back to a slow lookup of {heap_object[symbol]}. // // The spec uses GetMethod({heap_object}, {symbol}), which has a few quirks: @@ -1208,7 +1211,8 @@ void StringBuiltinsAssembler::MaybeCallFunctionAtSymbol( // We handle the former by jumping to {out} for null values as well, while // the latter is already handled by the Call({maybe_func}) operation. 
- const TNode maybe_func = GetProperty(context, heap_object, symbol); + BIND(&get_property_lookup); + const TNode maybe_func = GetProperty(context, object, symbol); GotoIf(IsUndefined(maybe_func), &out); GotoIf(IsNull(maybe_func), &out); diff --git a/deps/v8/src/builtins/builtins-string.cc b/deps/v8/src/builtins/builtins-string.cc index 8a897765c83ab5..df5ba93a59ebcb 100644 --- a/deps/v8/src/builtins/builtins-string.cc +++ b/deps/v8/src/builtins/builtins-string.cc @@ -40,14 +40,16 @@ bool IsValidCodePoint(Isolate* isolate, Handle value) { return true; } +static constexpr uc32 kInvalidCodePoint = static_cast(-1); + uc32 NextCodePoint(Isolate* isolate, BuiltinArguments args, int index) { Handle value = args.at(1 + index); - ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, value, - Object::ToNumber(isolate, value), -1); + ASSIGN_RETURN_ON_EXCEPTION_VALUE( + isolate, value, Object::ToNumber(isolate, value), kInvalidCodePoint); if (!IsValidCodePoint(isolate, value)) { isolate->Throw(*isolate->factory()->NewRangeError( MessageTemplate::kInvalidCodePoint, value)); - return -1; + return kInvalidCodePoint; } return DoubleToUint32(value->Number()); } @@ -69,7 +71,7 @@ BUILTIN(StringFromCodePoint) { int index; for (index = 0; index < length; index++) { code = NextCodePoint(isolate, args, index); - if (code < 0) { + if (code == kInvalidCodePoint) { return ReadOnlyRoots(isolate).exception(); } if (code > String::kMaxOneByteCharCode) { @@ -99,7 +101,7 @@ BUILTIN(StringFromCodePoint) { break; } code = NextCodePoint(isolate, args, index); - if (code < 0) { + if (code == kInvalidCodePoint) { return ReadOnlyRoots(isolate).exception(); } } diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq index a4edc94418cb1a..816cbb4c11b1e8 100644 --- a/deps/v8/src/builtins/builtins-string.tq +++ b/deps/v8/src/builtins/builtins-string.tq @@ -5,6 +5,43 @@ #include 'src/builtins/builtins-string-gen.h' namespace string { + +// TODO(bbudge) Remove the 'RT' suffix on 
this runtime function. +extern transitioning runtime ToStringRT(Context, JSAny): String; + +@export +transitioning macro ToStringImpl(context: Context, o: JSAny): String { + let result: JSAny = o; + while (true) { + typeswitch (result) { + case (num: Number): { + return NumberToString(num); + } + case (str: String): { + return str; + } + case (oddball: Oddball): { + return oddball.to_string; + } + case (JSReceiver): { + result = NonPrimitiveToPrimitive_String(context, result); + continue; + } + case (Symbol): { + ThrowTypeError(MessageTemplate::kSymbolToString); + } + case (JSAny): { + return ToStringRT(context, o); + } + } + } + unreachable; +} + +transitioning builtin ToString(context: Context, o: JSAny): String { + return ToStringImpl(context, o); +} + extern macro StringBuiltinsAssembler::SubString( String, uintptr, uintptr): String; @@ -80,7 +117,7 @@ IfInBounds(String, uintptr, uintptr), IfOutOfBounds { // check. const kMaxStringLengthFitsSmi: constexpr bool = kStringMaxLengthUintptr < kSmiMaxValue; - StaticAssert(kMaxStringLengthFitsSmi); + static_assert(kMaxStringLengthFitsSmi); if (index >= length) goto IfOutOfBounds; goto IfInBounds(string, index, length); } diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc index a6d3887ad31fa0..26c67cfc1272ef 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.cc +++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc @@ -68,10 +68,8 @@ TNode TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer( StoreJSArrayBufferBackingStore( buffer, EncodeExternalPointer(ReinterpretCast(IntPtrConstant(0)))); - if (V8_ARRAY_BUFFER_EXTENSION_BOOL) { StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset, IntPtrConstant(0)); - } for (int offset = JSArrayBuffer::kHeaderSize; offset < JSArrayBuffer::kSizeWithEmbedderFields; offset += kTaggedSize) { StoreObjectFieldNoWriteBarrier(buffer, offset, SmiConstant(0)); @@ -233,28 +231,6 @@ TNode 
TypedArrayBuiltinsAssembler::GetDefaultConstructor( LoadContextElement(LoadNativeContext(context), context_slot.value())); } -TNode TypedArrayBuiltinsAssembler::GetBuffer( - TNode context, TNode array) { - Label call_runtime(this), done(this); - TVARIABLE(Object, var_result); - - TNode buffer = LoadJSArrayBufferViewBuffer(array); - GotoIf(IsDetachedBuffer(buffer), &call_runtime); - TNode backing_store = LoadJSArrayBufferBackingStorePtr(buffer); - GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime); - var_result = buffer; - Goto(&done); - - BIND(&call_runtime); - { - var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array); - Goto(&done); - } - - BIND(&done); - return CAST(var_result.value()); -} - TNode TypedArrayBuiltinsAssembler::ValidateTypedArray( TNode context, TNode obj, const char* method_name) { // If it is not a typed array, throw @@ -505,49 +481,5 @@ TF_BUILTIN(TypedArrayPrototypeToStringTag, TypedArrayBuiltinsAssembler) { BIND(&return_undefined); Return(UndefinedConstant()); } - -void TypedArrayBuiltinsAssembler::GenerateTypedArrayPrototypeIterationMethod( - TNode context, TNode receiver, const char* method_name, - IterationKind kind) { - Label throw_bad_receiver(this, Label::kDeferred); - - GotoIf(TaggedIsSmi(receiver), &throw_bad_receiver); - GotoIfNot(IsJSTypedArray(CAST(receiver)), &throw_bad_receiver); - - // Check if the {receiver}'s JSArrayBuffer was detached. 
- ThrowIfArrayBufferViewBufferIsDetached(context, CAST(receiver), method_name); - - Return(CreateArrayIterator(context, receiver, kind)); - - BIND(&throw_bad_receiver); - ThrowTypeError(context, MessageTemplate::kNotTypedArray, method_name); -} - -// ES #sec-%typedarray%.prototype.values -TF_BUILTIN(TypedArrayPrototypeValues, TypedArrayBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - GenerateTypedArrayPrototypeIterationMethod(context, receiver, - "%TypedArray%.prototype.values()", - IterationKind::kValues); -} - -// ES #sec-%typedarray%.prototype.entries -TF_BUILTIN(TypedArrayPrototypeEntries, TypedArrayBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - GenerateTypedArrayPrototypeIterationMethod(context, receiver, - "%TypedArray%.prototype.entries()", - IterationKind::kEntries); -} - -// ES #sec-%typedarray%.prototype.keys -TF_BUILTIN(TypedArrayPrototypeKeys, TypedArrayBuiltinsAssembler) { - TNode context = CAST(Parameter(Descriptor::kContext)); - TNode receiver = CAST(Parameter(Descriptor::kReceiver)); - GenerateTypedArrayPrototypeIterationMethod( - context, receiver, "%TypedArray%.prototype.keys()", IterationKind::kKeys); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h index 1008b6bdd7343c..780c36123e05a3 100644 --- a/deps/v8/src/builtins/builtins-typed-array-gen.h +++ b/deps/v8/src/builtins/builtins-typed-array-gen.h @@ -16,11 +16,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { explicit TypedArrayBuiltinsAssembler(compiler::CodeAssemblerState* state) : CodeStubAssembler(state) {} - void GenerateTypedArrayPrototypeIterationMethod(TNode context, - TNode receiver, - const char* method_name, - IterationKind iteration_kind); - void 
SetupTypedArrayEmbedderFields(TNode holder); void AttachBuffer(TNode holder, TNode buffer, TNode map, TNode length, @@ -50,9 +45,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler { TNode GetDefaultConstructor(TNode context, TNode exemplar); - TNode GetBuffer(TNode context, - TNode array); - TNode ValidateTypedArray(TNode context, TNode obj, const char* method_name); diff --git a/deps/v8/src/builtins/builtins-wasm-gen.cc b/deps/v8/src/builtins/builtins-wasm-gen.cc index 28efa39c67dc10..d4e92d165de7c4 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.cc +++ b/deps/v8/src/builtins/builtins-wasm-gen.cc @@ -37,12 +37,10 @@ TNode WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance( instance, WasmInstanceObject::kWasmExternalFunctionsOffset); } -TNode WasmBuiltinsAssembler::SmiFromUint32WithSaturation( - TNode value, uint32_t max) { - DCHECK_LE(max, static_cast(Smi::kMaxValue)); - TNode capped_value = SelectConstant( - Uint32LessThan(value, Uint32Constant(max)), value, Uint32Constant(max)); - return SmiFromUint32(capped_value); +TNode WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance( + TNode instance) { + return LoadObjectField( + instance, WasmInstanceObject::kManagedObjectMapsOffset); } TF_BUILTIN(WasmFloat32ToNumber, WasmBuiltinsAssembler) { @@ -55,22 +53,6 @@ TF_BUILTIN(WasmFloat64ToNumber, WasmBuiltinsAssembler) { Return(ChangeFloat64ToTagged(val)); } -TF_BUILTIN(WasmAtomicNotify, WasmBuiltinsAssembler) { - TNode address = - UncheckedCast(Parameter(Descriptor::kAddress)); - TNode count = UncheckedCast(Parameter(Descriptor::kCount)); - - TNode instance = LoadInstanceFromFrame(); - TNode address_number = ChangeUint32ToTagged(address); - TNode count_number = ChangeUint32ToTagged(count); - TNode context = LoadContextFromInstance(instance); - - TNode result_smi = - CAST(CallRuntime(Runtime::kWasmAtomicNotify, context, instance, - address_number, count_number)); - Return(Unsigned(SmiToInt32(result_smi))); -} - TF_BUILTIN(WasmI32AtomicWait32, 
WasmBuiltinsAssembler) { if (!Is32()) { Unreachable(); @@ -100,33 +82,6 @@ TF_BUILTIN(WasmI32AtomicWait32, WasmBuiltinsAssembler) { Return(Unsigned(SmiToInt32(result_smi))); } -TF_BUILTIN(WasmI32AtomicWait64, WasmBuiltinsAssembler) { - if (!Is64()) { - Unreachable(); - return; - } - - TNode address = - UncheckedCast(Parameter(Descriptor::kAddress)); - TNode address_number = ChangeUint32ToTagged(address); - - TNode expected_value = - UncheckedCast(Parameter(Descriptor::kExpectedValue)); - TNode expected_value_number = ChangeInt32ToTagged(expected_value); - - TNode timeout_raw = - UncheckedCast(Parameter(Descriptor::kTimeout)); - TNode timeout = BigIntFromInt64(timeout_raw); - - TNode instance = LoadInstanceFromFrame(); - TNode context = LoadContextFromInstance(instance); - - TNode result_smi = - CAST(CallRuntime(Runtime::kWasmI32AtomicWait, context, instance, - address_number, expected_value_number, timeout)); - Return(Unsigned(SmiToInt32(result_smi))); -} - TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) { if (!Is32()) { Unreachable(); @@ -159,101 +114,10 @@ TF_BUILTIN(WasmI64AtomicWait32, WasmBuiltinsAssembler) { Return(Unsigned(SmiToInt32(result_smi))); } -TF_BUILTIN(WasmI64AtomicWait64, WasmBuiltinsAssembler) { - if (!Is64()) { - Unreachable(); - return; - } - - TNode address = - UncheckedCast(Parameter(Descriptor::kAddress)); - TNode address_number = ChangeUint32ToTagged(address); - - TNode expected_value_raw = - UncheckedCast(Parameter(Descriptor::kExpectedValue)); - TNode expected_value = BigIntFromInt64(expected_value_raw); - - TNode timeout_raw = - UncheckedCast(Parameter(Descriptor::kTimeout)); - TNode timeout = BigIntFromInt64(timeout_raw); - - TNode instance = LoadInstanceFromFrame(); - TNode context = LoadContextFromInstance(instance); - - TNode result_smi = - CAST(CallRuntime(Runtime::kWasmI64AtomicWait, context, instance, - address_number, expected_value, timeout)); - Return(Unsigned(SmiToInt32(result_smi))); -} - -TF_BUILTIN(WasmTableInit, 
WasmBuiltinsAssembler) { - TNode dst_raw = - UncheckedCast(Parameter(Descriptor::kDestination)); - // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make - // sure that the values fit into a Smi. - STATIC_ASSERT(static_cast(Smi::kMaxValue) >= - wasm::kV8MaxWasmTableSize + 1); - constexpr uint32_t kCap = - static_cast(wasm::kV8MaxWasmTableSize + 1); - TNode dst = SmiFromUint32WithSaturation(dst_raw, kCap); - TNode src_raw = - UncheckedCast(Parameter(Descriptor::kSource)); - TNode src = SmiFromUint32WithSaturation(src_raw, kCap); - TNode size_raw = - UncheckedCast(Parameter(Descriptor::kSize)); - TNode size = SmiFromUint32WithSaturation(size_raw, kCap); - TNode table_index = - UncheckedCast(Parameter(Descriptor::kTableIndex)); - TNode segment_index = - UncheckedCast(Parameter(Descriptor::kSegmentIndex)); - TNode instance = LoadInstanceFromFrame(); - TNode context = LoadContextFromInstance(instance); - - TailCallRuntime(Runtime::kWasmTableInit, context, instance, table_index, - segment_index, dst, src, size); -} - -TF_BUILTIN(WasmTableCopy, WasmBuiltinsAssembler) { - // We cap {dst}, {src}, and {size} by {wasm::kV8MaxWasmTableSize + 1} to make - // sure that the values fit into a Smi. 
- STATIC_ASSERT(static_cast(Smi::kMaxValue) >= - wasm::kV8MaxWasmTableSize + 1); - constexpr uint32_t kCap = - static_cast(wasm::kV8MaxWasmTableSize + 1); - - TNode dst_raw = - UncheckedCast(Parameter(Descriptor::kDestination)); - TNode dst = SmiFromUint32WithSaturation(dst_raw, kCap); - - TNode src_raw = - UncheckedCast(Parameter(Descriptor::kSource)); - TNode src = SmiFromUint32WithSaturation(src_raw, kCap); - - TNode size_raw = - UncheckedCast(Parameter(Descriptor::kSize)); - TNode size = SmiFromUint32WithSaturation(size_raw, kCap); - - TNode dst_table = - UncheckedCast(Parameter(Descriptor::kDestinationTable)); - - TNode src_table = - UncheckedCast(Parameter(Descriptor::kSourceTable)); - - TNode instance = LoadInstanceFromFrame(); - TNode context = LoadContextFromInstance(instance); - - TailCallRuntime(Runtime::kWasmTableCopy, context, instance, dst_table, - src_table, dst, src, size); -} - -TF_BUILTIN(WasmAllocateArray, WasmBuiltinsAssembler) { - TNode instance = LoadInstanceFromFrame(); - TNode map_index = CAST(Parameter(Descriptor::kMapIndex)); +TF_BUILTIN(WasmAllocateArrayWithRtt, WasmBuiltinsAssembler) { + TNode map = CAST(Parameter(Descriptor::kMap)); TNode length = CAST(Parameter(Descriptor::kLength)); TNode element_size = CAST(Parameter(Descriptor::kElementSize)); - TNode maps_list = LoadObjectField( - instance, WasmInstanceObject::kManagedObjectMapsOffset); - TNode map = CAST(LoadFixedArrayElement(maps_list, map_index)); TNode untagged_length = SmiUntag(length); // instance_size = WasmArray::kHeaderSize // + RoundUp(element_size * length, kObjectAlignment) @@ -270,18 +134,5 @@ TF_BUILTIN(WasmAllocateArray, WasmBuiltinsAssembler) { Return(result); } -TF_BUILTIN(WasmAllocateStruct, WasmBuiltinsAssembler) { - TNode instance = LoadInstanceFromFrame(); - TNode map_index = CAST(Parameter(Descriptor::kMapIndex)); - TNode maps_list = LoadObjectField( - instance, WasmInstanceObject::kManagedObjectMapsOffset); - TNode map = CAST(LoadFixedArrayElement(maps_list, 
map_index)); - TNode instance_size = - TimesTaggedSize(LoadMapInstanceSizeInWords(map)); - TNode result = UncheckedCast(Allocate(instance_size)); - StoreMap(result, map); - Return(result); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins-wasm-gen.h b/deps/v8/src/builtins/builtins-wasm-gen.h index 3740560666d5ae..ccf5bae7a150fe 100644 --- a/deps/v8/src/builtins/builtins-wasm-gen.h +++ b/deps/v8/src/builtins/builtins-wasm-gen.h @@ -25,8 +25,8 @@ class WasmBuiltinsAssembler : public CodeStubAssembler { TNode LoadExternalFunctionsFromInstance( TNode instance); - protected: - TNode SmiFromUint32WithSaturation(TNode value, uint32_t max); + TNode LoadManagedObjectMapsFromInstance( + TNode instance); }; } // namespace internal diff --git a/deps/v8/src/builtins/builtins-weak-refs.cc b/deps/v8/src/builtins/builtins-weak-refs.cc index d5cceda4541f89..aee330b4bd4f3c 100644 --- a/deps/v8/src/builtins/builtins-weak-refs.cc +++ b/deps/v8/src/builtins/builtins-weak-refs.cc @@ -9,89 +9,6 @@ namespace v8 { namespace internal { -BUILTIN(FinalizationRegistryConstructor) { - HandleScope scope(isolate); - Handle target = args.target(); - if (args.new_target()->IsUndefined(isolate)) { // [[Call]] - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, - handle(target->shared().Name(), isolate))); - } - // [[Construct]] - Handle new_target = Handle::cast(args.new_target()); - Handle cleanup = args.atOrUndefined(isolate, 1); - - if (!cleanup->IsCallable()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable)); - } - - Handle result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, - JSObject::New(target, new_target, Handle::null())); - - Handle finalization_registry = - Handle::cast(result); - finalization_registry->set_native_context(*isolate->native_context()); - finalization_registry->set_cleanup(*cleanup); - 
finalization_registry->set_flags( - JSFinalizationRegistry::ScheduledForCleanupBit::encode(false)); - - DCHECK(finalization_registry->active_cells().IsUndefined(isolate)); - DCHECK(finalization_registry->cleared_cells().IsUndefined(isolate)); - DCHECK(finalization_registry->key_map().IsUndefined(isolate)); - return *finalization_registry; -} - -BUILTIN(FinalizationRegistryRegister) { - HandleScope scope(isolate); - const char* method_name = "FinalizationRegistry.prototype.register"; - - // 1. Let finalizationGroup be the this value. - // - // 2. If Type(finalizationGroup) is not Object, throw a TypeError - // exception. - // - // 4. If finalizationGroup does not have a [[Cells]] internal slot, - // throw a TypeError exception. - CHECK_RECEIVER(JSFinalizationRegistry, finalization_registry, method_name); - - Handle target = args.atOrUndefined(isolate, 1); - - // 3. If Type(target) is not Object, throw a TypeError exception. - if (!target->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError(MessageTemplate::kWeakRefsRegisterTargetMustBeObject)); - } - - Handle holdings = args.atOrUndefined(isolate, 2); - if (target->SameValue(*holdings)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError( - MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame)); - } - - Handle unregister_token = args.atOrUndefined(isolate, 3); - - // 5. If Type(unregisterToken) is not Object, - // a. If unregisterToken is not undefined, throw a TypeError exception. - if (!unregister_token->IsJSReceiver() && !unregister_token->IsUndefined()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError(MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, - unregister_token)); - } - // TODO(marja): Realms. 
- - JSFinalizationRegistry::Register(finalization_registry, - Handle::cast(target), holdings, - unregister_token, isolate); - return ReadOnlyRoots(isolate).undefined_value(); -} - BUILTIN(FinalizationRegistryUnregister) { HandleScope scope(isolate); const char* method_name = "FinalizationRegistry.prototype.unregister"; @@ -122,53 +39,5 @@ BUILTIN(FinalizationRegistryUnregister) { return *isolate->factory()->ToBoolean(success); } -BUILTIN(WeakRefConstructor) { - HandleScope scope(isolate); - Handle target = args.target(); - if (args.new_target()->IsUndefined(isolate)) { // [[Call]] - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kConstructorNotFunction, - handle(target->shared().Name(), isolate))); - } - // [[Construct]] - Handle new_target = Handle::cast(args.new_target()); - Handle target_object = args.atOrUndefined(isolate, 1); - if (!target_object->IsJSReceiver()) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, - NewTypeError( - MessageTemplate::kWeakRefsWeakRefConstructorTargetMustBeObject)); - } - Handle target_receiver = - handle(JSReceiver::cast(*target_object), isolate); - isolate->heap()->KeepDuringJob(target_receiver); - - // TODO(marja): Realms. - - Handle result; - ASSIGN_RETURN_FAILURE_ON_EXCEPTION( - isolate, result, - JSObject::New(target, new_target, Handle::null())); - - Handle weak_ref = Handle::cast(result); - weak_ref->set_target(*target_receiver); - return *weak_ref; -} - -BUILTIN(WeakRefDeref) { - HandleScope scope(isolate); - CHECK_RECEIVER(JSWeakRef, weak_ref, "WeakRef.prototype.deref"); - if (weak_ref->target().IsJSReceiver()) { - Handle target = - handle(JSReceiver::cast(weak_ref->target()), isolate); - // KeepDuringJob might allocate and cause a GC, but it won't clear - // weak_ref since we hold a Handle to its target. 
- isolate->heap()->KeepDuringJob(target); - } else { - DCHECK(weak_ref->target().IsUndefined(isolate)); - } - return weak_ref->target(); -} - } // namespace internal } // namespace v8 diff --git a/deps/v8/src/builtins/builtins.cc b/deps/v8/src/builtins/builtins.cc index 34f7ddc18ad714..f79ddaa5e4c3dd 100644 --- a/deps/v8/src/builtins/builtins.cc +++ b/deps/v8/src/builtins/builtins.cc @@ -350,8 +350,8 @@ constexpr int OffHeapTrampolineGenerator::kBufferSize; Handle Builtins::GenerateOffHeapTrampolineFor( Isolate* isolate, Address off_heap_entry, int32_t kind_specfic_flags, bool generate_jump_to_instruction_stream) { - DCHECK_NOT_NULL(isolate->embedded_blob()); - DCHECK_NE(0, isolate->embedded_blob_size()); + DCHECK_NOT_NULL(isolate->embedded_blob_code()); + DCHECK_NE(0, isolate->embedded_blob_code_size()); OffHeapTrampolineGenerator generator(isolate); @@ -360,7 +360,7 @@ Handle Builtins::GenerateOffHeapTrampolineFor( ? TrampolineType::kJump : TrampolineType::kAbort); - return Factory::CodeBuilder(isolate, desc, Code::BUILTIN) + return Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_read_only_data_container(kind_specfic_flags) .set_self_reference(generator.CodeObject()) .set_is_executable(generate_jump_to_instruction_stream) diff --git a/deps/v8/src/builtins/builtins.h b/deps/v8/src/builtins/builtins.h index 0eaa9d48d81680..e391cccfb5c824 100644 --- a/deps/v8/src/builtins/builtins.h +++ b/deps/v8/src/builtins/builtins.h @@ -25,7 +25,7 @@ class RootVisitor; enum class InterpreterPushArgsMode : unsigned; namespace compiler { class CodeAssemblerState; -} +} // namespace compiler template static constexpr T FirstFromVarArgs(T x, ...) noexcept { diff --git a/deps/v8/src/builtins/cast.tq b/deps/v8/src/builtins/cast.tq index dfac2035784ee3..1562b7b4ddc0a2 100644 --- a/deps/v8/src/builtins/cast.tq +++ b/deps/v8/src/builtins/cast.tq @@ -2,56 +2,118 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-extern macro IsAllocationSite(HeapObject): bool; extern macro IsBigInt(HeapObject): bool; extern macro IsConstructor(HeapObject): bool; -extern macro IsContext(HeapObject): bool; extern macro IsCustomElementsReceiverInstanceType(int32): bool; extern macro IsExtensibleMap(Map): bool; -extern macro IsFeedbackCell(HeapObject): bool; -extern macro IsFeedbackVector(HeapObject): bool; -extern macro IsFixedArray(HeapObject): bool; -extern macro IsHeapNumber(HeapObject): bool; -extern macro IsJSAggregateError(HeapObject): bool; -extern macro IsJSArray(HeapObject): bool; -extern macro IsJSArrayMap(Map): bool; -extern macro IsJSBoundFunction(HeapObject): bool; -extern macro IsJSFinalizationRegistry(HeapObject): bool; -extern macro IsJSFunction(HeapObject): bool; -extern macro IsJSObject(HeapObject): bool; -extern macro IsJSPrimitiveWrapper(HeapObject): bool; -extern macro IsJSPromise(HeapObject): bool; -extern macro IsJSProxy(HeapObject): bool; -extern macro IsJSReceiver(HeapObject): bool; -extern macro IsJSRegExp(HeapObject): bool; -extern macro IsJSRegExpStringIterator(HeapObject): bool; -extern macro IsJSTypedArray(HeapObject): bool; -extern macro IsMap(HeapObject): bool; -extern macro IsName(HeapObject): bool; -extern macro IsNativeContext(HeapObject): bool; -extern macro IsNumberDictionary(HeapObject): bool; extern macro IsNumberNormalized(Number): bool; -extern macro IsNumber(Object): bool; -extern macro IsOddball(HeapObject): bool; -extern macro IsPrivateSymbol(HeapObject): bool; -extern macro IsPromiseCapability(HeapObject): bool; -extern macro IsPromiseFulfillReactionJobTask(HeapObject): bool; -extern macro IsPromiseReaction(HeapObject): bool; -extern macro IsPromiseReactionJobTask(HeapObject): bool; -extern macro IsPromiseRejectReactionJobTask(HeapObject): bool; extern macro IsSafeInteger(Object): bool; -extern macro IsSharedFunctionInfo(HeapObject): bool; -extern macro IsSymbol(HeapObject): bool; -extern macro IsTuple2(HeapObject): bool; -extern macro 
HeapObjectToJSDataView(HeapObject): JSDataView - labels CastError; -extern macro HeapObjectToJSProxy(HeapObject): JSProxy - labels CastError; -extern macro HeapObjectToJSStringIterator(HeapObject): JSStringIterator - labels CastError; -extern macro HeapObjectToJSArrayBuffer(HeapObject): JSArrayBuffer - labels CastError; +@export +macro IsAccessorInfo(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsAccessorPair(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsAllocationSite(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsCell(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsCode(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsContext(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsCoverageInfo(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsDebugInfo(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsFixedDoubleArray(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsFeedbackCell(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsFeedbackVector(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsHeapNumber(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsNativeContext(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsNumber(o: Object): bool { + return Is(o); +} + +@export +macro IsPrivateSymbol(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsPromiseCapability(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsPromiseFulfillReactionJobTask(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsPromiseReaction(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsPromiseRejectReactionJobTask(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsSharedFunctionInfo(o: HeapObject): bool { + return Is(o); +} + +@export +macro IsSymbol(o: HeapObject): bool { + return Is(o); +} + extern macro TaggedToHeapObject(Object): HeapObject labels CastError; extern macro 
TaggedToSmi(Object): Smi @@ -60,37 +122,30 @@ extern macro TaggedToPositiveSmi(Object): PositiveSmi labels CastError; extern macro TaggedToDirectString(Object): DirectString labels CastError; -extern macro HeapObjectToJSAggregateError(HeapObject): JSAggregateError - labels CastError; -extern macro HeapObjectToJSArray(HeapObject): JSArray - labels CastError; extern macro HeapObjectToCallable(HeapObject): Callable labels CastError; -extern macro HeapObjectToFixedArray(HeapObject): FixedArray - labels CastError; -extern macro HeapObjectToFixedDoubleArray(HeapObject): FixedDoubleArray - labels CastError; -extern macro HeapObjectToString(HeapObject): String - labels CastError; extern macro HeapObjectToConstructor(HeapObject): Constructor labels CastError; extern macro HeapObjectToJSFunctionWithPrototypeSlot(HeapObject): JSFunctionWithPrototypeSlot labels CastError; -extern macro HeapObjectToHeapNumber(HeapObject): HeapNumber - labels CastError; -extern macro HeapObjectToSloppyArgumentsElements(HeapObject): - SloppyArgumentsElements - labels CastError; -extern macro TaggedToNumber(Object): Number - labels CastError; macro Cast(o: A|Object): A labels CastError { if (!IsWeakOrCleared(o)) goto CastError; return %RawDownCast(o); } -macro Cast(o: MaybeObject): A labels CastError; +macro Cast(implicit context: Context)(o: MaybeObject): + A labels CastError { + typeswitch (o) { + case (WeakHeapObject): { + goto CastError; + } + case (o: Object): { + return Cast(o) otherwise CastError; + } + } +} Cast(o: MaybeObject): Undefined labels CastError { if (TaggedNotEqual(o, Undefined)) goto CastError; @@ -120,7 +175,17 @@ Cast(o: Object): Zero labels CastError { Cast(o: Object): Number labels CastError { - return TaggedToNumber(o) otherwise CastError; + typeswitch (o) { + case (s: Smi): { + return s; + } + case (n: HeapNumber): { + return n; + } + case (Object): { + goto CastError; + } + } } Cast(o: Object): Undefined @@ -283,22 +348,6 @@ Cast(o: HeapObject): Undefined return Cast(o) 
otherwise CastError; } -Cast(o: HeapObject): AllocationSite - labels CastError { - if (IsAllocationSite(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): FixedArray - labels CastError { - return HeapObjectToFixedArray(o) otherwise CastError; -} - -Cast(o: HeapObject): FixedDoubleArray - labels CastError { - return HeapObjectToFixedDoubleArray(o) otherwise CastError; -} - Cast(o: Object): EmptyFixedArray labels CastError { if (o != kEmptyFixedArray) goto CastError; @@ -325,46 +374,6 @@ Cast<(FixedDoubleArray | EmptyFixedArray)>(o: HeapObject): FixedDoubleArray| } } -Cast(o: HeapObject): SloppyArgumentsElements - labels CastError { - return HeapObjectToSloppyArgumentsElements(o) otherwise CastError; -} - -Cast(o: HeapObject): JSDataView - labels CastError { - return HeapObjectToJSDataView(o) otherwise CastError; -} - -Cast(o: HeapObject): JSProxy - labels CastError { - return HeapObjectToJSProxy(o) otherwise CastError; -} - -Cast(o: HeapObject): JSStringIterator - labels CastError { - return HeapObjectToJSStringIterator(o) otherwise CastError; -} - -Cast(o: HeapObject): JSRegExpStringIterator - labels CastError { - if (IsJSRegExpStringIterator(o)) { - return %RawDownCast(o); - } - goto CastError; -} - -Cast(o: HeapObject): JSTypedArray - labels CastError { - if (IsJSTypedArray(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(implicit context: Context)(o: Object): JSTypedArray - labels CastError { - const heapObject = Cast(o) otherwise CastError; - return Cast(heapObject) otherwise CastError; -} - Cast(o: HeapObject): Callable labels CastError { return HeapObjectToCallable(o) otherwise CastError; @@ -376,81 +385,22 @@ Cast(o: HeapObject): Undefined|Callable return HeapObjectToCallable(o) otherwise CastError; } -Cast(o: HeapObject): JSAggregateError - labels CastError { - return HeapObjectToJSAggregateError(o) otherwise CastError; -} - -Cast(o: HeapObject): JSArray - labels CastError { - return HeapObjectToJSArray(o) otherwise CastError; 
-} - -Cast(o: HeapObject): JSArrayBuffer - labels CastError { - return HeapObjectToJSArrayBuffer(o) otherwise CastError; -} - -Cast(o: HeapObject): Context - labels CastError { - if (IsContext(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): NativeContext - labels CastError { - if (IsNativeContext(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): JSObject - labels CastError { - if (IsJSObject(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): NumberDictionary - labels CastError { - if (IsNumberDictionary(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): String - labels CastError { - return HeapObjectToString(o) otherwise CastError; -} - -Cast(o: HeapObject): Oddball - labels CastError { - if (IsOddball(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): Symbol - labels CastError { - if (IsSymbol(o)) return %RawDownCast(o); - goto CastError; -} - macro Cast(o: Symbol): T labels CastError; -Cast(o: Symbol): PublicSymbol labels CastError { - if (IsPrivateSymbol(o)) goto CastError; - return %RawDownCast(o); +Cast(s: Symbol): PublicSymbol labels CastError { + if (s.flags.is_private) goto CastError; + return %RawDownCast(s); } -Cast(o: Symbol): PrivateSymbol labels CastError { - if (IsPrivateSymbol(o)) { - return %RawDownCast(o); - } +Cast(s: Symbol): PrivateSymbol labels CastError { + if (s.flags.is_private) return %RawDownCast(s); goto CastError; } - Cast(o: HeapObject): PublicSymbol labels CastError { - const o = Cast(o) otherwise CastError; - return Cast(o) otherwise CastError; + const s = Cast(o) otherwise CastError; + return Cast(s) otherwise CastError; } Cast(o: HeapObject): PrivateSymbol labels CastError { - const o = Cast(o) otherwise CastError; - return Cast(o) otherwise CastError; + const s = Cast(o) otherwise CastError; + return Cast(s) otherwise CastError; } Cast(o: HeapObject): DirectString @@ -468,53 +418,17 @@ Cast(o: HeapObject): 
JSFunctionWithPrototypeSlot return HeapObjectToJSFunctionWithPrototypeSlot(o) otherwise CastError; } -Cast(o: HeapObject): HeapNumber - labels CastError { - if (IsHeapNumber(o)) return %RawDownCast(o); - goto CastError; -} - Cast(o: HeapObject): BigInt labels CastError { if (IsBigInt(o)) return %RawDownCast(o); goto CastError; } -Cast(o: HeapObject): JSRegExp - labels CastError { - if (IsJSRegExp(o)) return %RawDownCast(o); - goto CastError; -} - Cast(implicit context: Context)(o: HeapObject): JSRegExpResult labels CastError { if (regexp::IsRegExpResult(o)) return %RawDownCast(o); goto CastError; } -Cast(implicit context: Context)(o: HeapObject): Map - labels CastError { - if (IsMap(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(implicit context: Context)(o: HeapObject): FeedbackCell - labels CastError { - if (IsFeedbackCell(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(implicit context: Context)(o: HeapObject): FeedbackVector - labels CastError { - if (IsFeedbackVector(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(o: HeapObject): JSPrimitiveWrapper - labels CastError { - if (IsJSPrimitiveWrapper(o)) return %RawDownCast(o); - goto CastError; -} - Cast(implicit context: Context)(o: HeapObject): JSSloppyArgumentsObject labels CastError { @@ -566,10 +480,10 @@ Cast(implicit context: Context)(o: HeapObject): FastJSArray labels CastError { if (IsForceSlowPath()) goto CastError; - const map: Map = o.map; - if (!IsJSArrayMap(map)) goto CastError; + if (!Is(o)) goto CastError; // Bailout if receiver has slow elements. 
+ const map: Map = o.map; const elementsKind: ElementsKind = LoadMapElementsKind(map); if (!IsFastElementsKind(elementsKind)) goto CastError; @@ -583,10 +497,10 @@ Cast(implicit context: Context)(o: HeapObject): FastJSArray Cast(implicit context: Context)(o: HeapObject): FastJSArrayForRead labels CastError { - const map: Map = o.map; - if (!IsJSArrayMap(map)) goto CastError; + if (!Is(o)) goto CastError; // Bailout if receiver has slow elements. + const map: Map = o.map; const elementsKind: ElementsKind = LoadMapElementsKind(map); if (!IsElementsKindLessThanOrEqual( elementsKind, ElementsKind::LAST_ANY_NONEXTENSIBLE_ELEMENTS_KIND)) @@ -623,30 +537,9 @@ Cast(implicit context: Context)( return %RawDownCast(a); } -Cast(o: HeapObject): JSReceiver - labels CastError { - if (IsJSReceiver(o)) return %RawDownCast(o); - goto CastError; -} - -Cast(implicit context: Context)(o: HeapObject): JSFunction - labels CastError { - if (IsJSFunction(o)) return %RawDownCast(o); - goto CastError; -} - -extern macro IsDebugInfo(HeapObject): bool; -Cast(implicit context: Context)(o: HeapObject): DebugInfo - labels CastError { - if (IsDebugInfo(o)) return %RawDownCast(o); - goto CastError; -} - -extern macro IsCoverageInfo(HeapObject): bool; -Cast(implicit context: Context)(o: HeapObject): CoverageInfo - labels CastError { - if (IsCoverageInfo(o)) return %RawDownCast(o); - goto CastError; +Cast(o: HeapObject): SeqOneByteString labels CastError { + if (!IsSeqOneByteString(o)) goto CastError; + return %RawDownCast(o); } Cast(o: HeapObject): JSReceiver|Null @@ -664,35 +557,6 @@ Cast(o: HeapObject): JSReceiver|Null } } -Cast(o: HeapObject): - PromiseReactionJobTask labels CastError { - if (IsPromiseReactionJobTask(o)) { - return %RawDownCast(o); - } - goto CastError; -} - -Cast(o: HeapObject): - PromiseFulfillReactionJobTask labels CastError { - if (IsPromiseFulfillReactionJobTask(o)) { - return %RawDownCast(o); - } - goto CastError; -} - -Cast(o: HeapObject): - PromiseRejectReactionJobTask 
labels CastError { - if (IsPromiseRejectReactionJobTask(o)) { - return %RawDownCast(o); - } - goto CastError; -} - -Cast(o: HeapObject): PromiseReaction labels CastError { - if (IsPromiseReaction(o)) return %RawDownCast(o); - goto CastError; -} - Cast(o: Object): Smi|PromiseReaction labels CastError { typeswitch (o) { case (o: Smi): { @@ -737,32 +601,64 @@ Cast(implicit context: Context)(o: Object): Zero| } } -Cast(o: HeapObject): JSBoundFunction labels CastError { - if (IsJSBoundFunction(o)) return %RawDownCast(o); - goto CastError; +Cast(implicit context: Context)(o: Object): + JSFunction|JSBoundFunction labels CastError { + typeswitch (o) { + case (o: JSFunction): { + return o; + } + case (o: JSBoundFunction): { + return o; + } + case (Object): { + goto CastError; + } + } } -Cast(o: HeapObject): PromiseCapability labels CastError { - if (IsPromiseCapability(o)) return %RawDownCast(o); - goto CastError; +Cast(o: HeapObject): FixedArray| + Undefined labels CastError { + typeswitch (o) { + case (o: Undefined): { + return o; + } + case (o: FixedArray): { + return o; + } + case (Object): { + goto CastError; + } + } } -Cast(o: HeapObject): SharedFunctionInfo labels CastError { - if (IsSharedFunctionInfo(o)) return %RawDownCast(o); - goto CastError; +Cast(o: HeapObject): JSProxy|Null labels CastError { + typeswitch (o) { + case (o: Null): { + return o; + } + case (o: JSProxy): { + return o; + } + case (Object): { + goto CastError; + } + } } -Cast(o: HeapObject): JSPromise labels CastError { - if (IsJSPromise(o)) return %RawDownCast(o); - goto CastError; +macro Is( + implicit context: Context)(o: B): bool { + Cast(o) otherwise return false; + return true; } -Cast(o: HeapObject): - JSFinalizationRegistry labels CastError { - if (IsJSFinalizationRegistry(o)) { - return %RawDownCast(o); - } - goto CastError; +macro UnsafeCast(implicit context: Context)(o: Object): + A { + assert(Is(o)); + return %RawDownCast(o); +} + +macro UnsafeConstCast(r: const &T):&T { + return 
%RawDownCast<&T>(r); } UnsafeCast(implicit context: Context)(o: Object): diff --git a/deps/v8/src/builtins/constants-table-builder.cc b/deps/v8/src/builtins/constants-table-builder.cc index 94e8dc05ec7024..97565f2e3741fd 100644 --- a/deps/v8/src/builtins/constants-table-builder.cc +++ b/deps/v8/src/builtins/constants-table-builder.cc @@ -57,24 +57,30 @@ uint32_t BuiltinsConstantsTableBuilder::AddObject(Handle object) { } } -void BuiltinsConstantsTableBuilder::PatchSelfReference( - Handle self_reference, Handle code_object) { -#ifdef DEBUG +namespace { +void CheckPreconditionsForPatching(Isolate* isolate, + Handle replacement_object) { // Roots must not be inserted into the constants table as they are already - // accessibly from the root list. + // accessible from the root list. RootIndex root_list_index; - DCHECK(!isolate_->roots_table().IsRootHandle(code_object, &root_list_index)); + DCHECK(!isolate->roots_table().IsRootHandle(replacement_object, + &root_list_index)); + USE(root_list_index); // Not yet finalized. 
- DCHECK_EQ(ReadOnlyRoots(isolate_).empty_fixed_array(), - isolate_->heap()->builtins_constants_table()); + DCHECK_EQ(ReadOnlyRoots(isolate).empty_fixed_array(), + isolate->heap()->builtins_constants_table()); - DCHECK(isolate_->IsGeneratingEmbeddedBuiltins()); + DCHECK(isolate->IsGeneratingEmbeddedBuiltins()); +} +} // namespace +void BuiltinsConstantsTableBuilder::PatchSelfReference( + Handle self_reference, Handle code_object) { + CheckPreconditionsForPatching(isolate_, code_object); DCHECK(self_reference->IsOddball()); DCHECK(Oddball::cast(*self_reference).kind() == Oddball::kSelfReferenceMarker); -#endif uint32_t key; if (map_.Delete(self_reference, &key)) { @@ -83,6 +89,17 @@ void BuiltinsConstantsTableBuilder::PatchSelfReference( } } +void BuiltinsConstantsTableBuilder::PatchBasicBlockCountersReference( + Handle counters) { + CheckPreconditionsForPatching(isolate_, counters); + + uint32_t key; + if (map_.Delete(ReadOnlyRoots(isolate_).basic_block_counters_marker(), + &key)) { + map_.Set(counters, key); + } +} + void BuiltinsConstantsTableBuilder::Finalize() { HandleScope handle_scope(isolate_); @@ -101,7 +118,7 @@ void BuiltinsConstantsTableBuilder::Finalize() { for (auto it = it_scope.begin(); it != it_scope.end(); ++it) { uint32_t index = *it.entry(); Object value = it.key(); - if (value.IsCode() && Code::cast(value).kind() == Code::BUILTIN) { + if (value.IsCode() && Code::cast(value).kind() == CodeKind::BUILTIN) { // Replace placeholder code objects with the real builtin. // See also: SetupIsolateDelegate::PopulateWithPlaceholders. 
// TODO(jgruber): Deduplicate placeholders and their corresponding @@ -117,6 +134,8 @@ void BuiltinsConstantsTableBuilder::Finalize() { DCHECK(table->get(i).IsHeapObject()); DCHECK_NE(ReadOnlyRoots(isolate_).undefined_value(), table->get(i)); DCHECK_NE(ReadOnlyRoots(isolate_).self_reference_marker(), table->get(i)); + DCHECK_NE(ReadOnlyRoots(isolate_).basic_block_counters_marker(), + table->get(i)); } #endif diff --git a/deps/v8/src/builtins/constants-table-builder.h b/deps/v8/src/builtins/constants-table-builder.h index 89c95912a1ea0b..fa9d7dee3a3ac8 100644 --- a/deps/v8/src/builtins/constants-table-builder.h +++ b/deps/v8/src/builtins/constants-table-builder.h @@ -34,6 +34,11 @@ class BuiltinsConstantsTableBuilder final { void PatchSelfReference(Handle self_reference, Handle code_object); + // References to the array that stores basic block usage counters start out as + // references to a unique oddball. Once the actual array has been allocated, + // such entries in the constants map must be patched up. + void PatchBasicBlockCountersReference(Handle counters); + // Should be called after all affected code (e.g. builtins and bytecode // handlers) has been generated. void Finalize(); diff --git a/deps/v8/src/builtins/constructor.tq b/deps/v8/src/builtins/constructor.tq new file mode 100644 index 00000000000000..ba68e190282138 --- /dev/null +++ b/deps/v8/src/builtins/constructor.tq @@ -0,0 +1,157 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/ast/ast.h" + +namespace runtime { +extern runtime CreateArrayLiteral( + Context, FeedbackVector, TaggedIndex, ArrayBoilerplateDescription, + Smi): HeapObject; +extern runtime CreateObjectLiteral( + Context, FeedbackVector, TaggedIndex, ObjectBoilerplateDescription, + Smi): HeapObject; +} + +namespace constructor { + +extern builtin FastNewObject(Context, JSFunction, JSReceiver): JSObject; + +extern enum AllocationSiteMode constexpr 'AllocationSiteMode' { + DONT_TRACK_ALLOCATION_SITE, + TRACK_ALLOCATION_SITE +} + +const kIsShallowAndDisableMementos: constexpr int31 + generates 'AggregateLiteral::Flags::kIsShallowAndDisableMementos'; +const kEvalScope: constexpr ScopeType generates 'ScopeType::EVAL_SCOPE'; +const kFunctionScope: + constexpr ScopeType generates 'ScopeType::FUNCTION_SCOPE'; + +extern macro ConstructorBuiltinsAssembler::FastNewFunctionContext( + ScopeInfo, uint32, Context, constexpr ScopeType): Context; +extern macro ConstructorBuiltinsAssembler::CreateRegExpLiteral( + HeapObject, TaggedIndex, Object, Smi, Context): JSRegExp; +extern macro ConstructorBuiltinsAssembler::CreateShallowArrayLiteral( + FeedbackVector, TaggedIndex, Context, + constexpr AllocationSiteMode): HeapObject labels CallRuntime; +extern macro ConstructorBuiltinsAssembler::CreateEmptyArrayLiteral( + FeedbackVector, TaggedIndex, Context): HeapObject; +extern macro ConstructorBuiltinsAssembler::CreateShallowObjectLiteral( + FeedbackVector, TaggedIndex): HeapObject labels CallRuntime; +extern macro ConstructorBuiltinsAssembler::CreateEmptyObjectLiteral(Context): + JSObject; + +builtin FastNewFunctionContextEval(implicit context: Context)( + scopeInfo: ScopeInfo, slots: uint32): Context { + return FastNewFunctionContext(scopeInfo, slots, context, kEvalScope); +} + +builtin FastNewFunctionContextFunction(implicit context: Context)( + scopeInfo: ScopeInfo, slots: uint32): Context { + return FastNewFunctionContext(scopeInfo, slots, context, kFunctionScope); +} + +builtin 
CreateRegExpLiteral(implicit context: Context)( + maybeFeedbackVector: HeapObject, slot: TaggedIndex, pattern: Object, + flags: Smi): JSRegExp { + return CreateRegExpLiteral( + maybeFeedbackVector, slot, pattern, flags, context); +} + +builtin CreateShallowArrayLiteral(implicit context: Context)( + feedbackVector: FeedbackVector, slot: TaggedIndex, + constantElements: ArrayBoilerplateDescription): HeapObject { + try { + return CreateShallowArrayLiteral( + feedbackVector, slot, context, + AllocationSiteMode::DONT_TRACK_ALLOCATION_SITE) + otherwise CallRuntime; + } label CallRuntime deferred { + tail runtime::CreateArrayLiteral( + context, feedbackVector, slot, constantElements, + SmiConstant(kIsShallowAndDisableMementos)); + } +} + +builtin CreateEmptyArrayLiteral(implicit context: Context)( + feedbackVector: FeedbackVector, slot: TaggedIndex): HeapObject { + return CreateEmptyArrayLiteral(feedbackVector, slot, context); +} + +builtin CreateShallowObjectLiteral(implicit context: Context)( + feedbackVector: FeedbackVector, slot: TaggedIndex, + desc: ObjectBoilerplateDescription, flags: Smi): HeapObject { + try { + return CreateShallowObjectLiteral(feedbackVector, slot) + otherwise CallRuntime; + } label CallRuntime deferred { + tail runtime::CreateObjectLiteral( + context, feedbackVector, slot, desc, flags); + } +} + +// ES #sec-object-constructor +transitioning javascript builtin +ObjectConstructor( + js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(...arguments): JSAny { + if (newTarget == Undefined || newTarget == target) { + // Not Subclass. + const value = arguments[0]; + if (arguments.length <= 0 || value == Undefined || value == Null) { + // New object. + return CreateEmptyObjectLiteral(context); + } else { + return ToObject(context, value); + } + } else { + // Subclass. 
+ return FastNewObject(context, target, UnsafeCast(newTarget)); + } +} + +builtin CreateEmptyLiteralObject(implicit context: Context)(): JSAny { + return CreateEmptyObjectLiteral(context); +} + +// ES #sec-number-constructor +transitioning javascript builtin +NumberConstructor( + js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(...arguments): JSAny { + // 1. If no arguments were passed to this function invocation, let n be +0. + let n: Number = 0; + if (arguments.length > 0) { + // 2. Else, + // a. Let prim be ? ToNumeric(value). + // b. If Type(prim) is BigInt, let n be the Number value for prim. + // c. Otherwise, let n be prim. + const value = arguments[0]; + n = ToNumber(value, BigIntHandling::kConvertToNumber); + } + + // 3. If NewTarget is undefined, return n. + if (newTarget == Undefined) return n; + + // 4. Let O be ? OrdinaryCreateFromConstructor(NewTarget, + // "%NumberPrototype%", « [[NumberData]] »). + // 5. Set O.[[NumberData]] to n. + // 6. Return O. + + // We ignore the normal target parameter and load the value from the + // current frame here in order to reduce register pressure on the fast path. + const target: JSFunction = LoadTargetFromFrame(); + const result = UnsafeCast( + FastNewObject(context, target, UnsafeCast(newTarget))); + result.value = n; + return result; +} + +javascript builtin +GenericLazyDeoptContinuation(js-implicit context: NativeContext)(result: JSAny): + JSAny { + return result; +} + +} // namespace constructor diff --git a/deps/v8/src/builtins/conversion.tq b/deps/v8/src/builtins/conversion.tq new file mode 100644 index 00000000000000..14b953f416c7ef --- /dev/null +++ b/deps/v8/src/builtins/conversion.tq @@ -0,0 +1,233 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +namespace runtime { +extern transitioning runtime ToStringRT(Context, BigInt): String; +} + +extern enum OrdinaryToPrimitiveHint { kString, kNumber } + +extern macro OrdinaryToPrimitive(implicit context: Context)( + JSAny, constexpr OrdinaryToPrimitiveHint): JSPrimitive; + +namespace conversion { + +builtin StringToNumber(implicit context: Context)(input: String): Number { + return ::StringToNumber(input); +} + +transitioning builtin NonNumberToNumber(implicit context: Context)( + input: JSAnyNotNumber): Number { + return ::NonNumberToNumber(input); +} + +transitioning builtin NonNumberToNumeric(implicit context: Context)( + input: JSAnyNotNumber): Numeric { + return ::NonNumberToNumeric(input); +} + +transitioning builtin ToNumeric(implicit context: Context)(input: JSAny): + Numeric { + typeswitch (input) { + case (n: Number): { + return n; + } + case (h: JSAnyNotNumber): { + return conversion::NonNumberToNumeric(h); + } + } +} + +// ES section #sec-tostring-applied-to-the-number-type +builtin NumberToString(implicit context: Context)(input: Number): String { + return ::NumberToString(input); +} + +// ES6 section 7.1.2 ToBoolean ( argument ) +builtin ToBoolean(implicit context: Context)(input: JSAny): Boolean { + BranchIfToBooleanIsTrue(input) otherwise return TrueConstant(), + return FalseConstant(); +} + +transitioning builtin ToLength(implicit context: Context)(input: JSAny): + Number { + // We might need to loop once for ToNumber conversion. + let x: JSAny = input; + while (true) { + typeswitch (x) { + case (s: Smi): { + if (s < 0) return 0; + return s; + } + case (h: HeapNumber): { + let value: float64 = Convert(h); + // The sense of this test is important for the NaN and -0 cases. 
+ if (!(value > 0)) return 0; + if (value > kMaxSafeInteger) return kMaxSafeInteger; + value = math::Float64Floor(value); + return ChangeFloat64ToTagged(value); + } + case (h: JSAnyNotNumber): { + x = ::NonNumberToNumber(h); + } + } + } + VerifiedUnreachable(); +} + +transitioning builtin ToName(implicit context: Context)(input: JSAny): Name { + // We might need to loop once for ToNumber conversion. + let x: JSAny = input; + while (true) { + typeswitch (x) { + case (n: Name): { + return n; + } + case (n: Number): { + return ::NumberToString(n); + } + case (b: BigInt): { + // We don't have a fast-path for BigInt currently, so just + // tail call to the %ToString runtime function here for now. + tail runtime::ToStringRT(context, b); + } + case (o: Oddball): { + return o.to_string; + } + case (o: JSReceiver): { + x = NonPrimitiveToPrimitive_String(o); + } + } + } + VerifiedUnreachable(); +} + +const kNoConstructorFunctionIndex: + constexpr int31 generates 'Map::kNoConstructorFunctionIndex'; + +// ES6 section 7.1.13 ToObject (argument) +transitioning builtin ToObject(implicit context: Context)(input: JSAny): + JSReceiver { + try { + typeswitch (input) { + case (Smi): { + goto WrapPrimitive(ContextSlot::NUMBER_FUNCTION_INDEX); + } + case (o: JSReceiver): { + return o; + } + case (o: JSAnyNotSmi): { + const index: intptr = Convert( + o.map.in_object_properties_start_or_constructor_function_index); + if (index != kNoConstructorFunctionIndex) + goto WrapPrimitive( + %RawDownCast>(index)); + ThrowTypeError(MessageTemplate::kUndefinedOrNullToObject, 'ToObject'); + } + } + } label WrapPrimitive(constructorIndex: Slot) { + const constructor = *NativeContextSlot(constructorIndex); + const map: Map = UnsafeCast(constructor.prototype_or_initial_map); + const wrapper = + UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); + wrapper.value = input; + return wrapper; + } +} + +// ES6 section 7.1.1 ToPrimitive ( input [ , PreferredType ] ) + +transitioning macro 
TryGetExoticToPrimitive(implicit context: Context)( + input: JSAny): JSAny labels OrdinaryToPrimitive { + // Look up the @@toPrimitive property. + const exoticToPrimitive: JSAny = + GetProperty(input, ToPrimitiveSymbolConstant()); + if (IsNullOrUndefined(exoticToPrimitive)) goto OrdinaryToPrimitive; + return exoticToPrimitive; +} + +transitioning macro CallExoticToPrimitive(implicit context: Context)( + input: JSAny, exoticToPrimitive: JSAny, hint: String): JSPrimitive { + // Invoke the exoticToPrimitive method on the input with a string + // representation of the hint. + const result: JSAny = Call(context, exoticToPrimitive, input, hint); + + // Verify that the result is primitive. + typeswitch (result) { + case (o: JSPrimitive): { + return o; + } + case (JSReceiver): { + // Somehow the @@toPrimitive method on input didn't yield a primitive. + ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive); + } + } +} + +transitioning builtin NonPrimitiveToPrimitive_Default( + implicit context: Context)(input: JSReceiver): JSPrimitive { + const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input) + otherwise return OrdinaryToPrimitive_Number(input); + return CallExoticToPrimitive( + input, exoticToPrimitive, DefaultStringConstant()); +} + +transitioning builtin NonPrimitiveToPrimitive_Number(implicit context: Context)( + input: JSReceiver): JSPrimitive { + const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input) + otherwise return OrdinaryToPrimitive_Number(input); + return CallExoticToPrimitive( + input, exoticToPrimitive, NumberStringConstant()); +} + +transitioning builtin NonPrimitiveToPrimitive_String(implicit context: Context)( + input: JSReceiver): JSPrimitive { + const exoticToPrimitive: JSAny = TryGetExoticToPrimitive(input) + otherwise return OrdinaryToPrimitive_String(input); + return CallExoticToPrimitive( + input, exoticToPrimitive, StringStringConstant()); +} + +// 7.1.1.1 OrdinaryToPrimitive ( O, hint ) + +transitioning macro 
TryToPrimitiveMethod(implicit context: Context)( + input: JSAny, name: String): JSPrimitive labels Continue { + const method: JSAny = GetProperty(input, name); + typeswitch (method) { + case (Callable): { + const value: JSAny = Call(context, method, input); + return Cast(value) otherwise Continue; + } + case (JSAny): { + goto Continue; + } + } +} + +transitioning builtin OrdinaryToPrimitive_Number(implicit context: Context)( + input: JSAny): JSPrimitive { + try { + return TryToPrimitiveMethod(input, ValueOfStringConstant()) + otherwise String; + } label String { + return TryToPrimitiveMethod(input, ToStringStringConstant()) + otherwise Throw; + } label Throw { + ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive); + } +} + +transitioning builtin OrdinaryToPrimitive_String(implicit context: Context)( + input: JSAny): JSPrimitive { + try { + return TryToPrimitiveMethod(input, ToStringStringConstant()) + otherwise String; + } label String { + return TryToPrimitiveMethod(input, ValueOfStringConstant()) otherwise Throw; + } label Throw { + ThrowTypeError(MessageTemplate::kCannotConvertToPrimitive); + } +} + +} // namespace conversion diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq index e2c11120381aec..6ac99010285500 100644 --- a/deps/v8/src/builtins/convert.tq +++ b/deps/v8/src/builtins/convert.tq @@ -49,9 +49,20 @@ FromConstexpr(i: constexpr int31): Number { } FromConstexpr(i: constexpr int31): uint8 { const i: uint32 = i; - StaticAssert(i <= 255); + static_assert(i <= 255); return %RawDownCast(i); } +FromConstexpr(i: constexpr int31): int8 { + const i: int32 = i; + static_assert(-128 <= i && i <= 127); + return %RawDownCast(i); +} +FromConstexpr(i: constexpr int31): char8 { + const i: int32 = i; + static_assert(i <= 255); + static_assert(0 <= i); + return %RawDownCast(i); +} FromConstexpr(s: constexpr Smi): Number { return SmiConstant(s); } @@ -94,6 +105,15 @@ FromConstexpr(c: constexpr PromiseState): PromiseState { return 
%RawDownCast(Int32Constant(c)); } +FromConstexpr(c: constexpr InstanceType): + InstanceType { + return %RawDownCast(Uint16Constant(c)); +} + +FromConstexpr( + c: constexpr IterationKind): IterationKind { + return %RawDownCast(Unsigned(%FromConstexpr(c))); +} macro Convert(i: From): To { return i; @@ -103,6 +123,9 @@ macro Convert(i: From): To labels Overflow { return i; } +Convert(b: bool): Boolean { + return b ? True : False; +} extern macro ConvertElementsKindToInt(ElementsKind): int32; Convert(elementsKind: ElementsKind): int32 { return ConvertElementsKindToInt(elementsKind); @@ -113,6 +136,9 @@ Convert(i: int32): Number { Convert(i: int32): intptr { return ChangeInt32ToIntPtr(i); } +Convert(i: int31): intptr { + return ChangeInt32ToIntPtr(i); +} Convert(i: uint32): intptr { return Signed(ChangeUint32ToWord(i)); } @@ -137,6 +163,9 @@ Convert(ui: uint8): intptr { Convert(i: intptr): uint8 { return %RawDownCast(Unsigned(TruncateIntPtrToInt32(i)) & 0xFF); } +Convert(i: intptr): int8 { + return %RawDownCast(TruncateIntPtrToInt32(i) << 24 >> 24); +} Convert(i: uint8): int32 { return Signed(Convert(i)); } @@ -201,6 +230,9 @@ Convert(i: intptr): PositiveSmi labels IfOverflow { goto IfOverflow; } } +Convert(ui: uint32): PositiveSmi labels IfOverflow { + return Convert(Convert(ui)) otherwise IfOverflow; +} Convert(s: Smi): int32 { return SmiToInt32(s); } diff --git a/deps/v8/src/builtins/finalization-registry.tq b/deps/v8/src/builtins/finalization-registry.tq index 143486c73768f1..84499e19e1a0c9 100644 --- a/deps/v8/src/builtins/finalization-registry.tq +++ b/deps/v8/src/builtins/finalization-registry.tq @@ -6,6 +6,8 @@ namespace runtime { extern runtime ShrinkFinalizationRegistryUnregisterTokenMap( Context, JSFinalizationRegistry): void; +extern runtime JSFinalizationRegistryRegisterWeakCellWithUnregisterToken( + implicit context: Context)(JSFinalizationRegistry, WeakCell): void; } namespace weakref { @@ -52,6 +54,19 @@ PopClearedCell(finalizationRegistry: 
JSFinalizationRegistry): WeakCell| } } +transitioning macro PushCell( + finalizationRegistry: JSFinalizationRegistry, cell: WeakCell) { + cell.next = finalizationRegistry.active_cells; + typeswitch (finalizationRegistry.active_cells) { + case (Undefined): { + } + case (oldHead: WeakCell): { + oldHead.prev = cell; + } + } + finalizationRegistry.active_cells = cell; +} + transitioning macro FinalizationRegistryCleanupLoop(implicit context: Context)( finalizationRegistry: JSFinalizationRegistry, callback: Callable) { @@ -77,6 +92,103 @@ FinalizationRegistryCleanupLoop(implicit context: Context)( context, finalizationRegistry); } +transitioning javascript builtin +FinalizationRegistryConstructor( + js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(cleanupCallback: JSAny): JSFinalizationRegistry { + // 1. If NewTarget is undefined, throw a TypeError exception. + if (newTarget == Undefined) { + ThrowTypeError( + MessageTemplate::kConstructorNotFunction, 'FinalizationRegistry'); + } + // 2. If IsCallable(cleanupCallback) is false, throw a TypeError exception. + const cleanupCallback = Cast(cleanupCallback) otherwise + ThrowTypeError(MessageTemplate::kWeakRefsCleanupMustBeCallable); + // 3. Let finalizationRegistry be ? OrdinaryCreateFromConstructor(NewTarget, + // "%FinalizationRegistryPrototype%", « [[Realm]], [[CleanupCallback]], + // [[Cells]] »). + const map = GetDerivedMap(target, UnsafeCast(newTarget)); + const finalizationRegistry = UnsafeCast( + AllocateFastOrSlowJSObjectFromMap(map)); + // 4. Let fn be the active function object. + // 5. Set finalizationRegistry.[[Realm]] to fn.[[Realm]]. + finalizationRegistry.native_context = context; + // 6. Set finalizationRegistry.[[CleanupCallback]] to cleanupCallback. + finalizationRegistry.cleanup = cleanupCallback; + finalizationRegistry.flags = + SmiTag(FinalizationRegistryFlags{scheduled_for_cleanup: false}); + // 7. Set finalizationRegistry.[[Cells]] to be an empty List. 
+ assert(finalizationRegistry.active_cells == Undefined); + assert(finalizationRegistry.cleared_cells == Undefined); + assert(finalizationRegistry.key_map == Undefined); + // 8. Return finalizationRegistry. + return finalizationRegistry; +} + +transitioning javascript builtin +FinalizationRegistryRegister( + js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { + // 1. Let finalizationRegistry be the this value. + // 2. Perform ? RequireInternalSlot(finalizationRegistry, [[Cells]]). + const finalizationRegistry = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, + 'FinalizationRegistry.prototype.register', receiver); + // 3. If Type(target) is not Object, throw a TypeError exception. + const target = Cast(arguments[0]) otherwise ThrowTypeError( + MessageTemplate::kWeakRefsRegisterTargetMustBeObject); + const heldValue = arguments[1]; + // 4. If SameValue(target, heldValue), throw a TypeError exception. + if (target == heldValue) { + ThrowTypeError( + MessageTemplate::kWeakRefsRegisterTargetAndHoldingsMustNotBeSame); + } + const unregisterToken = arguments[2]; + // 5. If Type(unregisterToken) is not Object, + // a. If unregisterToken is not undefined, throw a TypeError exception. + // b. Set unregisterToken to empty. + let hasUnregisterToken: bool = false; + typeswitch (unregisterToken) { + case (Undefined): { + } + case (JSReceiver): { + hasUnregisterToken = true; + } + case (JSAny): deferred { + ThrowTypeError( + MessageTemplate::kWeakRefsUnregisterTokenMustBeObject, + unregisterToken); + } + } + // 6. Let cell be the Record { [[WeakRefTarget]] : target, [[HeldValue]]: + // heldValue, [[UnregisterToken]]: unregisterToken }. + // Allocate the WeakCell object in the old space, because 1) WeakCell weakness + // handling is only implemented in the old space 2) they're supposedly + // long-living. TODO(marja, gsathya): Support WeakCells in Scavenger. 
+ const cell = new (Pretenured) WeakCell{ + map: GetWeakCellMap(), + finalization_registry: finalizationRegistry, + target: target, + unregister_token: unregisterToken, + holdings: heldValue, + prev: Undefined, + next: Undefined, + key_list_prev: Undefined, + key_list_next: Undefined + }; + // 7. Append cell to finalizationRegistry.[[Cells]]. + PushCell(finalizationRegistry, cell); + if (hasUnregisterToken) { + // If an unregister token is provided, a runtime call is needed to + // do some OrderedHashTable operations and register the mapping. + // See v8:10705. + runtime::JSFinalizationRegistryRegisterWeakCellWithUnregisterToken( + finalizationRegistry, cell); + } + // 8. Return undefined. + return Undefined; +} + transitioning javascript builtin FinalizationRegistryPrototypeCleanupSome( js-implicit context: NativeContext, receiver: JSAny)(...arguments): JSAny { diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq new file mode 100644 index 00000000000000..c1c08cd9616b4b --- /dev/null +++ b/deps/v8/src/builtins/function.tq @@ -0,0 +1,109 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +namespace function { + +extern macro OrdinaryHasInstance(Context, Object, Object): JSAny; + +// ES6 section 19.2.3.6 Function.prototype[@@hasInstance] +javascript builtin FunctionPrototypeHasInstance( + js-implicit context: NativeContext, receiver: JSAny)(value: JSAny): JSAny { + return OrdinaryHasInstance(context, receiver, value); +} + +extern transitioning builtin +FunctionPrototypeBind(implicit context: Context)( + JSFunction, JSAny, int32): JSAny; + +const kLengthDescriptorIndex: + constexpr int32 generates 'JSFunction::kLengthDescriptorIndex'; +const kNameDescriptorIndex: + constexpr int32 generates 'JSFunction::kNameDescriptorIndex'; +const kMinDescriptorsForFastBind: + constexpr int31 generates 'JSFunction::kMinDescriptorsForFastBind'; + +macro CheckAccessor(implicit context: Context)( + array: DescriptorArray, index: constexpr int32, name: Name) labels Slow { + const descriptor: DescriptorEntry = array.descriptors[index]; + const key: Name|Undefined = descriptor.key; + if (!TaggedEqual(key, name)) goto Slow; + + // The descriptor value must be an AccessorInfo. + Cast(descriptor.value) otherwise goto Slow; +} + +// ES6 section 19.2.3.2 Function.prototype.bind +transitioning javascript builtin +FastFunctionPrototypeBind( + js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(...arguments): JSAny { + const argc: intptr = arguments.length; + try { + typeswitch (receiver) { + case (fn: JSFunction|JSBoundFunction): { + // Disallow binding of slow-mode functions. We need to figure out + // whether the length and name property are in the original state. + Comment('Disallow binding of slow-mode functions'); + if (IsDictionaryMap(fn.map)) goto Slow; + + // Check whether the length and name properties are still present as + // AccessorInfo objects. If so, their value can be recomputed even if + // the actual value on the object changes. 
+ + if (fn.map.bit_field3.number_of_own_descriptors < + kMinDescriptorsForFastBind) { + goto Slow; + } + + const descriptors: DescriptorArray = fn.map.instance_descriptors; + CheckAccessor( + descriptors, kLengthDescriptorIndex, LengthStringConstant()) + otherwise Slow; + CheckAccessor(descriptors, kNameDescriptorIndex, NameStringConstant()) + otherwise Slow; + + // Choose the right bound function map based on whether the target is + // constructable. + + const boundFunctionMap: Map = + IsConstructor(fn) ? + *NativeContextSlot( + ContextSlot::BOUND_FUNCTION_WITH_CONSTRUCTOR_MAP_INDEX) : + *NativeContextSlot(ContextSlot:: + BOUND_FUNCTION_WITHOUT_CONSTRUCTOR_MAP_INDEX); + + // Verify that prototype matches that of the target bound function. + + if (fn.map.prototype != boundFunctionMap.prototype) goto Slow; + + // Allocate the arguments array. + + const argumentsArray = arguments.length <= 1 ? + kEmptyFixedArray : + NewFixedArray( + arguments.length - 1, ArgumentsIterator{arguments, current: 1}); + + const boundReceiver: JSAny = arguments[0]; + + const result = new JSBoundFunction{ + map: boundFunctionMap, + properties_or_hash: kEmptyFixedArray, + elements: kEmptyFixedArray, + bound_target_function: fn, + bound_this: boundReceiver, + bound_arguments: argumentsArray + }; + return result; + } + + case (JSAny): { + goto Slow; + } + } + } label Slow { + tail FunctionPrototypeBind( + LoadTargetFromFrame(), newTarget, Convert(argc)); + } +} +} // namespace function diff --git a/deps/v8/src/builtins/growable-fixed-array-gen.cc b/deps/v8/src/builtins/growable-fixed-array-gen.cc index 042207bff6a6c6..e242ced5c6af0a 100644 --- a/deps/v8/src/builtins/growable-fixed-array-gen.cc +++ b/deps/v8/src/builtins/growable-fixed-array-gen.cc @@ -91,7 +91,9 @@ TNode GrowableFixedArray::ResizeFixedArray( CodeStubAssembler::ExtractFixedArrayFlags flags; flags |= CodeStubAssembler::ExtractFixedArrayFlag::kFixedArrays; TNode to_array = CAST(ExtractFixedArray( - from_array, nullptr, 
element_count, new_capacity, flags)); + from_array, base::Optional>(base::nullopt), + base::Optional>(element_count), + base::Optional>(new_capacity), flags)); return to_array; } diff --git a/deps/v8/src/builtins/growable-fixed-array.tq b/deps/v8/src/builtins/growable-fixed-array.tq index 094e051a65acd2..af9418b0c9168c 100644 --- a/deps/v8/src/builtins/growable-fixed-array.tq +++ b/deps/v8/src/builtins/growable-fixed-array.tq @@ -25,9 +25,6 @@ struct GrowableFixedArray { this.array = this.ResizeFixedArray(this.capacity); } } - macro ToFixedArray(): FixedArray { - return this.ResizeFixedArray(this.length); - } macro ToJSArray(implicit context: Context)(): JSArray { const nativeContext: NativeContext = LoadNativeContext(context); diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc index 04a1fa9e0db4d9..10b9a9e308241a 100644 --- a/deps/v8/src/builtins/ia32/builtins-ia32.cc +++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc @@ -1439,7 +1439,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress( #endif } -} // end anonymous namespace +} // anonymous namespace // static void Builtins::Generate_InterpreterPushArgsThenConstructImpl( @@ -1664,12 +1664,27 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, const RegisterConfiguration* config(RegisterConfiguration::Default()); int allocatable_register_count = config->num_allocatable_general_registers(); if (with_result) { +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin) { + // xmm0 is not included in the allocateable registers. + __ movd(xmm0, eax); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. + __ mov( + Operand(esp, config->num_allocatable_general_registers() * + kSystemPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize), + eax); + } +#else // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. 
__ mov(Operand(esp, config->num_allocatable_general_registers() * kSystemPointerSize + BuiltinContinuationFrameConstants::kFixedFrameSize), eax); +#endif } // Replace the builtin index Smi on the stack with the start address of the @@ -1687,6 +1702,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ SmiUntag(Register::from_code(code)); } } +#ifdef V8_REVERSE_JSARGS + if (with_result && java_script_builtin) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. eax contains the arguments count, the return value + // from LAZY is always the last argument. + __ movd(Operand(esp, eax, times_system_pointer_size, + BuiltinContinuationFrameConstants::kFixedFrameSize), + xmm0); + } +#endif __ mov( ebp, Operand(esp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); @@ -2248,13 +2273,29 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label loop; __ add(eax, edx); __ PopReturnAddressTo(ecx); +#ifdef V8_REVERSE_JSARGS + // TODO(victor): When we remove the arguments adaptor machinery above, + // we can free the scratch register and avoid this move. + __ movd(xmm2, ebx); // Save root register. + __ Pop(ebx); // Save new receiver. +#endif __ bind(&loop); { - __ Push(Operand(scratch, edx, times_system_pointer_size, - 1 * kSystemPointerSize)); __ dec(edx); +#ifdef V8_REVERSE_JSARGS + // Skips old receiver. + __ Push(Operand(scratch, edx, times_system_pointer_size, + kFPOnStackSize + kPCOnStackSize + kSystemPointerSize)); +#else + __ Push(Operand(scratch, edx, times_system_pointer_size, + kFPOnStackSize + kPCOnStackSize)); +#endif __ j(not_zero, &loop); } +#ifdef V8_REVERSE_JSARGS + __ Push(ebx); // Push new receiver. + __ movd(ebx, xmm2); // Recover root register. 
+#endif __ PushReturnAddressFrom(ecx); } } @@ -3250,6 +3291,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ ret(0); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. + __ Trap(); +} + namespace { // Generates an Operand for saving parameters after PrepareCallApiFunction. diff --git a/deps/v8/src/builtins/internal.tq b/deps/v8/src/builtins/internal.tq new file mode 100644 index 00000000000000..b6512da471d32e --- /dev/null +++ b/deps/v8/src/builtins/internal.tq @@ -0,0 +1,44 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace internal { + +namespace runtime { +extern runtime GetTemplateObject(implicit context: Context)( + TemplateObjectDescription, SharedFunctionInfo, Smi): JSAny; +extern runtime BytecodeBudgetInterruptFromCode(implicit context: Context)( + FeedbackCell): JSAny; +} + +builtin GetTemplateObject( + context: Context, shared: SharedFunctionInfo, + description: TemplateObjectDescription, slot: uintptr, + maybeFeedbackVector: FeedbackVector|Undefined): JSArray { + // TODO(jgruber): Consider merging with the GetTemplateObject bytecode + // handler; the current advantage of the split implementation is that the + // bytecode can skip most work if feedback exists. 
+ + try { + const vector = + Cast(maybeFeedbackVector) otherwise CallRuntime; + return Cast(ic::LoadFeedbackVectorSlot(vector, slot)) + otherwise CallRuntime; + } label CallRuntime deferred { + const result = UnsafeCast(runtime::GetTemplateObject( + description, shared, Convert(Signed(slot)))); + const vector = + Cast(maybeFeedbackVector) otherwise return result; + ic::StoreFeedbackVectorSlot(vector, slot, result); + return result; + } +} + +builtin BytecodeBudgetInterruptFromCode(implicit context: Context)( + feedbackCell: FeedbackCell): Object { + // The runtime call is wrapped by a builtin since the calling sequence in + // generated code is shorter for builtins than for runtime calls. + tail runtime::BytecodeBudgetInterruptFromCode(feedbackCell); +} + +} // namespace internal diff --git a/deps/v8/src/builtins/math.tq b/deps/v8/src/builtins/math.tq index 0586f432f5b0ec..fbcf35fedc29ff 100644 --- a/deps/v8/src/builtins/math.tq +++ b/deps/v8/src/builtins/math.tq @@ -4,9 +4,6 @@ namespace math { -extern transitioning builtin -NonNumberToNumber(implicit context: Context)(HeapObject): Number; - transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny): never labels SmiResult(Smi), Float64Result(float64) { @@ -20,7 +17,7 @@ transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny): goto Float64Result(Convert(h)); } case (a: JSAnyNotNumber): { - x1 = NonNumberToNumber(a); + x1 = conversion::NonNumberToNumber(a); } } } @@ -29,6 +26,7 @@ transitioning macro ReduceToSmiOrFloat64(implicit context: Context)(x: JSAny): // ES6 #sec-math.abs extern macro IsIntPtrAbsWithOverflowSupported(): constexpr bool; +extern macro TrySmiAdd(Smi, Smi): Smi labels Overflow; extern macro TrySmiSub(Smi, Smi): Smi labels Overflow; extern macro TrySmiAbs(Smi): Smi labels Overflow; extern macro Float64Abs(float64): float64; @@ -439,19 +437,16 @@ extern macro RefillMathRandom(NativeContext): Smi; transitioning javascript builtin MathRandom(js-implicit 
context: NativeContext, receiver: JSAny)(): Number { - let smiIndex: Smi = - Cast(context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX]) - otherwise unreachable; + let smiIndex: Smi = *NativeContextSlot(ContextSlot::MATH_RANDOM_INDEX_INDEX); if (smiIndex == 0) { // refill math random. smiIndex = RefillMathRandom(context); } const newSmiIndex: Smi = smiIndex - 1; - context[NativeContextSlot::MATH_RANDOM_INDEX_INDEX] = newSmiIndex; + *NativeContextSlot(ContextSlot::MATH_RANDOM_INDEX_INDEX) = newSmiIndex; - const array: FixedDoubleArray = Cast( - context[NativeContextSlot::MATH_RANDOM_CACHE_INDEX]) - otherwise unreachable; + const array: FixedDoubleArray = + *NativeContextSlot(ContextSlot::MATH_RANDOM_CACHE_INDEX); const random: float64 = array.floats[Convert(newSmiIndex)].ValueUnsafeAssumeNotHole(); return AllocateHeapNumberWithValue(random); diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index c98961f2ad4556..cefa88401d9aa8 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -2787,6 +2787,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. + __ Trap(); +} + namespace { int AddressOffset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index babe084bb057ce..8093d102116b2e 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -2828,6 +2828,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. 
+ __ Trap(); +} + namespace { int AddressOffset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/builtins/number.tq b/deps/v8/src/builtins/number.tq index 98680cf5533c5f..f51f5a2c8be3bd 100644 --- a/deps/v8/src/builtins/number.tq +++ b/deps/v8/src/builtins/number.tq @@ -2,9 +2,48 @@ // source code is governed by a BSD-style license that can be found in the // LICENSE file. +#include 'src/ic/binary-op-assembler.h' + +extern enum Operation extends uint31 { + // Binary operations. + kAdd, + kSubtract, + kMultiply, + kDivide, + kModulus, + kExponentiate, + kBitwiseAnd, + kBitwiseOr, + kBitwiseXor, + kShiftLeft, + kShiftRight, + kShiftRightLogical, + // Unary operations. + kBitwiseNot, + kNegate, + kIncrement, + kDecrement, + // Compare operations. + kEqual, + kStrictEqual, + kLessThan, + kLessThanOrEqual, + kGreaterThan, + kGreaterThanOrEqual +} + namespace runtime { extern transitioning runtime DoubleToStringWithRadix(implicit context: Context)(Number, Number): String; + +extern transitioning runtime StringParseFloat(implicit context: Context)( + String): Number; +extern transitioning runtime StringParseInt(implicit context: Context)( + JSAny, JSAny): Number; + +extern runtime BigIntUnaryOp(Context, BigInt, SmiTagged): BigInt; +extern runtime BigIntBinaryOp( + Context, Numeric, Numeric, SmiTagged): BigInt; } // namespace runtime namespace number { @@ -22,6 +61,61 @@ transitioning macro ThisNumberValue(implicit context: Context)( ToThisValue(receiver, PrimitiveType::kNumber, method)); } +macro ToCharCode(input: int32): char8 { + assert(0 <= input && input < 36); + return input < 10 ? %RawDownCast(input + kAsciiZero) : + %RawDownCast(input - 10 + kAsciiLowerCaseA); +} + +@export +macro NumberToStringSmi(x: int32, radix: int32): String labels Slow { + const isNegative: bool = x < 0; + let n: int32 = x; + if (!isNegative) { + // Fast case where the result is a one character string. 
+ if (x < radix) { + return StringFromSingleCharCode(ToCharCode(n)); + } + } else { + assert(isNegative); + if (n == kMinInt32) { + goto Slow; + } + n = 0 - n; + } + + // Calculate length and pre-allocate the result string. + let temp: int32 = n; + let length: int32 = isNegative ? 1 : 0; + while (temp > 0) { + temp = temp / radix; + length = length + 1; + } + assert(length > 0); + const strSeq: SeqOneByteString = AllocateSeqOneByteString(Unsigned(length)); + let cursor: intptr = Convert(length) - 1; + while (n > 0) { + const digit: int32 = n % radix; + n = n / radix; + strSeq.chars[cursor] = ToCharCode(digit); + cursor = cursor - 1; + } + if (isNegative) { + assert(cursor == 0); + // Insert '-' to result. + strSeq.chars[0] = 45; + } else { + assert(cursor == -1); + // In sync with Factory::SmiToString: If radix = 10 and positive number, + // update hash for string. + if (radix == 10) { + assert(strSeq.hash_field == kNameEmptyHashField); + strSeq.hash_field = MakeArrayIndexHash(Unsigned(x), Unsigned(length)); + } + } + return strSeq; +} + // https://tc39.github.io/ecma262/#sec-number.prototype.tostring transitioning javascript builtin NumberPrototypeToString( js-implicit context: NativeContext, receiver: JSAny)(...arguments): String { @@ -47,20 +141,14 @@ transitioning javascript builtin NumberPrototypeToString( // 7. Return the String representation of this Number // value using the radix specified by radixNumber. - // Fast case where the result is a one character string. 
- if (TaggedIsPositiveSmi(x) && x < radixNumber) { - let charCode = Convert(UnsafeCast(x)); - if (charCode < 10) { - charCode += kAsciiZero; - } else { - charCode = charCode - 10 + kAsciiLowerCaseA; - } - return StringFromSingleCharCode(charCode); + if (TaggedIsSmi(x)) { + return NumberToStringSmi(Convert(x), Convert(radixNumber)) + otherwise return runtime::DoubleToStringWithRadix(x, radixNumber); } if (x == -0) { return ZeroStringConstant(); - } else if (NumberIsNaN(x)) { + } else if (::NumberIsNaN(x)) { return NaNStringConstant(); } else if (x == V8_INFINITY) { return InfinityStringConstant(); @@ -70,4 +158,640 @@ transitioning javascript builtin NumberPrototypeToString( return runtime::DoubleToStringWithRadix(x, radixNumber); } + +// ES6 #sec-number.isfinite +javascript builtin NumberIsFinite( + js-implicit context: NativeContext, + receiver: JSAny)(value: JSAny): Boolean { + typeswitch (value) { + case (Smi): { + return True; + } + case (h: HeapNumber): { + const number: float64 = Convert(h); + const infiniteOrNaN: bool = Float64IsNaN(number - number); + return Convert(!infiniteOrNaN); + } + case (JSAnyNotNumber): { + return False; + } + } +} + +// ES6 #sec-number.isinteger +javascript builtin NumberIsInteger(js-implicit context: NativeContext)( + value: JSAny): Boolean { + return SelectBooleanConstant(IsInteger(value)); +} + +// ES6 #sec-number.isnan +javascript builtin NumberIsNaN(js-implicit context: NativeContext)( + value: JSAny): Boolean { + typeswitch (value) { + case (Smi): { + return False; + } + case (h: HeapNumber): { + const number: float64 = Convert(h); + return Convert(Float64IsNaN(number)); + } + case (JSAnyNotNumber): { + return False; + } + } +} + +// ES6 #sec-number.issafeinteger +javascript builtin NumberIsSafeInteger(js-implicit context: NativeContext)( + value: JSAny): Boolean { + return SelectBooleanConstant(IsSafeInteger(value)); +} + +// ES6 #sec-number.prototype.valueof +transitioning javascript builtin NumberPrototypeValueOf( + 
js-implicit context: NativeContext, receiver: JSAny)(): JSAny { + return ToThisValue( + receiver, PrimitiveType::kNumber, 'Number.prototype.valueOf'); +} + +// ES6 #sec-number.parsefloat +transitioning javascript builtin NumberParseFloat( + js-implicit context: NativeContext)(value: JSAny): Number { + try { + typeswitch (value) { + case (s: Smi): { + return s; + } + case (h: HeapNumber): { + // The input is already a Number. Take care of -0. + // The sense of comparison is important for the NaN case. + return (Convert(h) == 0) ? SmiConstant(0) : h; + } + case (s: String): { + goto String(s); + } + case (HeapObject): { + goto String(string::ToString(context, value)); + } + } + } label String(s: String) { + // Check if the string is a cached array index. + const hash: NameHash = s.hash_field; + if (!hash.is_not_integer_index_mask && + hash.array_index_length < kMaxCachedArrayIndexLength) { + const arrayIndex: uint32 = hash.array_index_value; + return SmiFromUint32(arrayIndex); + } + // Fall back to the runtime to convert string to a number. + return runtime::StringParseFloat(s); + } +} + +extern macro TruncateFloat64ToWord32(float64): uint32; + +transitioning builtin ParseInt(implicit context: Context)( + input: JSAny, radix: JSAny): Number { + try { + // Check if radix should be 10 (i.e. undefined, 0 or 10). + if (radix != Undefined && !TaggedEqual(radix, SmiConstant(10)) && + !TaggedEqual(radix, SmiConstant(0))) { + goto CallRuntime; + } + + typeswitch (input) { + case (s: Smi): { + return s; + } + case (h: HeapNumber): { + // Check if the input value is in Signed32 range. + const asFloat64: float64 = Convert(h); + const asInt32: int32 = Signed(TruncateFloat64ToWord32(asFloat64)); + // The sense of comparison is important for the NaN case. + if (asFloat64 == ChangeInt32ToFloat64(asInt32)) goto Int32(asInt32); + + // Check if the absolute value of input is in the [1,1<<31[ range. Call + // the runtime for the range [0,1[ because the result could be -0. 
+ const kMaxAbsValue: float64 = 2147483648.0; + const absInput: float64 = math::Float64Abs(asFloat64); + if (absInput < kMaxAbsValue && absInput >= 1) goto Int32(asInt32); + goto CallRuntime; + } + case (s: String): { + goto String(s); + } + case (HeapObject): { + goto CallRuntime; + } + } + } label Int32(i: int32) { + return ChangeInt32ToTagged(i); + } label String(s: String) { + // Check if the string is a cached array index. + const hash: NameHash = s.hash_field; + if (!hash.is_not_integer_index_mask && + hash.array_index_length < kMaxCachedArrayIndexLength) { + const arrayIndex: uint32 = hash.array_index_value; + return SmiFromUint32(arrayIndex); + } + // Fall back to the runtime. + goto CallRuntime; + } label CallRuntime { + tail runtime::StringParseInt(input, radix); + } +} + +// ES6 #sec-number.parseint +transitioning javascript builtin NumberParseInt( + js-implicit context: NativeContext)(value: JSAny, radix: JSAny): Number { + return ParseInt(value, radix); +} + +extern builtin NonNumberToNumeric(implicit context: Context)(JSAny): Numeric; +extern builtin BitwiseXor(implicit context: Context)(Number, Number): Number; +extern builtin Subtract(implicit context: Context)(Number, Number): Number; +extern builtin Add(implicit context: Context)(Number, Number): Number; +extern builtin StringAddConvertLeft(implicit context: Context)( + JSAny, String): JSAny; +extern builtin StringAddConvertRight(implicit context: Context)( + String, JSAny): JSAny; + +extern macro BitwiseOp(int32, int32, constexpr Operation): Number; +extern macro RelationalComparison( + constexpr Operation, JSAny, JSAny, Context): Boolean; + +// TODO(bbudge) Use a simpler macro structure that doesn't loop when converting +// non-numbers, if such a code sequence doesn't make the builtin bigger. 
+ +transitioning macro ToNumericOrPrimitive(implicit context: Context)( + value: JSAny): JSAny { + typeswitch (value) { + case (v: JSReceiver): { + return NonPrimitiveToPrimitive_Default(context, v); + } + case (v: JSPrimitive): { + return NonNumberToNumeric(v); + } + } +} + +transitioning builtin Add(implicit context: Context)( + leftArg: JSAny, rightArg: JSAny): JSAny { + let left: JSAny = leftArg; + let right: JSAny = rightArg; + try { + while (true) { + typeswitch (left) { + case (left: Smi): { + typeswitch (right) { + case (right: Smi): { + return math::TrySmiAdd(left, right) otherwise goto Float64s( + SmiToFloat64(left), SmiToFloat64(right)); + } + case (right: HeapNumber): { + goto Float64s(SmiToFloat64(left), Convert(right)); + } + case (right: BigInt): { + goto Numerics(left, right); + } + case (right: String): { + goto StringAddConvertLeft(left, right); + } + case (HeapObject): { + right = ToNumericOrPrimitive(right); + continue; + } + } + } + case (left: HeapNumber): { + typeswitch (right) { + case (right: Smi): { + goto Float64s(Convert(left), SmiToFloat64(right)); + } + case (right: HeapNumber): { + goto Float64s(Convert(left), Convert(right)); + } + case (right: BigInt): { + goto Numerics(left, right); + } + case (right: String): { + goto StringAddConvertLeft(left, right); + } + case (HeapObject): { + right = ToNumericOrPrimitive(right); + continue; + } + } + } + case (left: BigInt): { + typeswitch (right) { + case (right: Numeric): { + goto Numerics(left, right); + } + case (right: String): { + goto StringAddConvertLeft(left, right); + } + case (HeapObject): { + right = ToNumericOrPrimitive(right); + continue; + } + } + } + case (left: String): { + goto StringAddConvertRight(left, right); + } + case (leftReceiver: JSReceiver): { + left = ToPrimitiveDefault(leftReceiver); + } + case (HeapObject): { + // left: HeapObject + typeswitch (right) { + case (right: String): { + goto StringAddConvertLeft(left, right); + } + case (rightReceiver: JSReceiver): { 
+ // left is JSPrimitive and right is JSReceiver, convert right + // with priority. + right = ToPrimitiveDefault(rightReceiver); + continue; + } + case (JSPrimitive): { + // Neither left or right is JSReceiver, convert left. + left = NonNumberToNumeric(left); + continue; + } + } + } + } + } + } label StringAddConvertLeft(left: JSAny, right: String) { + tail StringAddConvertLeft(left, right); + } label StringAddConvertRight(left: String, right: JSAny) { + tail StringAddConvertRight(left, right); + } label Numerics(left: Numeric, right: Numeric) { + tail bigint::BigIntAdd(left, right); + } label Float64s(left: float64, right: float64) { + return AllocateHeapNumberWithValue(left + right); + } + unreachable; +} + +// Unary type switch on Number | BigInt. +macro UnaryOp1(implicit context: Context)(value: JSAny): never labels +Number(Number), BigInt(BigInt) { + let x: JSAny = value; + while (true) { + typeswitch (x) { + case (n: Number): { + goto Number(n); + } + case (b: BigInt): { + goto BigInt(b); + } + case (JSAnyNotNumeric): { + x = NonNumberToNumeric(x); + } + } + } + unreachable; +} + +// Unary type switch on Smi | HeapNumber | BigInt. +macro UnaryOp2(implicit context: Context)(value: JSAny): never labels +Smi(Smi), HeapNumber(HeapNumber), BigInt(BigInt) { + let x: JSAny = value; + while (true) { + typeswitch (x) { + case (s: Smi): { + goto Smi(s); + } + case (h: HeapNumber): { + goto HeapNumber(h); + } + case (b: BigInt): { + goto BigInt(b); + } + case (JSAnyNotNumeric): { + x = NonNumberToNumeric(x); + } + } + } + unreachable; +} + +// Binary type switch on Number | BigInt. 
+macro BinaryOp1(implicit context: Context)( + leftVal: JSAny, rightVal: JSAny): never labels +Number(Number, Number), AtLeastOneBigInt(Numeric, Numeric) { + let left: JSAny = leftVal; + let right: JSAny = rightVal; + while (true) { + try { + typeswitch (left) { + case (left: Number): { + typeswitch (right) { + case (right: Number): { + goto Number(left, right); + } + case (right: BigInt): { + goto AtLeastOneBigInt(left, right); + } + case (JSAnyNotNumeric): { + goto RightNotNumeric; + } + } + } + case (left: BigInt): { + typeswitch (right) { + case (right: Numeric): { + goto AtLeastOneBigInt(left, right); + } + case (JSAnyNotNumeric): { + goto RightNotNumeric; + } + } + } + case (JSAnyNotNumeric): { + left = NonNumberToNumeric(left); + } + } + } label RightNotNumeric { + right = NonNumberToNumeric(right); + } + } + unreachable; +} + +// Binary type switch on Smi | HeapNumber | BigInt. +macro BinaryOp2(implicit context: Context)(leftVal: JSAny, rightVal: JSAny): + never labels Smis(Smi, Smi), Float64s(float64, float64), + AtLeastOneBigInt(Numeric, Numeric) { + let left: JSAny = leftVal; + let right: JSAny = rightVal; + while (true) { + try { + typeswitch (left) { + case (left: Smi): { + typeswitch (right) { + case (right: Smi): { + goto Smis(left, right); + } + case (right: HeapNumber): { + goto Float64s(SmiToFloat64(left), Convert(right)); + } + case (right: BigInt): { + goto AtLeastOneBigInt(left, right); + } + case (JSAnyNotNumeric): { + goto RightNotNumeric; + } + } + } + case (left: HeapNumber): { + typeswitch (right) { + case (right: Smi): { + goto Float64s(Convert(left), SmiToFloat64(right)); + } + case (right: HeapNumber): { + goto Float64s(Convert(left), Convert(right)); + } + case (right: BigInt): { + goto AtLeastOneBigInt(left, right); + } + case (JSAnyNotNumeric): { + goto RightNotNumeric; + } + } + } + case (left: BigInt): { + typeswitch (right) { + case (right: Numeric): { + goto AtLeastOneBigInt(left, right); + } + case (JSAnyNotNumeric): { + goto 
RightNotNumeric; + } + } + } + case (JSAnyNotNumeric): { + left = NonNumberToNumeric(left); + } + } + } label RightNotNumeric { + right = NonNumberToNumeric(right); + } + } + unreachable; +} + +builtin Subtract(implicit context: Context)( + left: JSAny, right: JSAny): Numeric { + try { + BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt; + } label Smis(left: Smi, right: Smi) { + try { + return math::TrySmiSub(left, right) otherwise Overflow; + } label Overflow { + goto Float64s(SmiToFloat64(left), SmiToFloat64(right)); + } + } label Float64s(left: float64, right: float64) { + return AllocateHeapNumberWithValue(left - right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail bigint::BigIntSubtract(left, right); + } +} + +builtin Multiply(implicit context: Context)( + left: JSAny, right: JSAny): Numeric { + try { + BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt; + } label Smis(left: Smi, right: Smi) { + // The result is not necessarily a smi, in case of overflow. + return SmiMul(left, right); + } label Float64s(left: float64, right: float64) { + return AllocateHeapNumberWithValue(left * right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail runtime::BigIntBinaryOp( + context, left, right, SmiTag(Operation::kMultiply)); + } +} + +const kSmiValueSize: constexpr int32 generates 'kSmiValueSize'; +const kMinInt32: constexpr int32 generates 'kMinInt'; +const kMinInt31: constexpr int32 generates 'kMinInt31'; +const kMinimumDividend: int32 = (kSmiValueSize == 32) ? kMinInt32 : kMinInt31; + +builtin Divide(implicit context: Context)(left: JSAny, right: JSAny): Numeric { + try { + BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt; + } label Smis(left: Smi, right: Smi) { + // TODO(jkummerow): Consider just always doing a double division. + // Bail out if {divisor} is zero. + if (right == 0) goto SmiBailout(left, right); + + // Bail out if dividend is zero and divisor is negative. 
+ if (left == 0 && right < 0) goto SmiBailout(left, right); + + const dividend: int32 = SmiToInt32(left); + const divisor: int32 = SmiToInt32(right); + + // Bail out if dividend is kMinInt31 (or kMinInt32 if Smis are 32 bits) + // and divisor is -1. + if (divisor == -1 && dividend == kMinimumDividend) { + goto SmiBailout(left, right); + } + // TODO(epertoso): consider adding a machine instruction that returns + // both the result and the remainder. + const result: int32 = dividend / divisor; + const truncated: int32 = result * divisor; + if (dividend != truncated) goto SmiBailout(left, right); + return SmiFromInt32(result); + } label SmiBailout(left: Smi, right: Smi) { + goto Float64s(SmiToFloat64(left), SmiToFloat64(right)); + } label Float64s(left: float64, right: float64) { + return AllocateHeapNumberWithValue(left / right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail runtime::BigIntBinaryOp( + context, left, right, SmiTag(Operation::kDivide)); + } +} + +builtin Modulus(implicit context: Context)(left: JSAny, right: JSAny): Numeric { + try { + BinaryOp2(left, right) otherwise Smis, Float64s, AtLeastOneBigInt; + } label Smis(left: Smi, right: Smi) { + return SmiMod(left, right); + } label Float64s(left: float64, right: float64) { + return AllocateHeapNumberWithValue(left % right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail runtime::BigIntBinaryOp( + context, left, right, SmiTag(Operation::kModulus)); + } +} + +builtin Exponentiate(implicit context: Context)( + left: JSAny, right: JSAny): Numeric { + try { + BinaryOp1(left, right) otherwise Numbers, AtLeastOneBigInt; + } label Numbers(left: Number, right: Number) { + return math::MathPowImpl(left, right); + } label AtLeastOneBigInt(left: Numeric, right: Numeric) { + tail runtime::BigIntBinaryOp( + context, left, right, SmiTag(Operation::kExponentiate)); + } +} + +builtin Negate(implicit context: Context)(value: JSAny): Numeric { + try { + UnaryOp2(value) otherwise 
Smi, HeapNumber, BigInt; + } label Smi(s: Smi) { + return SmiMul(s, -1); + } label HeapNumber(h: HeapNumber) { + return AllocateHeapNumberWithValue(Convert(h) * -1); + } label BigInt(b: BigInt) { + tail runtime::BigIntUnaryOp( + context, b, SmiTag(Operation::kNegate)); + } +} + +builtin BitwiseNot(implicit context: Context)(value: JSAny): Numeric { + try { + UnaryOp1(value) otherwise Number, BigInt; + } label Number(n: Number) { + tail BitwiseXor(n, -1); + } label BigInt(b: BigInt) { + return runtime::BigIntUnaryOp( + context, b, SmiTag(Operation::kBitwiseNot)); + } +} + +builtin Decrement(implicit context: Context)(value: JSAny): Numeric { + try { + UnaryOp1(value) otherwise Number, BigInt; + } label Number(n: Number) { + tail Subtract(n, 1); + } label BigInt(b: BigInt) { + return runtime::BigIntUnaryOp( + context, b, SmiTag(Operation::kDecrement)); + } +} + +builtin Increment(implicit context: Context)(value: JSAny): Numeric { + try { + UnaryOp1(value) otherwise Number, BigInt; + } label Number(n: Number) { + tail Add(n, 1); + } label BigInt(b: BigInt) { + return runtime::BigIntUnaryOp( + context, b, SmiTag(Operation::kIncrement)); + } +} + +// Bitwise binary operations. 
+ +extern macro BinaryOpAssembler::Generate_BitwiseBinaryOp( + constexpr Operation, JSAny, JSAny, Context): Object; + +builtin ShiftLeft(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp(Operation::kShiftLeft, left, right, context); +} + +builtin ShiftRight(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp(Operation::kShiftRight, left, right, context); } + +builtin ShiftRightLogical(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp( + Operation::kShiftRightLogical, left, right, context); +} + +builtin BitwiseAnd(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp(Operation::kBitwiseAnd, left, right, context); +} + +builtin BitwiseOr(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp(Operation::kBitwiseOr, left, right, context); +} + +builtin BitwiseXor(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return Generate_BitwiseBinaryOp(Operation::kBitwiseXor, left, right, context); +} + +// Relational builtins. 
+ +builtin LessThan(implicit context: Context)(left: JSAny, right: JSAny): Object { + return RelationalComparison(Operation::kLessThan, left, right, context); +} + +builtin LessThanOrEqual(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return RelationalComparison( + Operation::kLessThanOrEqual, left, right, context); +} + +builtin GreaterThan(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return RelationalComparison(Operation::kGreaterThan, left, right, context); +} + +builtin GreaterThanOrEqual(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return RelationalComparison( + Operation::kGreaterThanOrEqual, left, right, context); +} + +builtin Equal(implicit context: Context)(left: JSAny, right: JSAny): Object { + return Equal(left, right, context); +} + +builtin StrictEqual(implicit context: Context)( + left: JSAny, right: JSAny): Object { + return ::StrictEqual(left, right); +} + +} // namespace number diff --git a/deps/v8/src/builtins/object.tq b/deps/v8/src/builtins/object.tq index 931972024cf97f..9dae15c92a786a 100644 --- a/deps/v8/src/builtins/object.tq +++ b/deps/v8/src/builtins/object.tq @@ -92,22 +92,19 @@ ObjectSetPrototypeOfDontThrow(implicit context: Context)( transitioning builtin CreateObjectWithoutProperties(implicit context: Context)( prototype: JSAny): JSAny { - const nativeContext = LoadNativeContext(context); - try { let map: Map; let properties: NameDictionary|EmptyFixedArray; typeswitch (prototype) { case (Null): { - map = UnsafeCast( - nativeContext - [NativeContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP]); + map = *NativeContextSlot( + ContextSlot::SLOW_OBJECT_WITH_NULL_PROTOTYPE_MAP); properties = AllocateNameDictionary(kNameDictionaryInitialCapacity); } case (prototype: JSReceiver): { properties = kEmptyFixedArray; - const objectFunction = UnsafeCast( - nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]); + const objectFunction = + 
*NativeContextSlot(ContextSlot::OBJECT_FUNCTION_INDEX); map = UnsafeCast(objectFunction.prototype_or_initial_map); if (prototype != map.prototype) { const prototypeInfo = prototype.map.PrototypeInfo() otherwise Runtime; diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 367838f82cc87d..8b3bc956994003 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -2966,6 +2966,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. + __ Trap(); +} + namespace { static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/builtins/profile-data-reader.cc b/deps/v8/src/builtins/profile-data-reader.cc new file mode 100644 index 00000000000000..b07c9c21f2b84b --- /dev/null +++ b/deps/v8/src/builtins/profile-data-reader.cc @@ -0,0 +1,121 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/profile-data-reader.h" + +#include +#include +#include + +#include "src/base/lazy-instance.h" +#include "src/flags/flags.h" +#include "src/utils/utils.h" + +namespace v8 { +namespace internal { + +namespace { + +class ProfileDataFromFileInternal : public ProfileDataFromFile { + public: + bool hash_has_value() const { return hash_has_value_; } + + void set_hash(int hash) { + hash_ = hash; + hash_has_value_ = true; + } + + void AddCountToBlock(size_t block_id, uint32_t count) { + if (block_counts_by_id_.size() <= block_id) { + // std::vector initializes new data to zero when resizing. 
+ block_counts_by_id_.resize(block_id + 1); + } + block_counts_by_id_[block_id] += count; + } + + private: + bool hash_has_value_ = false; +}; + +const std::unordered_map& +EnsureInitProfileData() { + static base::LeakyObject< + std::unordered_map> + data; + static bool initialized = false; + + if (initialized) return *data.get(); + initialized = true; + const char* filename = FLAG_turbo_profiling_log_file; + if (filename == nullptr) return *data.get(); + std::ifstream file(filename); + CHECK_WITH_MSG(file.good(), "Can't read log file"); + for (std::string line; std::getline(file, line);) { + std::string token; + std::istringstream line_stream(line); + if (!std::getline(line_stream, token, ',')) continue; + if (token == ProfileDataFromFileConstants::kBlockCounterMarker) { + // Any line starting with kBlockCounterMarker is a block usage count. + // As defined by Logger::BasicBlockCounterEvent, the format is: + // literal kBlockCounterMarker , builtin_name , block_id , usage_count + std::string builtin_name; + CHECK(std::getline(line_stream, builtin_name, ',')); + CHECK(std::getline(line_stream, token, ',')); + char* end = nullptr; + uint32_t id = static_cast(strtoul(token.c_str(), &end, 0)); + CHECK(errno == 0 && end != token.c_str()); + std::getline(line_stream, token, ','); + CHECK(line_stream.eof()); + uint32_t count = static_cast(strtoul(token.c_str(), &end, 0)); + CHECK(errno == 0 && end != token.c_str()); + ProfileDataFromFileInternal& counters_and_hash = + (*data.get())[builtin_name]; + // We allow concatenating data from several Isolates, so we might see the + // same block multiple times. Just sum them all. + counters_and_hash.AddCountToBlock(id, count); + } else if (token == ProfileDataFromFileConstants::kBuiltinHashMarker) { + // Any line starting with kBuiltinHashMarker is a function hash record. 
+ // As defined by Logger::BuiltinHashEvent, the format is: + // literal kBuiltinHashMarker , builtin_name , hash + std::string builtin_name; + CHECK(std::getline(line_stream, builtin_name, ',')); + std::getline(line_stream, token, ','); + CHECK(line_stream.eof()); + char* end = nullptr; + int hash = static_cast(strtol(token.c_str(), &end, 0)); + CHECK(errno == 0 && end != token.c_str()); + ProfileDataFromFileInternal& counters_and_hash = + (*data.get())[builtin_name]; + // We allow concatenating data from several Isolates, but expect them all + // to be running the same build. Any file with mismatched hashes for a + // function is considered ill-formed. + CHECK_IMPLIES(counters_and_hash.hash_has_value(), + counters_and_hash.hash() == hash); + counters_and_hash.set_hash(hash); + } + } + for (const auto& pair : *data.get()) { + // Every function is required to have a hash in the log. + CHECK(pair.second.hash_has_value()); + } + if (data.get()->size() == 0) { + PrintF( + "No basic block counters were found in log file.\n" + "Did you build with v8_enable_builtins_profiling=true\n" + "and run with --turbo-profiling-log-builtins?\n"); + } + + return *data.get(); +} + +} // namespace + +const ProfileDataFromFile* ProfileDataFromFile::TryRead(const char* name) { + const auto& data = EnsureInitProfileData(); + auto it = data.find(name); + return it == data.end() ? nullptr : &it->second; +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/src/builtins/profile-data-reader.h b/deps/v8/src/builtins/profile-data-reader.h new file mode 100644 index 00000000000000..18490141d16f58 --- /dev/null +++ b/deps/v8/src/builtins/profile-data-reader.h @@ -0,0 +1,62 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef V8_BUILTINS_PROFILE_DATA_READER_H_ +#define V8_BUILTINS_PROFILE_DATA_READER_H_ + +#include +#include +#include + +namespace v8 { +namespace internal { + +class ProfileDataFromFile { + public: + // A hash of the function's Graph before scheduling. Allows us to avoid using + // profiling data if the function has been changed. + int hash() const { return hash_; } + + // Returns how many times the block with the given ID was executed during + // profiling. + uint32_t GetCounter(size_t block_id) const { + // The profile data is allowed to omit blocks which were never hit, so be + // careful to avoid out-of-bounds access. + return block_id < block_counts_by_id_.size() ? block_counts_by_id_[block_id] + : 0; + } + + // Load basic block profiling data for the builtin with the given name, if + // such data exists. The returned vector is indexed by block ID, and its + // values are the number of times each block was executed while profiling. + static const ProfileDataFromFile* TryRead(const char* name); + + protected: + int hash_ = 0; + + // How many times each block was executed, indexed by block ID. This vector + // may be shorter than the total number of blocks; any omitted block should be + // treated as a zero. + std::vector block_counts_by_id_; +}; + +// The following strings can't be static members of ProfileDataFromFile until +// C++ 17; see https://stackoverflow.com/q/8016780/839379 . So for now we use a +// namespace. +namespace ProfileDataFromFileConstants { + +// Any line in a v8.log beginning with this string represents a basic block +// counter. +static constexpr char kBlockCounterMarker[] = "block"; + +// Any line in a v8.log beginning with this string represents the hash of the +// function Graph for a builtin. 
+static constexpr char kBuiltinHashMarker[] = "builtin_hash"; + +} // namespace ProfileDataFromFileConstants + +} // namespace internal +} // namespace v8 + +#endif // V8_BUILTINS_PROFILE_DATA_READER_H_ diff --git a/deps/v8/src/builtins/promise-abstract-operations.tq b/deps/v8/src/builtins/promise-abstract-operations.tq index 9cf6da102b8eec..b7a1b571e6418b 100644 --- a/deps/v8/src/builtins/promise-abstract-operations.tq +++ b/deps/v8/src/builtins/promise-abstract-operations.tq @@ -24,8 +24,18 @@ PromiseRejectEventFromStack(implicit context: Context)(JSPromise, JSAny): JSAny; // https://tc39.es/ecma262/#sec-promise-abstract-operations namespace promise { + +extern macro PromiseForwardingHandlerSymbolConstant(): Symbol; +const kPromiseForwardingHandlerSymbol: Symbol = + PromiseForwardingHandlerSymbolConstant(); +extern macro PromiseHandledBySymbolConstant(): Symbol; +const kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant(); +extern macro ResolveStringConstant(): String; +const kResolveString: String = ResolveStringConstant(); +extern macro IsPromiseResolveProtectorCellInvalid(): bool; + extern macro AllocateFunctionWithMapAndContext( - Map, SharedFunctionInfo, Context): JSFunction; + Map, SharedFunctionInfo, FunctionContext): JSFunction; extern macro PromiseReactionMapConstant(): Map; extern macro PromiseFulfillReactionJobTaskMapConstant(): Map; @@ -90,7 +100,7 @@ transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)( primaryHandler = promiseReaction.fulfill_handler; secondaryHandler = promiseReaction.reject_handler; } else { - StaticAssert(reactionType == kPromiseReactionReject); + static_assert(reactionType == kPromiseReactionReject); primaryHandler = promiseReaction.reject_handler; secondaryHandler = promiseReaction.fulfill_handler; } @@ -104,26 +114,26 @@ transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)( // Morph {current} from a PromiseReaction into a PromiseReactionJobTask // and schedule that 
on the microtask queue. We try to minimize the number // of stores here to avoid screwing up the store buffer. - StaticAssert( + static_assert( kPromiseReactionSize == kPromiseReactionJobTaskSizeOfAllPromiseReactionJobTasks); if constexpr (reactionType == kPromiseReactionFulfill) { - * UnsafeConstCast(& promiseReaction.map) = + *UnsafeConstCast(&promiseReaction.map) = PromiseFulfillReactionJobTaskMapConstant(); const promiseReactionJobTask = UnsafeCast(promiseReaction); promiseReactionJobTask.argument = argument; promiseReactionJobTask.context = handlerContext; EnqueueMicrotask(handlerContext, promiseReactionJobTask); - StaticAssert( + static_assert( kPromiseReactionFulfillHandlerOffset == kPromiseReactionJobTaskHandlerOffset); - StaticAssert( + static_assert( kPromiseReactionPromiseOrCapabilityOffset == kPromiseReactionJobTaskPromiseOrCapabilityOffset); } else { - StaticAssert(reactionType == kPromiseReactionReject); - * UnsafeConstCast(& promiseReaction.map) = + static_assert(reactionType == kPromiseReactionReject); + *UnsafeConstCast(&promiseReaction.map) = PromiseRejectReactionJobTaskMapConstant(); const promiseReactionJobTask = UnsafeCast(promiseReaction); @@ -131,7 +141,7 @@ transitioning macro MorphAndEnqueuePromiseReaction(implicit context: Context)( promiseReactionJobTask.context = handlerContext; promiseReactionJobTask.handler = primaryHandler; EnqueueMicrotask(handlerContext, promiseReactionJobTask); - StaticAssert( + static_assert( kPromiseReactionPromiseOrCapabilityOffset == kPromiseReactionJobTaskPromiseOrCapabilityOffset); } @@ -242,24 +252,33 @@ RejectPromise(implicit context: Context)( const kPromiseCapabilitySize: constexpr int31 generates 'PromiseCapability::kSize'; -const kPromiseBuiltinsCapabilitiesContextLength: constexpr int31 - generates 'PromiseBuiltins::kCapabilitiesContextLength'; -const kPromiseBuiltinsCapabilitySlot: constexpr ContextSlot - generates 'PromiseBuiltins::kCapabilitySlot'; -const kPromiseBuiltinsPromiseSlot: constexpr 
ContextSlot - generates 'PromiseBuiltins::kPromiseSlot'; -const kPromiseBuiltinsAlreadyResolvedSlot: constexpr ContextSlot - generates 'PromiseBuiltins::kAlreadyResolvedSlot'; -const kPromiseBuiltinsDebugEventSlot: constexpr ContextSlot - generates 'PromiseBuiltins::kDebugEventSlot'; + +type PromiseResolvingFunctionContext extends FunctionContext; +extern enum PromiseResolvingFunctionContextSlot extends intptr +constexpr 'PromiseBuiltins::PromiseResolvingFunctionContextSlot' { + kPromiseSlot: Slot, + kAlreadyResolvedSlot: Slot, + kDebugEventSlot: Slot, + kPromiseContextLength +} + +type PromiseCapabilitiesExecutorContext extends FunctionContext; +extern enum FunctionContextSlot extends intptr +constexpr 'PromiseBuiltins::FunctionContextSlot' { + kCapabilitySlot: Slot, + kCapabilitiesContextLength +} @export macro CreatePromiseCapabilitiesExecutorContext( - nativeContext: NativeContext, capability: PromiseCapability): Context { - const executorContext = AllocateSyntheticFunctionContext( - nativeContext, kPromiseBuiltinsCapabilitiesContextLength); - - executorContext[kPromiseBuiltinsCapabilitySlot] = capability; + nativeContext: NativeContext, capability: PromiseCapability): + PromiseCapabilitiesExecutorContext { + const executorContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, FunctionContextSlot::kCapabilitiesContextLength)); + + InitContextSlot( + executorContext, FunctionContextSlot::kCapabilitySlot, capability); return executorContext; } @@ -283,13 +302,12 @@ struct PromiseResolvingFunctions { @export macro CreatePromiseResolvingFunctions(implicit context: Context)( - promise: JSPromise, debugEvent: Object, nativeContext: NativeContext): + promise: JSPromise, debugEvent: Boolean, nativeContext: NativeContext): PromiseResolvingFunctions { const promiseContext = CreatePromiseResolvingFunctionsContext( promise, debugEvent, nativeContext); - const map = UnsafeCast( - nativeContext - 
[NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const map = *NativeContextSlot( + nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const resolveInfo = PromiseCapabilityDefaultResolveSharedFunConstant(); const resolve: JSFunction = @@ -302,11 +320,10 @@ macro CreatePromiseResolvingFunctions(implicit context: Context)( transitioning macro InnerNewPromiseCapability(implicit context: Context)( - constructor: HeapObject, debugEvent: Object): PromiseCapability { + constructor: HeapObject, debugEvent: Boolean): PromiseCapability { const nativeContext = LoadNativeContext(context); - if (TaggedEqual( - constructor, - nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX])) { + if (constructor == + *NativeContextSlot(nativeContext, ContextSlot::PROMISE_FUNCTION_INDEX)) { const promise = NewJSPromise(); const pair = @@ -321,9 +338,10 @@ InnerNewPromiseCapability(implicit context: Context)( CreatePromiseCapabilitiesExecutorContext(nativeContext, capability); const executorInfo = PromiseGetCapabilitiesExecutorSharedFunConstant(); - const functionMap = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const functionMap = + *NativeContextSlot( + nativeContext, + ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const executor = AllocateFunctionWithMapAndContext( functionMap, executorInfo, executorContext); @@ -341,7 +359,7 @@ InnerNewPromiseCapability(implicit context: Context)( // https://tc39.es/ecma262/#sec-newpromisecapability transitioning builtin NewPromiseCapability(implicit context: Context)( - maybeConstructor: Object, debugEvent: Object): PromiseCapability { + maybeConstructor: Object, debugEvent: Boolean): PromiseCapability { typeswitch (maybeConstructor) { case (Smi): { ThrowTypeError(MessageTemplate::kNotConstructor, maybeConstructor); @@ -358,13 +376,15 @@ NewPromiseCapability(implicit context: Context)( // https://tc39.es/ecma262/#sec-promise-reject-functions 
transitioning javascript builtin PromiseCapabilityDefaultReject( - js-implicit context: NativeContext, receiver: JSAny)(reason: JSAny): JSAny { + js-implicit context: Context, receiver: JSAny)(reason: JSAny): JSAny { + const context = %RawDownCast(context); // 2. Let promise be F.[[Promise]]. - const promise = UnsafeCast(context[kPromiseBuiltinsPromiseSlot]); + const promise = + *ContextSlot(context, PromiseResolvingFunctionContextSlot::kPromiseSlot); // 3. Let alreadyResolved be F.[[AlreadyResolved]]. - const alreadyResolved = - UnsafeCast(context[kPromiseBuiltinsAlreadyResolvedSlot]); + const alreadyResolved = *ContextSlot( + context, PromiseResolvingFunctionContextSlot::kAlreadyResolvedSlot); // 4. If alreadyResolved.[[Value]] is true, return undefined. if (alreadyResolved == True) { @@ -372,25 +392,28 @@ PromiseCapabilityDefaultReject( } // 5. Set alreadyResolved.[[Value]] to true. - context[kPromiseBuiltinsAlreadyResolvedSlot] = True; + *ContextSlot( + context, PromiseResolvingFunctionContextSlot::kAlreadyResolvedSlot) = + True; // 6. Return RejectPromise(promise, reason). - const debugEvent = - UnsafeCast(context[kPromiseBuiltinsDebugEventSlot]); + const debugEvent = *ContextSlot( + context, PromiseResolvingFunctionContextSlot::kDebugEventSlot); return RejectPromise(promise, reason, debugEvent); } // https://tc39.es/ecma262/#sec-promise-resolve-functions transitioning javascript builtin PromiseCapabilityDefaultResolve( - js-implicit context: NativeContext, - receiver: JSAny)(resolution: JSAny): JSAny { + js-implicit context: Context, receiver: JSAny)(resolution: JSAny): JSAny { + const context = %RawDownCast(context); // 2. Let promise be F.[[Promise]]. - const promise = UnsafeCast(context[kPromiseBuiltinsPromiseSlot]); + const promise: JSPromise = + *ContextSlot(context, PromiseResolvingFunctionContextSlot::kPromiseSlot); // 3. Let alreadyResolved be F.[[AlreadyResolved]]. 
- const alreadyResolved = - UnsafeCast(context[kPromiseBuiltinsAlreadyResolvedSlot]); + const alreadyResolved: Boolean = *ContextSlot( + context, PromiseResolvingFunctionContextSlot::kAlreadyResolvedSlot); // 4. If alreadyResolved.[[Value]] is true, return undefined. if (alreadyResolved == True) { @@ -398,7 +421,9 @@ PromiseCapabilityDefaultResolve( } // 5. Set alreadyResolved.[[Value]] to true. - context[kPromiseBuiltinsAlreadyResolvedSlot] = True; + *ContextSlot( + context, PromiseResolvingFunctionContextSlot::kAlreadyResolvedSlot) = + True; // The rest of the logic (and the catch prediction) is // encapsulated in the dedicated ResolvePromise builtin. @@ -465,7 +490,7 @@ PromiseReject( const receiver = Cast(receiver) otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'PromiseReject'); - const promiseFun = context[NativeContextSlot::PROMISE_FUNCTION_INDEX]; + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); if (promiseFun == receiver) { const promise = NewJSPromise(PromiseState::kRejected, reason); runtime::PromiseRejectEventFromStack(promise, reason); @@ -488,11 +513,11 @@ const kPromiseExecutorAlreadyInvoked: constexpr MessageTemplate // https://tc39.es/ecma262/#sec-getcapabilitiesexecutor-functions transitioning javascript builtin -PromiseGetCapabilitiesExecutor( - js-implicit context: NativeContext, receiver: JSAny)( +PromiseGetCapabilitiesExecutor(js-implicit context: Context, receiver: JSAny)( resolve: JSAny, reject: JSAny): JSAny { - const capability = - UnsafeCast(context[kPromiseBuiltinsCapabilitySlot]); + const context = %RawDownCast(context); + const capability: PromiseCapability = + *ContextSlot(context, FunctionContextSlot::kCapabilitySlot); if (capability.resolve != Undefined || capability.reject != Undefined) deferred { ThrowTypeError(kPromiseExecutorAlreadyInvoked); @@ -503,6 +528,41 @@ PromiseGetCapabilitiesExecutor( return Undefined; } +macro IsPromiseResolveLookupChainIntact(implicit context: Context)( + 
nativeContext: NativeContext, constructor: JSReceiver): bool { + if (IsForceSlowPath()) return false; + const promiseFun = + *NativeContextSlot(nativeContext, ContextSlot::PROMISE_FUNCTION_INDEX); + return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid(); +} + +// https://tc39.es/ecma262/#sec-getpromiseresolve +transitioning macro GetPromiseResolve(implicit context: Context)( + nativeContext: NativeContext, constructor: Constructor): JSAny { + // 1. Assert: IsConstructor(constructor) is true. + + // We can skip the "resolve" lookup on {constructor} if it's the + // Promise constructor and the Promise.resolve protector is intact, + // as that guards the lookup path for the "resolve" property on the + // Promise constructor. In this case, promiseResolveFunction is undefined, + // and when CallResolve is called with it later, it will call Promise.resolve. + let promiseResolveFunction: JSAny = Undefined; + + if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) { + let promiseResolve: JSAny; + + // 2. Let promiseResolve be ? Get(constructor, "resolve"). + promiseResolve = GetProperty(constructor, kResolveString); + + // 3. If IsCallable(promiseResolve) is false, throw a TypeError exception. + promiseResolveFunction = + Cast(promiseResolve) otherwise ThrowTypeError( + MessageTemplate::kCalledNonCallable, 'resolve'); + } + // 4. return promiseResolve. 
+ return promiseResolveFunction; +} + transitioning macro CallResolve(implicit context: Context)( constructor: Constructor, resolve: JSAny, value: JSAny): JSAny { // Undefined can never be a valid value for the resolve function, diff --git a/deps/v8/src/builtins/promise-all-element-closure.tq b/deps/v8/src/builtins/promise-all-element-closure.tq index 0b870ea3b185bc..bf07ff622746db 100644 --- a/deps/v8/src/builtins/promise-all-element-closure.tq +++ b/deps/v8/src/builtins/promise-all-element-closure.tq @@ -21,8 +21,8 @@ struct PromiseAllSettledWrapResultAsFulfilledFunctor { // TODO(gsathya): Optimize the creation using a cached map to // prevent transitions here. // 9. Let obj be ! ObjectCreate(%ObjectPrototype%). - const objectFunction = UnsafeCast( - nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]); + const objectFunction = + *NativeContextSlot(nativeContext, ContextSlot::OBJECT_FUNCTION_INDEX); const objectFunctionMap = UnsafeCast(objectFunction.prototype_or_initial_map); const obj = AllocateJSObjectFromMap(objectFunctionMap); @@ -44,8 +44,8 @@ struct PromiseAllSettledWrapResultAsRejectedFunctor { // TODO(gsathya): Optimize the creation using a cached map to // prevent transitions here. // 9. Let obj be ! ObjectCreate(%ObjectPrototype%). 
- const objectFunction = UnsafeCast( - nativeContext[NativeContextSlot::OBJECT_FUNCTION_INDEX]); + const objectFunction = + *NativeContextSlot(nativeContext, ContextSlot::OBJECT_FUNCTION_INDEX); const objectFunctionMap = UnsafeCast(objectFunction.prototype_or_initial_map); const obj = AllocateJSObjectFromMap(objectFunctionMap); @@ -62,11 +62,15 @@ struct PromiseAllSettledWrapResultAsRejectedFunctor { extern macro LoadJSReceiverIdentityHash(Object): intptr labels IfNoHash; -extern enum PromiseAllResolveElementContextSlots extends int31 +type PromiseAllResolveElementContext extends FunctionContext; +extern enum PromiseAllResolveElementContextSlots extends intptr constexpr 'PromiseBuiltins::PromiseAllResolveElementContextSlots' { - kPromiseAllResolveElementRemainingSlot, - kPromiseAllResolveElementCapabilitySlot, - kPromiseAllResolveElementValuesArraySlot, + kPromiseAllResolveElementRemainingSlot: + Slot, + kPromiseAllResolveElementCapabilitySlot: + Slot, + kPromiseAllResolveElementValuesSlot: + Slot, kPromiseAllResolveElementLength } extern operator '[]=' macro StoreContextElement( @@ -81,27 +85,31 @@ const kPropertyArrayHashFieldMax: constexpr int31 generates 'PropertyArray::HashField::kMax'; transitioning macro PromiseAllResolveElementClosure( - implicit context: Context)( - value: JSAny, function: JSFunction, wrapResultFunctor: F): JSAny { + implicit context: PromiseAllResolveElementContext|NativeContext)( + value: JSAny, function: JSFunction, wrapResultFunctor: F, + hasResolveAndRejectClosures: constexpr bool): JSAny { // We use the {function}s context as the marker to remember whether this // resolve element closure was already called. It points to the resolve // element context (which is a FunctionContext) until it was called the // first time, in which case we make it point to the native context here // to mark this resolve element closure as done. 
- if (IsNativeContext(context)) deferred { + let promiseContext: PromiseAllResolveElementContext; + typeswitch (context) { + case (NativeContext): deferred { return Undefined; } + case (context: PromiseAllResolveElementContext): { + promiseContext = context; + } + } assert( - context.length == - PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength); - const nativeContext = LoadNativeContext(context); + promiseContext.length == + SmiTag(PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementLength)); + const nativeContext = LoadNativeContext(promiseContext); function.context = nativeContext; - // Update the value depending on whether Promise.all or - // Promise.allSettled is called. - const updatedValue = wrapResultFunctor.Call(nativeContext, value); - // Determine the index from the {function}. assert(kPropertyArrayNoHashSentinel == 0); const identityHash = @@ -109,54 +117,65 @@ transitioning macro PromiseAllResolveElementClosure( assert(identityHash > 0); const index = identityHash - 1; - // Check if we need to grow the [[ValuesArray]] to store {value} at {index}. - const valuesArray = UnsafeCast( - context[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementValuesArraySlot]); - const elements = UnsafeCast(valuesArray.elements); - const valuesLength = Convert(valuesArray.length); - if (index < valuesLength) { - // The {index} is in bounds of the {values_array}, - // just store the {value} and continue. - elements.objects[index] = updatedValue; - } else { - // Check if we need to grow the backing store. - const newLength = index + 1; - const elementsLength = elements.length_intptr; - if (index < elementsLength) { - // The {index} is within bounds of the {elements} backing store, so - // just store the {value} and update the "length" of the {values_array}. 
- valuesArray.length = Convert(newLength); - elements.objects[index] = updatedValue; - } else - deferred { - // We need to grow the backing store to fit the {index} as well. - const newElementsLength = IntPtrMin( - CalculateNewElementsCapacity(newLength), - kPropertyArrayHashFieldMax + 1); - assert(index < newElementsLength); - assert(elementsLength < newElementsLength); - const newElements = - ExtractFixedArray(elements, 0, elementsLength, newElementsLength); - newElements.objects[index] = updatedValue; - - // Update backing store and "length" on {values_array}. - valuesArray.elements = newElements; - valuesArray.length = Convert(newLength); + let remainingElementsCount = *ContextSlot( + promiseContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementRemainingSlot); + + let values = *ContextSlot( + promiseContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementValuesSlot); + const newCapacity = index + 1; + if (newCapacity > values.length_intptr) deferred { + // This happens only when the promises are resolved during iteration. + values = ExtractFixedArray(values, 0, values.length_intptr, newCapacity); + *ContextSlot( + promiseContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementValuesSlot) = values; + } + + // Promise.allSettled, for each input element, has both a resolve and a reject + // closure that share an [[AlreadyCalled]] boolean. That is, the input element + // can only be settled once: after resolve is called, reject returns early, + // and vice versa. Using {function}'s context as the marker only tracks + // per-closure instead of per-element. When the second resolve/reject closure + // is called on the same index, values.object[index] will already exist and + // will not be the hole value. In that case, return early. Everything up to + // this point is not yet observable to user code. 
This is not a problem for + // Promise.all since Promise.all has a single resolve closure (no reject) per + // element. + if (hasResolveAndRejectClosures) { + if (values.objects[index] != TheHole) deferred { + return Undefined; } } - let remainingElementsCount = - UnsafeCast(context[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot]); + + // Update the value depending on whether Promise.all or + // Promise.allSettled is called. + const updatedValue = wrapResultFunctor.Call(nativeContext, value); + + values.objects[index] = updatedValue; + remainingElementsCount = remainingElementsCount - 1; - context[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot] = remainingElementsCount; + check(remainingElementsCount >= 0); + + *ContextSlot( + promiseContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementRemainingSlot) = remainingElementsCount; if (remainingElementsCount == 0) { - const capability = UnsafeCast( - context[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementCapabilitySlot]); + const capability = *ContextSlot( + promiseContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementCapabilitySlot); const resolve = UnsafeCast(capability.resolve); - Call(context, resolve, Undefined, valuesArray); + const arrayMap = + *NativeContextSlot( + nativeContext, ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX); + const valuesArray = NewJSArray(arrayMap, values); + Call(promiseContext, resolve, Undefined, valuesArray); } return Undefined; } @@ -165,23 +184,29 @@ transitioning javascript builtin PromiseAllResolveElementClosure( js-implicit context: Context, receiver: JSAny, target: JSFunction)(value: JSAny): JSAny { + const context = + %RawDownCast(context); return PromiseAllResolveElementClosure( - value, target, PromiseAllWrapResultAsFulfilledFunctor{}); + value, target, PromiseAllWrapResultAsFulfilledFunctor{}, false); } transitioning javascript builtin 
PromiseAllSettledResolveElementClosure( js-implicit context: Context, receiver: JSAny, target: JSFunction)(value: JSAny): JSAny { + const context = + %RawDownCast(context); return PromiseAllResolveElementClosure( - value, target, PromiseAllSettledWrapResultAsFulfilledFunctor{}); + value, target, PromiseAllSettledWrapResultAsFulfilledFunctor{}, true); } transitioning javascript builtin PromiseAllSettledRejectElementClosure( js-implicit context: Context, receiver: JSAny, target: JSFunction)(value: JSAny): JSAny { + const context = + %RawDownCast(context); return PromiseAllResolveElementClosure( - value, target, PromiseAllSettledWrapResultAsRejectedFunctor{}); + value, target, PromiseAllSettledWrapResultAsRejectedFunctor{}, true); } } diff --git a/deps/v8/src/builtins/promise-all.tq b/deps/v8/src/builtins/promise-all.tq index b7fad88f6fc891..41dee8b9e7682e 100644 --- a/deps/v8/src/builtins/promise-all.tq +++ b/deps/v8/src/builtins/promise-all.tq @@ -17,34 +17,38 @@ const kPromiseBuiltinsPromiseContextLength: constexpr int31 // was called already (we slap the native context onto the closure in that // case to mark it's done). macro CreatePromiseAllResolveElementContext(implicit context: Context)( - capability: PromiseCapability, nativeContext: NativeContext): Context { - // TODO(bmeurer): Manually fold this into a single allocation. 
- const arrayMap = UnsafeCast( - nativeContext[NativeContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX]); - const valuesArray = AllocateJSArray( - ElementsKind::PACKED_ELEMENTS, arrayMap, IntPtrConstant(0), - SmiConstant(0)); - const resolveContext = AllocateSyntheticFunctionContext( + capability: PromiseCapability, + nativeContext: NativeContext): PromiseAllResolveElementContext { + const resolveContext = %RawDownCast< + PromiseAllResolveElementContext>(AllocateSyntheticFunctionContext( nativeContext, - PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength); - resolveContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot] = SmiConstant(1); - resolveContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementCapabilitySlot] = capability; - resolveContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementValuesArraySlot] = valuesArray; + PromiseAllResolveElementContextSlots::kPromiseAllResolveElementLength)); + InitContextSlot( + resolveContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementRemainingSlot, + 1); + InitContextSlot( + resolveContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementCapabilitySlot, + capability); + InitContextSlot( + resolveContext, + PromiseAllResolveElementContextSlots::kPromiseAllResolveElementValuesSlot, + kEmptyFixedArray); return resolveContext; } macro CreatePromiseAllResolveElementFunction(implicit context: Context)( - resolveElementContext: Context, index: Smi, nativeContext: NativeContext, + resolveElementContext: PromiseAllResolveElementContext, index: Smi, + nativeContext: NativeContext, resolveFunction: SharedFunctionInfo): JSFunction { assert(index > 0); assert(index < kPropertyArrayHashFieldMax); - const map = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const map = *ContextSlot( + nativeContext, 
ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const resolve = AllocateFunctionWithMapAndContext( map, resolveFunction, resolveElementContext); @@ -55,13 +59,24 @@ macro CreatePromiseAllResolveElementFunction(implicit context: Context)( @export macro CreatePromiseResolvingFunctionsContext(implicit context: Context)( - promise: JSPromise, debugEvent: Object, nativeContext: NativeContext): - Context { - const resolveContext = AllocateSyntheticFunctionContext( - nativeContext, kPromiseBuiltinsPromiseContextLength); - resolveContext[kPromiseBuiltinsPromiseSlot] = promise; - resolveContext[kPromiseBuiltinsAlreadyResolvedSlot] = False; - resolveContext[kPromiseBuiltinsDebugEventSlot] = debugEvent; + promise: JSPromise, debugEvent: Boolean, nativeContext: NativeContext): + PromiseResolvingFunctionContext { + const resolveContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, + PromiseResolvingFunctionContextSlot::kPromiseContextLength)); + InitContextSlot( + resolveContext, PromiseResolvingFunctionContextSlot::kPromiseSlot, + promise); + InitContextSlot( + resolveContext, PromiseResolvingFunctionContextSlot::kAlreadyResolvedSlot, + False); + InitContextSlot( + resolveContext, PromiseResolvingFunctionContextSlot::kDebugEventSlot, + debugEvent); + static_assert( + PromiseResolvingFunctionContextSlot::kPromiseContextLength == + ContextSlot::MIN_CONTEXT_SLOTS + 3); return resolveContext; } @@ -69,15 +84,17 @@ macro IsPromiseThenLookupChainIntact(implicit context: Context)( nativeContext: NativeContext, receiverMap: Map): bool { if (IsForceSlowPath()) return false; if (!IsJSPromiseMap(receiverMap)) return false; - if (receiverMap.prototype != - nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX]) + if (receiverMap.prototype != *NativeContextSlot( + nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX)) { return false; + } return !IsPromiseThenProtectorCellInvalid(); } struct PromiseAllResolveElementFunctor { macro Call(implicit context: 
Context)( - resolveElementContext: Context, nativeContext: NativeContext, index: Smi, + resolveElementContext: PromiseAllResolveElementContext, + nativeContext: NativeContext, index: Smi, _capability: PromiseCapability): Callable { return CreatePromiseAllResolveElementFunction( resolveElementContext, index, nativeContext, @@ -87,15 +104,17 @@ struct PromiseAllResolveElementFunctor { struct PromiseAllRejectElementFunctor { macro Call(implicit context: Context)( - _resolveElementContext: Context, _nativeContext: NativeContext, - _index: Smi, capability: PromiseCapability): Callable { + _resolveElementContext: PromiseAllResolveElementContext, + _nativeContext: NativeContext, _index: Smi, + capability: PromiseCapability): Callable { return UnsafeCast(capability.reject); } } struct PromiseAllSettledResolveElementFunctor { macro Call(implicit context: Context)( - resolveElementContext: Context, nativeContext: NativeContext, index: Smi, + resolveElementContext: PromiseAllResolveElementContext, + nativeContext: NativeContext, index: Smi, _capability: PromiseCapability): Callable { return CreatePromiseAllResolveElementFunction( resolveElementContext, index, nativeContext, @@ -105,7 +124,8 @@ struct PromiseAllSettledResolveElementFunctor { struct PromiseAllSettledRejectElementFunctor { macro Call(implicit context: Context)( - resolveElementContext: Context, nativeContext: NativeContext, index: Smi, + resolveElementContext: PromiseAllResolveElementContext, + nativeContext: NativeContext, index: Smi, _capability: PromiseCapability): Callable { return CreatePromiseAllResolveElementFunction( resolveElementContext, index, nativeContext, @@ -115,11 +135,11 @@ struct PromiseAllSettledRejectElementFunctor { transitioning macro PerformPromiseAll( implicit context: Context)( - constructor: JSReceiver, capability: PromiseCapability, - iter: iterator::IteratorRecord, createResolveElementFunctor: F1, + nativeContext: NativeContext, iter: iterator::IteratorRecord, + constructor: 
Constructor, capability: PromiseCapability, + promiseResolveFunction: JSAny, createResolveElementFunctor: F1, createRejectElementFunctor: F2): JSAny labels Reject(Object) { - const nativeContext = LoadNativeContext(context); const promise = capability.promise; const resolve = capability.resolve; const reject = capability.reject; @@ -135,167 +155,145 @@ Reject(Object) { let index: Smi = 1; - // We can skip the "resolve" lookup on {constructor} if it's the - // Promise constructor and the Promise.resolve protector is intact, - // as that guards the lookup path for the "resolve" property on the - // Promise constructor. - let promiseResolveFunction: JSAny = Undefined; try { - try { - if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) { - let promiseResolve: JSAny; - - // 5. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). - promiseResolve = GetProperty(constructor, kResolveString); - - // 6. If IsCallable(_promiseResolve_) is *false*, throw a *TypeError* - // exception. - promiseResolveFunction = - Cast(promiseResolve) otherwise ThrowTypeError( - MessageTemplate::kCalledNonCallable, 'resolve'); + const fastIteratorResultMap = *NativeContextSlot( + nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX); + while (true) { + let nextValue: JSAny; + try { + // Let next be IteratorStep(iteratorRecord.[[Iterator]]). + // If next is an abrupt completion, set iteratorRecord.[[Done]] to + // true. ReturnIfAbrupt(next). + const next: JSReceiver = iterator::IteratorStep( + iter, fastIteratorResultMap) otherwise goto Done; + + // Let nextValue be IteratorValue(next). + // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] + // to true. + // ReturnIfAbrupt(nextValue). + nextValue = iterator::IteratorValue(next, fastIteratorResultMap); + } catch (e) { + goto Reject(e); + } + + // Check if we reached the limit. 
+ if (index == kPropertyArrayHashFieldMax) { + // If there are too many elements (currently more than 2**21-1), + // raise a RangeError here (which is caught below and turned into + // a rejection of the resulting promise). We could gracefully handle + // this case as well and support more than this number of elements + // by going to a separate function and pass the larger indices via a + // separate context, but it doesn't seem likely that we need this, + // and it's unclear how the rest of the system deals with 2**21 live + // Promises anyway. + ThrowRangeError( + MessageTemplate::kTooManyElementsInPromiseCombinator, 'all'); } - const fastIteratorResultMap = UnsafeCast( - nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]); - while (true) { - let nextValue: JSAny; - try { - // Let next be IteratorStep(iteratorRecord.[[Iterator]]). - // If next is an abrupt completion, set iteratorRecord.[[Done]] to - // true. ReturnIfAbrupt(next). - const next: JSReceiver = iterator::IteratorStep( - iter, fastIteratorResultMap) otherwise goto Done; - - // Let nextValue be IteratorValue(next). - // If nextValue is an abrupt completion, set iteratorRecord.[[Done]] - // to true. - // ReturnIfAbrupt(nextValue). - nextValue = iterator::IteratorValue(next, fastIteratorResultMap); - } catch (e) { - goto Reject(e); - } - - // Check if we reached the limit. - if (index == kPropertyArrayHashFieldMax) { - // If there are too many elements (currently more than 2**21-1), - // raise a RangeError here (which is caught below and turned into - // a rejection of the resulting promise). We could gracefully handle - // this case as well and support more than this number of elements - // by going to a separate function and pass the larger indices via a - // separate context, but it doesn't seem likely that we need this, - // and it's unclear how the rest of the system deals with 2**21 live - // Promises anyway. 
- ThrowRangeError( - MessageTemplate::kTooManyElementsInPromiseCombinator, 'all'); - } - - // Set remainingElementsCount.[[Value]] to - // remainingElementsCount.[[Value]] + 1. - const remainingElementsCount = UnsafeCast( - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot]); - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot] = - remainingElementsCount + 1; - - // Let resolveElement be CreateBuiltinFunction(steps, - // « [[AlreadyCalled]], - // [[Index]], - // [[Values]], - // [[Capability]], - // [[RemainingElements]] - // »). - // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false - // }. Set resolveElement.[[Index]] to index. Set - // resolveElement.[[Values]] to values. Set - // resolveElement.[[Capability]] to resultCapability. Set - // resolveElement.[[RemainingElements]] to remainingElementsCount. - const resolveElementFun = createResolveElementFunctor.Call( - resolveElementContext, nativeContext, index, capability); - const rejectElementFun = createRejectElementFunctor.Call( - resolveElementContext, nativeContext, index, capability); - - // We can skip the "resolve" lookup on the {constructor} as well as - // the "then" lookup on the result of the "resolve" call, and - // immediately chain continuation onto the {next_value} if: - // - // (a) The {constructor} is the intrinsic %Promise% function, and - // looking up "resolve" on {constructor} yields the initial - // Promise.resolve() builtin, and - // (b) the promise @@species protector cell is valid, meaning that - // no one messed with the Symbol.species property on any - // intrinsic promise or on the Promise.prototype, and - // (c) the {next_value} is a JSPromise whose [[Prototype]] field - // contains the intrinsic %PromisePrototype%, and - // (d) we're not running with async_hooks or DevTools enabled. 
- // - // In that case we also don't need to allocate a chained promise for - // the PromiseReaction (aka we can pass undefined to - // PerformPromiseThen), since this is only necessary for DevTools and - // PromiseHooks. - if (promiseResolveFunction != Undefined || - IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() || - IsPromiseSpeciesProtectorCellInvalid() || Is(nextValue) || - !IsPromiseThenLookupChainIntact( - nativeContext, UnsafeCast(nextValue).map)) { - // Let nextPromise be ? Call(constructor, _promiseResolve_, « - // nextValue »). - const nextPromise = CallResolve( - UnsafeCast(constructor), promiseResolveFunction, - nextValue); - - // Perform ? Invoke(nextPromise, "then", « resolveElement, - // resultCapability.[[Reject]] »). - const then = GetProperty(nextPromise, kThenString); - const thenResult = Call( - nativeContext, then, nextPromise, resolveElementFun, - rejectElementFun); - - // For catch prediction, mark that rejections here are - // semantically handled by the combined Promise. - if (IsDebugActive() && Is(thenResult)) deferred { - SetPropertyStrict( - context, thenResult, kPromiseHandledBySymbol, promise); - } - } else { - PerformPromiseThenImpl( - UnsafeCast(nextValue), resolveElementFun, - rejectElementFun, Undefined); - } - - // Set index to index + 1. - index += 1; + // Set remainingElementsCount.[[Value]] to + // remainingElementsCount.[[Value]] + 1. + *ContextSlot( + resolveElementContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementRemainingSlot) += 1; + + // Let resolveElement be CreateBuiltinFunction(steps, + // « [[AlreadyCalled]], + // [[Index]], + // [[Values]], + // [[Capability]], + // [[RemainingElements]] + // »). + // Set resolveElement.[[AlreadyCalled]] to a Record { [[Value]]: false + // }. Set resolveElement.[[Index]] to index. Set + // resolveElement.[[Values]] to values. Set + // resolveElement.[[Capability]] to resultCapability. 
Set + // resolveElement.[[RemainingElements]] to remainingElementsCount. + const resolveElementFun = createResolveElementFunctor.Call( + resolveElementContext, nativeContext, index, capability); + const rejectElementFun = createRejectElementFunctor.Call( + resolveElementContext, nativeContext, index, capability); + + // We can skip the "then" lookup on the result of the "resolve" call and + // immediately chain the continuation onto the {next_value} if: + // + // (a) The {constructor} is the intrinsic %Promise% function, and + // looking up "resolve" on {constructor} yields the initial + // Promise.resolve() builtin, and + // (b) the promise @@species protector cell is valid, meaning that + // no one messed with the Symbol.species property on any + // intrinsic promise or on the Promise.prototype, and + // (c) the {next_value} is a JSPromise whose [[Prototype]] field + // contains the intrinsic %PromisePrototype%, and + // (d) we're not running with async_hooks or DevTools enabled. + // + // In that case we also don't need to allocate a chained promise for + // the PromiseReaction (aka we can pass undefined to + // PerformPromiseThen), since this is only necessary for DevTools and + // PromiseHooks. + if (promiseResolveFunction != Undefined || + IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() || + IsPromiseSpeciesProtectorCellInvalid() || Is(nextValue) || + !IsPromiseThenLookupChainIntact( + nativeContext, UnsafeCast(nextValue).map)) { + // Let nextPromise be ? Call(constructor, _promiseResolve_, « + // nextValue »). + const nextPromise = + CallResolve(constructor, promiseResolveFunction, nextValue); + + // Perform ? Invoke(nextPromise, "then", « resolveElement, + // resultCapability.[[Reject]] »). 
+ const then = GetProperty(nextPromise, kThenString); + const thenResult = Call( + nativeContext, then, nextPromise, resolveElementFun, + rejectElementFun); + + // For catch prediction, mark that rejections here are + // semantically handled by the combined Promise. + if (IsDebugActive() && Is(thenResult)) deferred { + SetPropertyStrict( + context, thenResult, kPromiseHandledBySymbol, promise); + } + } else { + PerformPromiseThenImpl( + UnsafeCast(nextValue), resolveElementFun, + rejectElementFun, Undefined); } - } catch (e) deferred { - iterator::IteratorCloseOnException(iter); - goto Reject(e); + + // Set index to index + 1. + index += 1; } + } catch (e) deferred { + iterator::IteratorCloseOnException(iter); + goto Reject(e); } label Done {} // Set iteratorRecord.[[Done]] to true. // Set remainingElementsCount.[[Value]] to // remainingElementsCount.[[Value]] - 1. - let remainingElementsCount = UnsafeCast( - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot]); - remainingElementsCount -= 1; - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementRemainingSlot] = - remainingElementsCount; + const remainingElementsCount = -- *ContextSlot( + resolveElementContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementRemainingSlot); + + check(remainingElementsCount >= 0); + if (remainingElementsCount > 0) { - // Pre-allocate the backing store for the {values_array} to the desired - // capacity here. We may already have elements here in case of some - // fancy Thenable that calls the resolve callback immediately, so we need - // to handle that correctly here. 
- const valuesArray = UnsafeCast( - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementValuesArraySlot]); - const oldElements = UnsafeCast(valuesArray.elements); - const oldCapacity = oldElements.length_intptr; - const newCapacity = SmiUntag(index); + // Pre-allocate the backing store for the {values} to the desired + // capacity. We may already have elements in "values" - this happens + // when the Thenable calls the resolve callback immediately. + const valuesRef:&FixedArray = ContextSlot( + resolveElementContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementValuesSlot); + const values = *valuesRef; + // 'index' is a 1-based index and incremented after every Promise. Later we + // use 'values' as a 0-based array, so capacity 'index - 1' is enough. + const newCapacity = SmiUntag(index) - 1; + + const oldCapacity = values.length_intptr; if (oldCapacity < newCapacity) { - valuesArray.elements = - ExtractFixedArray(oldElements, 0, oldCapacity, newCapacity); + *valuesRef = ExtractFixedArray(values, 0, oldCapacity, newCapacity); } } else deferred { @@ -303,10 +301,15 @@ Reject(Object) { // Let valuesArray be CreateArrayFromList(values). // Perform ? Call(resultCapability.[[Resolve]], undefined, // « valuesArray »). 
- assert(remainingElementsCount == 0); - const valuesArray = UnsafeCast( - resolveElementContext[PromiseAllResolveElementContextSlots:: - kPromiseAllResolveElementValuesArraySlot]); + + const values: FixedArray = *ContextSlot( + resolveElementContext, + PromiseAllResolveElementContextSlots:: + kPromiseAllResolveElementValuesSlot); + const arrayMap = + *NativeContextSlot( + nativeContext, ContextSlot::JS_ARRAY_PACKED_ELEMENTS_MAP_INDEX); + const valuesArray = NewJSArray(arrayMap, values); Call(nativeContext, UnsafeCast(resolve), Undefined, valuesArray); } @@ -318,6 +321,7 @@ transitioning macro GeneratePromiseAll( implicit context: Context)( receiver: JSAny, iterable: JSAny, createResolveElementFunctor: F1, createRejectElementFunctor: F2): JSAny { + const nativeContext = LoadNativeContext(context); // Let C be the this value. // If Type(C) is not Object, throw a TypeError exception. const receiver = Cast(receiver) @@ -328,7 +332,16 @@ transitioning macro GeneratePromiseAll( // not trigger redundant ExceptionEvents const capability = NewPromiseCapability(receiver, False); + // NewPromiseCapability guarantees that receiver is Constructor. + assert(Is(receiver)); + const constructor = UnsafeCast(receiver); + try { + // Let promiseResolve be GetPromiseResolve(C). + // IfAbruptRejectPromise(promiseResolve, promiseCapability). + const promiseResolveFunction = + GetPromiseResolve(nativeContext, constructor); + // Let iterator be GetIterator(iterable). // IfAbruptRejectPromise(iterator, promiseCapability). let i = iterator::GetIterator(iterable); @@ -339,8 +352,9 @@ transitioning macro GeneratePromiseAll( // IteratorClose(iterator, result). // IfAbruptRejectPromise(result, promiseCapability). 
return PerformPromiseAll( - receiver, capability, i, createResolveElementFunctor, - createRejectElementFunctor) otherwise Reject; + nativeContext, i, constructor, capability, promiseResolveFunction, + createResolveElementFunctor, createRejectElementFunctor) + otherwise Reject; } catch (e) deferred { goto Reject(e); } label Reject(e: Object) deferred { diff --git a/deps/v8/src/builtins/promise-any.tq b/deps/v8/src/builtins/promise-any.tq index 1046ed0a89c4cf..d86e265d6c2bcf 100644 --- a/deps/v8/src/builtins/promise-any.tq +++ b/deps/v8/src/builtins/promise-any.tq @@ -5,11 +5,15 @@ #include 'src/builtins/builtins-promise-gen.h' namespace promise { -extern enum PromiseAnyRejectElementContextSlots extends int31 +type PromiseAnyRejectElementContext extends FunctionContext; +extern enum PromiseAnyRejectElementContextSlots extends intptr constexpr 'PromiseBuiltins::PromiseAnyRejectElementContextSlots' { - kPromiseAnyRejectElementRemainingSlot, - kPromiseAnyRejectElementCapabilitySlot, - kPromiseAnyRejectElementErrorsArraySlot, + kPromiseAnyRejectElementRemainingSlot: + Slot, + kPromiseAnyRejectElementCapabilitySlot: + Slot, + kPromiseAnyRejectElementErrorsSlot: + Slot, kPromiseAnyRejectElementLength } @@ -27,28 +31,36 @@ extern operator '[]' macro LoadContextElement( // case to mark it's done). See Promise.all which uses the same approach. transitioning macro CreatePromiseAnyRejectElementContext( implicit context: Context)( - capability: PromiseCapability, nativeContext: NativeContext): Context { - const rejectContext = AllocateSyntheticFunctionContext( - nativeContext, - PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength); - rejectContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot] = SmiConstant(1); - rejectContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementCapabilitySlot] = capability; - // Will be set later. 
- rejectContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementErrorsArraySlot] = Undefined; + capability: PromiseCapability, + nativeContext: NativeContext): PromiseAnyRejectElementContext { + const rejectContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, + PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength)); + InitContextSlot( + rejectContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot, + 1); + InitContextSlot( + rejectContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementCapabilitySlot, + capability); + InitContextSlot( + rejectContext, + PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementErrorsSlot, + kEmptyFixedArray); return rejectContext; } macro CreatePromiseAnyRejectElementFunction(implicit context: Context)( - rejectElementContext: Context, index: Smi, + rejectElementContext: PromiseAnyRejectElementContext, index: Smi, nativeContext: NativeContext): JSFunction { assert(index > 0); assert(index < kPropertyArrayHashFieldMax); - const map = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const map = *ContextSlot( + nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const rejectInfo = PromiseAnyRejectElementSharedFunConstant(); const reject = AllocateFunctionWithMapAndContext(map, rejectInfo, rejectElementContext); @@ -79,7 +91,9 @@ PromiseAnyRejectElementClosure( assert( context.length == - PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength); + SmiTag( + PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementLength)); + const context = %RawDownCast(context); // 4. Set alreadyCalled.[[Value]] to true. const nativeContext = LoadNativeContext(context); @@ -92,43 +106,48 @@ PromiseAnyRejectElementClosure( const index = identityHash - 1; // 6. Let errors be F.[[Errors]]. 
- if (context[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementErrorsArraySlot] == Undefined) { - // We're going to reject the Promise with a more fundamental error (e.g., - // something went wrong with iterating the Promises). We don't need to - // construct the "errors" array. - return Undefined; - } - - const errorsArray = UnsafeCast( - context[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementErrorsArraySlot]); + let errors = *ContextSlot( + context, + PromiseAnyRejectElementContextSlots::kPromiseAnyRejectElementErrorsSlot); // 7. Let promiseCapability be F.[[Capability]]. // 8. Let remainingElementsCount be F.[[RemainingElements]]. - let remainingElementsCount = - UnsafeCast(context[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot]); + let remainingElementsCount = *ContextSlot( + context, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot); + // 9. Set errors[index] to x. - errorsArray.objects[index] = value; + const newCapacity = IntPtrMax(SmiUntag(remainingElementsCount), index + 1); + if (newCapacity > errors.length_intptr) deferred { + errors = ExtractFixedArray(errors, 0, errors.length_intptr, newCapacity); + *ContextSlot( + context, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementErrorsSlot) = errors; + } + errors.objects[index] = value; // 10. Set remainingElementsCount.[[Value]] to // remainingElementsCount.[[Value]] - 1. remainingElementsCount = remainingElementsCount - 1; - context[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot] = remainingElementsCount; + *ContextSlot( + context, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot) = remainingElementsCount; // 11. If remainingElementsCount.[[Value]] is 0, then if (remainingElementsCount == 0) { // a. Let error be a newly created AggregateError object. // b. Set error.[[AggregateErrors]] to errors. 
- const error = ConstructAggregateError(errorsArray); + const error = ConstructAggregateError(errors); // c. Return ? Call(promiseCapability.[[Reject]], undefined, « error »). - const capability = UnsafeCast( - context[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementCapabilitySlot]); + const capability = *ContextSlot( + context, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementCapabilitySlot); Call(context, UnsafeCast(capability.reject), Undefined, error); } @@ -137,16 +156,15 @@ PromiseAnyRejectElementClosure( } transitioning macro PerformPromiseAny(implicit context: Context)( - iteratorRecord: iterator::IteratorRecord, constructor: Constructor, - resultCapability: PromiseCapability): JSAny labels + nativeContext: NativeContext, iteratorRecord: iterator::IteratorRecord, + constructor: Constructor, resultCapability: PromiseCapability, + promiseResolveFunction: JSAny): JSAny labels Reject(Object) { // 1. Assert: ! IsConstructor(constructor) is true. // 2. Assert: resultCapability is a PromiseCapability Record. - const nativeContext = LoadNativeContext(context); - - // 3. Let errors be a new empty List. - let growableErrorsArray = growable_fixed_array::NewGrowableFixedArray(); + // 3. Let errors be a new empty List. (Do nothing: errors is + // initialized lazily when the first Promise rejects.) // 4. Let remainingElementsCount be a new Record { [[Value]]: 1 }. const rejectElementContext = @@ -157,23 +175,8 @@ Reject(Object) { let index: Smi = 1; try { - // We can skip the "resolve" lookup on {constructor} if it's the - // Promise constructor and the Promise.resolve protector is intact, - // as that guards the lookup path for the "resolve" property on the - // Promise constructor. - let promiseResolveFunction: JSAny = Undefined; - if (!IsPromiseResolveLookupChainIntact(nativeContext, constructor)) - deferred { - // 6. Let promiseResolve be ? Get(constructor, `"resolve"`). 
- const promiseResolve = GetProperty(constructor, kResolveString); - // 7. If IsCallable(promiseResolve) is false, throw a - // TypeError exception. - promiseResolveFunction = Cast(promiseResolve) - otherwise ThrowTypeError( - MessageTemplate::kCalledNonCallable, 'resolve'); - } - const fastIteratorResultMap = UnsafeCast( - nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]); + const fastIteratorResultMap = *NativeContextSlot( + nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX); // 8. Repeat, while (true) { let nextValue: JSAny; @@ -215,8 +218,8 @@ Reject(Object) { MessageTemplate::kTooManyElementsInPromiseCombinator, 'any'); } - // h. Append undefined to errors. - growableErrorsArray.Push(Undefined); + // h. Append undefined to errors. (Do nothing: errors is initialized + // lazily when the first Promise rejects.) let nextPromise: JSAny; // i. Let nextPromise be ? Call(constructor, promiseResolve, @@ -245,11 +248,14 @@ Reject(Object) { rejectElementContext, index, nativeContext); // q. Set remainingElementsCount.[[Value]] to // remainingElementsCount.[[Value]] + 1. - const remainingElementsCount = UnsafeCast( - rejectElementContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot]); - rejectElementContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot] = + const remainingElementsCount = *ContextSlot( + rejectElementContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot); + *ContextSlot( + rejectElementContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot) = remainingElementsCount + 1; // r. Perform ? Invoke(nextPromise, "then", « @@ -283,24 +289,24 @@ Reject(Object) { // i. Set iteratorRecord.[[Done]] to true. // ii. Set remainingElementsCount.[[Value]] to // remainingElementsCount.[[Value]] - 1. 
- let remainingElementsCount = UnsafeCast( - rejectElementContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot]); - remainingElementsCount -= 1; - rejectElementContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementRemainingSlot] = - remainingElementsCount; - - const errorsArray = growableErrorsArray.ToFixedArray(); - rejectElementContext[PromiseAnyRejectElementContextSlots:: - kPromiseAnyRejectElementErrorsArraySlot] = - errorsArray; + const remainingElementsCount = -- *ContextSlot( + rejectElementContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementRemainingSlot); // iii. If remainingElementsCount.[[Value]] is 0, then if (remainingElementsCount == 0) deferred { // 1. Let error be a newly created AggregateError object. // 2. Set error.[[AggregateErrors]] to errors. - const error = ConstructAggregateError(errorsArray); + + // We may already have elements in "errors" - this happens when the + // Thenable calls the reject callback immediately. + const errors: FixedArray = *ContextSlot( + rejectElementContext, + PromiseAnyRejectElementContextSlots:: + kPromiseAnyRejectElementErrorsSlot); + + const error = ConstructAggregateError(errors); // 3. Return ThrowCompletion(error). goto Reject(error); } @@ -312,6 +318,8 @@ Reject(Object) { transitioning javascript builtin PromiseAny( js-implicit context: Context, receiver: JSAny)(iterable: JSAny): JSAny { + const nativeContext = LoadNativeContext(context); + // 1. Let C be the this value. const receiver = Cast(receiver) otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.any'); @@ -319,37 +327,42 @@ PromiseAny( // 2. Let promiseCapability be ? NewPromiseCapability(C). const capability = NewPromiseCapability(receiver, False); - // NewPromiseCapability guarantees that receiver is Constructor + // NewPromiseCapability guarantees that receiver is Constructor. 
assert(Is(receiver)); const constructor = UnsafeCast(receiver); try { - let iteratorRecord: iterator::IteratorRecord; - try { - // 3. Let iteratorRecord be GetIterator(iterable). + // 3. Let promiseResolve be GetPromiseResolve(C). + // 4. IfAbruptRejectPromise(promiseResolve, promiseCapability). + // (catch below) + const promiseResolveFunction = + GetPromiseResolve(nativeContext, constructor); - // 4. IfAbruptRejectPromise(iteratorRecord, promiseCapability). - // (catch below) - iteratorRecord = iterator::GetIterator(iterable); + // 5. Let iteratorRecord be GetIterator(iterable). - // 5. Let result be PerformPromiseAny(iteratorRecord, C, - // promiseCapability). + // 6. IfAbruptRejectPromise(iteratorRecord, promiseCapability). + // (catch below) + const iteratorRecord = iterator::GetIterator(iterable); - // 6. If result is an abrupt completion, then + // 7. Let result be PerformPromiseAny(iteratorRecord, C, + // promiseCapability). - // a. If iteratorRecord.[[Done]] is false, set result to - // IteratorClose(iteratorRecord, result). + // 8. If result is an abrupt completion, then - // b. IfAbruptRejectPromise(result, promiseCapability). + // a. If iteratorRecord.[[Done]] is false, set result to + // IteratorClose(iteratorRecord, result). - // [Iterator closing handled by PerformPromiseAny] + // b. IfAbruptRejectPromise(result, promiseCapability). - // 7. Return Completion(result). - return PerformPromiseAny(iteratorRecord, constructor, capability) - otherwise Reject; - } catch (e) deferred { - goto Reject(e); - } + // [Iterator closing handled by PerformPromiseAny] + + // 9. Return Completion(result). + return PerformPromiseAny( + nativeContext, iteratorRecord, constructor, capability, + promiseResolveFunction) + otherwise Reject; + } catch (e) deferred { + goto Reject(e); } label Reject(e: Object) deferred { // Exception must be bound to a JS value. 
assert(e != TheHole); @@ -361,10 +374,13 @@ PromiseAny( } transitioning macro ConstructAggregateError(implicit context: Context)( - errorsArray: FixedArray): JSObject { - const obj: JSAggregateError = error::ConstructInternalAggregateErrorHelper( + errors: FixedArray): JSObject { + const obj: JSObject = error::ConstructInternalAggregateErrorHelper( context, SmiConstant(MessageTemplate::kAllPromisesRejected)); - obj.errors = errorsArray; + const errorsJSArray = array::CreateJSArrayWithElements(errors); + SetOwnPropertyIgnoreAttributes( + obj, ErrorsStringConstant(), errorsJSArray, + SmiConstant(PropertyAttributes::DONT_ENUM)); return obj; } diff --git a/deps/v8/src/builtins/promise-constructor.tq b/deps/v8/src/builtins/promise-constructor.tq index dbf1fe2f4ded26..32de19f3b29cdd 100644 --- a/deps/v8/src/builtins/promise-constructor.tq +++ b/deps/v8/src/builtins/promise-constructor.tq @@ -36,7 +36,7 @@ HasAccessCheckFailed(implicit context: Context)( return false; } -extern macro ConstructorBuiltinsAssembler::EmitFastNewObject( +extern macro ConstructorBuiltinsAssembler::FastNewObject( Context, JSFunction, JSReceiver): JSObject; extern macro @@ -57,8 +57,7 @@ PromiseConstructor( ThrowTypeError(MessageTemplate::kResolverNotAFunction, executor); } - const promiseFun = UnsafeCast( - context[NativeContextSlot::PROMISE_FUNCTION_INDEX]); + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); // Silently fail if the stack looks fishy. 
if (HasAccessCheckFailed(context, promiseFun, executor)) { @@ -71,8 +70,8 @@ PromiseConstructor( if (promiseFun == newTarget) { result = NewJSPromise(); } else { - result = UnsafeCast(EmitFastNewObject( - context, promiseFun, UnsafeCast(newTarget))); + result = UnsafeCast( + FastNewObject(context, promiseFun, UnsafeCast(newTarget))); PromiseInit(result); if (IsPromiseHookEnabledOrHasAsyncEventDelegate()) { runtime::PromiseHookInit(result, Undefined); diff --git a/deps/v8/src/builtins/promise-finally.tq b/deps/v8/src/builtins/promise-finally.tq index 48928ca4ce7ef3..90a60678dd0736 100644 --- a/deps/v8/src/builtins/promise-finally.tq +++ b/deps/v8/src/builtins/promise-finally.tq @@ -7,41 +7,48 @@ namespace promise { -// TODO(joshualitt): The below ContextSlots are only available on synthetic -// contexts created by the promise pipeline for use in the promise pipeline. -// However, with Torque we should type the context and its slots to prevent -// accidentially using these slots on contexts which don't support them. 
-const kPromiseBuiltinsValueSlot: constexpr ContextSlot - generates 'PromiseBuiltins::kValueSlot'; -const kPromiseBuiltinsOnFinallySlot: constexpr ContextSlot - generates 'PromiseBuiltins::kOnFinallySlot'; -const kPromiseBuiltinsConstructorSlot: constexpr ContextSlot - generates 'PromiseBuiltins::kConstructorSlot'; -const kPromiseBuiltinsPromiseValueThunkOrReasonContextLength: constexpr int31 - generates 'PromiseBuiltins::kPromiseValueThunkOrReasonContextLength'; -const kPromiseBuiltinsPromiseFinallyContextLength: constexpr int31 - generates 'PromiseBuiltins::kPromiseFinallyContextLength'; +type PromiseValueThunkOrReasonContext extends FunctionContext; +extern enum PromiseValueThunkOrReasonContextSlot extends intptr +constexpr 'PromiseBuiltins::PromiseValueThunkOrReasonContextSlot' { + kValueSlot: Slot, + kPromiseValueThunkOrReasonContextLength +} + +type PromiseFinallyContext extends FunctionContext; +extern enum PromiseFinallyContextSlot extends intptr +constexpr 'PromiseBuiltins::PromiseFinallyContextSlot' { + kOnFinallySlot: Slot, + kConstructorSlot: Slot, + kPromiseFinallyContextLength +} transitioning javascript builtin PromiseValueThunkFinally( js-implicit context: Context, receiver: JSAny)(): JSAny { - return UnsafeCast(context[kPromiseBuiltinsValueSlot]); + const context = %RawDownCast(context); + return *ContextSlot( + context, PromiseValueThunkOrReasonContextSlot::kValueSlot); } transitioning javascript builtin PromiseThrowerFinally(js-implicit context: Context, receiver: JSAny)(): never { - const reason = UnsafeCast(context[kPromiseBuiltinsValueSlot]); + const context = %RawDownCast(context); + const reason = + *ContextSlot(context, PromiseValueThunkOrReasonContextSlot::kValueSlot); Throw(reason); } macro CreateThrowerFunction(implicit context: Context)( nativeContext: NativeContext, reason: JSAny): JSFunction { - const throwerContext = AllocateSyntheticFunctionContext( - nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength); - 
throwerContext[kPromiseBuiltinsValueSlot] = reason; - const map = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const throwerContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, + PromiseValueThunkOrReasonContextSlot:: + kPromiseValueThunkOrReasonContextLength)); + InitContextSlot( + throwerContext, PromiseValueThunkOrReasonContextSlot::kValueSlot, reason); + const map = *ContextSlot( + nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const throwerInfo = PromiseThrowerFinallySharedFunConstant(); return AllocateFunctionWithMapAndContext(map, throwerInfo, throwerContext); } @@ -49,17 +56,18 @@ macro CreateThrowerFunction(implicit context: Context)( transitioning javascript builtin PromiseCatchFinally( js-implicit context: Context, receiver: JSAny)(reason: JSAny): JSAny { + const context = %RawDownCast(context); // 1. Let onFinally be F.[[OnFinally]]. // 2. Assert: IsCallable(onFinally) is true. - const onFinally = - UnsafeCast(context[kPromiseBuiltinsOnFinallySlot]); + const onFinally: Callable = + *ContextSlot(context, PromiseFinallyContextSlot::kOnFinallySlot); // 3. Let result be ? Call(onFinally). const result = Call(context, onFinally, Undefined); // 4. Let C be F.[[Constructor]]. - const constructor = - UnsafeCast(context[kPromiseBuiltinsConstructorSlot]); + const constructor: Constructor = + *ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot); // 5. Assert: IsConstructor(C) is true. 
assert(IsConstructor(constructor)); @@ -77,12 +85,16 @@ PromiseCatchFinally( macro CreateValueThunkFunction(implicit context: Context)( nativeContext: NativeContext, value: JSAny): JSFunction { - const valueThunkContext = AllocateSyntheticFunctionContext( - nativeContext, kPromiseBuiltinsPromiseValueThunkOrReasonContextLength); - valueThunkContext[kPromiseBuiltinsValueSlot] = value; - const map = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + const valueThunkContext = %RawDownCast( + AllocateSyntheticFunctionContext( + nativeContext, + PromiseValueThunkOrReasonContextSlot:: + kPromiseValueThunkOrReasonContextLength)); + InitContextSlot( + valueThunkContext, PromiseValueThunkOrReasonContextSlot::kValueSlot, + value); + const map = *ContextSlot( + nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const valueThunkInfo = PromiseValueThunkFinallySharedFunConstant(); return AllocateFunctionWithMapAndContext( map, valueThunkInfo, valueThunkContext); @@ -91,17 +103,18 @@ macro CreateValueThunkFunction(implicit context: Context)( transitioning javascript builtin PromiseThenFinally( js-implicit context: Context, receiver: JSAny)(value: JSAny): JSAny { + const context = %RawDownCast(context); // 1. Let onFinally be F.[[OnFinally]]. // 2. Assert: IsCallable(onFinally) is true. const onFinally = - UnsafeCast(context[kPromiseBuiltinsOnFinallySlot]); + *ContextSlot(context, PromiseFinallyContextSlot::kOnFinallySlot); // 3. Let result be ? Call(onFinally). const result = Call(context, onFinally, Undefined); // 4. Let C be F.[[Constructor]]. const constructor = - UnsafeCast(context[kPromiseBuiltinsConstructorSlot]); + *ContextSlot(context, PromiseFinallyContextSlot::kConstructorSlot); // 5. Assert: IsConstructor(C) is true. 
assert(IsConstructor(constructor)); @@ -124,14 +137,17 @@ struct PromiseFinallyFunctions { macro CreatePromiseFinallyFunctions(implicit context: Context)( nativeContext: NativeContext, onFinally: Callable, - constructor: JSReceiver): PromiseFinallyFunctions { - const promiseContext = AllocateSyntheticFunctionContext( - nativeContext, kPromiseBuiltinsPromiseFinallyContextLength); - promiseContext[kPromiseBuiltinsOnFinallySlot] = onFinally; - promiseContext[kPromiseBuiltinsConstructorSlot] = constructor; - const map = UnsafeCast( - nativeContext - [NativeContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX]); + constructor: Constructor): PromiseFinallyFunctions { + const promiseContext = + %RawDownCast(AllocateSyntheticFunctionContext( + nativeContext, + PromiseFinallyContextSlot::kPromiseFinallyContextLength)); + InitContextSlot( + promiseContext, PromiseFinallyContextSlot::kOnFinallySlot, onFinally); + InitContextSlot( + promiseContext, PromiseFinallyContextSlot::kConstructorSlot, constructor); + const map = *ContextSlot( + nativeContext, ContextSlot::STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX); const thenFinallyInfo = PromiseThenFinallySharedFunConstant(); const thenFinally = AllocateFunctionWithMapAndContext(map, thenFinallyInfo, promiseContext); @@ -154,15 +170,15 @@ PromisePrototypeFinally( // 3. Let C be ? SpeciesConstructor(promise, %Promise%). const nativeContext = LoadNativeContext(context); - const promiseFun = UnsafeCast( - nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]); + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); - let constructor: JSReceiver = promiseFun; + let constructor: Constructor = UnsafeCast(promiseFun); const receiverMap = jsReceiver.map; if (!IsJSPromiseMap(receiverMap) || !IsPromiseSpeciesLookupChainIntact(nativeContext, receiverMap)) deferred { - constructor = SpeciesConstructor(jsReceiver, promiseFun); + constructor = + UnsafeCast(SpeciesConstructor(jsReceiver, promiseFun)); } // 4. 
Assert: IsConstructor(C) is true. diff --git a/deps/v8/src/builtins/promise-jobs.tq b/deps/v8/src/builtins/promise-jobs.tq index 6c64baf22d19ba..80e98f373b9665 100644 --- a/deps/v8/src/builtins/promise-jobs.tq +++ b/deps/v8/src/builtins/promise-jobs.tq @@ -22,7 +22,7 @@ PromiseResolveThenableJob(implicit context: Context)( // We take the generic (slow-)path if a PromiseHook is enabled or the // debugger is active, to make sure we expose spec compliant behavior. const nativeContext = LoadNativeContext(context); - const promiseThen = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX]; + const promiseThen = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX); const thenableMap = thenable.map; if (TaggedEqual(then, promiseThen) && IsJSPromiseMap(thenableMap) && !IsPromiseHookEnabledOrDebugIsActiveOrHasAsyncEventDelegate() && diff --git a/deps/v8/src/builtins/promise-misc.tq b/deps/v8/src/builtins/promise-misc.tq index 7ed2f7909a88c1..67e5e38687d76c 100644 --- a/deps/v8/src/builtins/promise-misc.tq +++ b/deps/v8/src/builtins/promise-misc.tq @@ -38,13 +38,11 @@ macro PromiseInit(promise: JSPromise): void { } macro InnerNewJSPromise(implicit context: Context)(): JSPromise { - const nativeContext = LoadNativeContext(context); - const promiseFun = UnsafeCast( - nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]); + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); assert(IsFunctionWithPrototypeSlotMap(promiseFun.map)); const promiseMap = UnsafeCast(promiseFun.prototype_or_initial_map); const promiseHeapObject = promise_internal::AllocateJSPromise(context); - * UnsafeConstCast(& promiseHeapObject.map) = promiseMap; + *UnsafeConstCast(&promiseHeapObject.map) = promiseMap; const promise = UnsafeCast(promiseHeapObject); promise.properties_or_hash = kEmptyFixedArray; promise.elements = kEmptyFixedArray; @@ -69,8 +67,9 @@ macro NewPromiseFulfillReactionJobTask(implicit context: Context)( context: handlerContext, handler, promise_or_capability: 
promiseOrCapability, - continuation_preserved_embedder_data: nativeContext - [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX] + continuation_preserved_embedder_data: + *ContextSlot( + nativeContext, ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) }; } @@ -85,8 +84,9 @@ macro NewPromiseRejectReactionJobTask(implicit context: Context)( context: handlerContext, handler, promise_or_capability: promiseOrCapability, - continuation_preserved_embedder_data: nativeContext - [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX] + continuation_preserved_embedder_data: + *ContextSlot( + nativeContext, ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) }; } @@ -143,8 +143,9 @@ macro NewPromiseReaction(implicit context: Context)( reject_handler: rejectHandler, fulfill_handler: fulfillHandler, promise_or_capability: promiseOrCapability, - continuation_preserved_embedder_data: nativeContext - [NativeContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX] + continuation_preserved_embedder_data: + *ContextSlot( + nativeContext, ContextSlot::CONTINUATION_PRESERVED_EMBEDDER_DATA_INDEX) }; } @@ -208,7 +209,7 @@ macro InvokeThen(implicit context: Context)( IsPromiseThenLookupChainIntact( nativeContext, UnsafeCast(receiver).map)) { const then = - UnsafeCast(nativeContext[NativeContextSlot::PROMISE_THEN_INDEX]); + *NativeContextSlot(nativeContext, ContextSlot::PROMISE_THEN_INDEX); return callFunctor.Call(nativeContext, then, receiver, arg1, arg2); } else deferred { diff --git a/deps/v8/src/builtins/promise-race.tq b/deps/v8/src/builtins/promise-race.tq index 27d2038398ab4b..23a7efc00ac478 100644 --- a/deps/v8/src/builtins/promise-race.tq +++ b/deps/v8/src/builtins/promise-race.tq @@ -6,24 +6,6 @@ namespace promise { -extern macro PromiseForwardingHandlerSymbolConstant(): Symbol; -const kPromiseForwardingHandlerSymbol: Symbol = - PromiseForwardingHandlerSymbolConstant(); -extern macro PromiseHandledBySymbolConstant(): Symbol; -const 
kPromiseHandledBySymbol: Symbol = PromiseHandledBySymbolConstant(); -extern macro ResolveStringConstant(): String; -const kResolveString: String = ResolveStringConstant(); -extern macro SetPropertyStrict(Context, Object, Object, Object): Object; -extern macro IsPromiseResolveProtectorCellInvalid(): bool; - -macro IsPromiseResolveLookupChainIntact(implicit context: Context)( - nativeContext: NativeContext, constructor: JSReceiver): bool { - if (IsForceSlowPath()) return false; - const promiseFun = UnsafeCast( - nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]); - return promiseFun == constructor && !IsPromiseResolveProtectorCellInvalid(); -} - // https://tc39.es/ecma262/#sec-promise.race transitioning javascript builtin PromiseRace( @@ -31,6 +13,8 @@ PromiseRace( const receiver = Cast(receiver) otherwise ThrowTypeError(MessageTemplate::kCalledOnNonObject, 'Promise.race'); + const nativeContext = LoadNativeContext(context); + // Let promiseCapability be ? NewPromiseCapability(C). // Don't fire debugEvent so that forwarding the rejection through all does // not trigger redundant ExceptionEvents @@ -39,6 +23,10 @@ PromiseRace( const reject = capability.reject; const promise = capability.promise; + // NewPromiseCapability guarantees that receiver is Constructor. + assert(Is(receiver)); + const constructor = UnsafeCast(receiver); + // For catch prediction, don't treat the .then calls as handling it; // instead, recurse outwards. if (IsDebugActive()) deferred { @@ -46,10 +34,15 @@ PromiseRace( } try { - // Let iterator be GetIterator(iterable). - // IfAbruptRejectPromise(iterator, promiseCapability). + let promiseResolveFunction: JSAny; let i: iterator::IteratorRecord; try { + // Let promiseResolve be GetPromiseResolve(C). + // IfAbruptRejectPromise(promiseResolve, promiseCapability). + promiseResolveFunction = GetPromiseResolve(nativeContext, constructor); + + // Let iterator be GetIterator(iterable). + // IfAbruptRejectPromise(iterator, promiseCapability). 
i = iterator::GetIterator(iterable); } catch (e) deferred { goto Reject(e); @@ -57,26 +50,8 @@ PromiseRace( // Let result be PerformPromiseRace(iteratorRecord, C, promiseCapability). try { - // We can skip the "resolve" lookup on {constructor} if it's the - // Promise constructor and the Promise.resolve protector is intact, - // as that guards the lookup path for the "resolve" property on the - // Promise constructor. - const nativeContext = LoadNativeContext(context); - let promiseResolveFunction: JSAny = Undefined; - if (!IsPromiseResolveLookupChainIntact(nativeContext, receiver)) - deferred { - // 3. Let _promiseResolve_ be ? Get(_constructor_, `"resolve"`). - const resolve = GetProperty(receiver, kResolveString); - - // 4. If IsCallable(_promiseResolve_) is *false*, throw a - // *TypeError* exception. - promiseResolveFunction = Cast(resolve) - otherwise ThrowTypeError( - MessageTemplate::kCalledNonCallable, 'resolve'); - } - - const fastIteratorResultMap = UnsafeCast( - nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]); + const fastIteratorResultMap = *NativeContextSlot( + nativeContext, ContextSlot::ITERATOR_RESULT_MAP_INDEX); while (true) { let nextValue: JSAny; try { @@ -96,9 +71,8 @@ PromiseRace( } // Let nextPromise be ? Call(constructor, _promiseResolve_, « // nextValue »). - const nextPromise = CallResolve( - UnsafeCast(receiver), promiseResolveFunction, - nextValue); + const nextPromise = + CallResolve(constructor, promiseResolveFunction, nextValue); // Perform ? Invoke(nextPromise, "then", « resolveElement, // resultCapability.[[Reject]] »). 
diff --git a/deps/v8/src/builtins/promise-reaction-job.tq b/deps/v8/src/builtins/promise-reaction-job.tq index 1e89da02617dc9..0374b2a3fe0674 100644 --- a/deps/v8/src/builtins/promise-reaction-job.tq +++ b/deps/v8/src/builtins/promise-reaction-job.tq @@ -30,7 +30,7 @@ macro RejectPromiseReactionJob( } } } else { - StaticAssert(reactionType == kPromiseReactionFulfill); + static_assert(reactionType == kPromiseReactionFulfill); // We have to call out to the dedicated PromiseRejectReactionJob // builtin here, instead of just doing the work inline, as otherwise // the catch predictions in the debugger will be wrong, which just @@ -79,7 +79,7 @@ macro PromiseReactionJob( return FuflfillPromiseReactionJob( context, promiseOrCapability, argument, reactionType); } else { - StaticAssert(reactionType == kPromiseReactionReject); + static_assert(reactionType == kPromiseReactionReject); return RejectPromiseReactionJob( context, promiseOrCapability, argument, reactionType); } diff --git a/deps/v8/src/builtins/promise-resolve.tq b/deps/v8/src/builtins/promise-resolve.tq index dbb60720c04416..e933dfbae0abf8 100644 --- a/deps/v8/src/builtins/promise-resolve.tq +++ b/deps/v8/src/builtins/promise-resolve.tq @@ -30,7 +30,7 @@ transitioning builtin PromiseResolve(implicit context: Context)( constructor: JSReceiver, value: JSAny): JSAny { const nativeContext = LoadNativeContext(context); - const promiseFun = nativeContext[NativeContextSlot::PROMISE_FUNCTION_INDEX]; + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); try { // Check if {value} is a JSPromise. const value = Cast(value) otherwise NeedToAllocate; @@ -40,7 +40,9 @@ PromiseResolve(implicit context: Context)( // intact, as that guards the lookup path for "constructor" on // JSPromise instances which have the (initial) Promise.prototype. 
const promisePrototype = - nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX]; + *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX); + // Check that Torque load elimination works. + static_assert(nativeContext == LoadNativeContext(context)); if (value.map.prototype != promisePrototype) { goto SlowConstructor; } @@ -137,7 +139,7 @@ ResolvePromise(implicit context: Context)( assert(IsJSReceiverMap(resolutionMap)); assert(!IsPromiseThenProtectorCellInvalid()); if (resolutionMap == - nativeContext[NativeContextSlot::ITERATOR_RESULT_MAP_INDEX]) { + *NativeContextSlot(ContextSlot::ITERATOR_RESULT_MAP_INDEX)) { return FulfillPromise(promise, resolution); } else { goto Slow; @@ -145,10 +147,12 @@ ResolvePromise(implicit context: Context)( } const promisePrototype = - nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX]; + *NativeContextSlot(ContextSlot::PROMISE_PROTOTYPE_INDEX); if (resolutionMap.prototype == promisePrototype) { // The {resolution} is a native Promise in this case. - then = nativeContext[NativeContextSlot::PROMISE_THEN_INDEX]; + then = *NativeContextSlot(ContextSlot::PROMISE_THEN_INDEX); + // Check that Torque load elimination works. 
+ static_assert(nativeContext == LoadNativeContext(context)); goto Enqueue; } goto Slow; diff --git a/deps/v8/src/builtins/promise-then.tq b/deps/v8/src/builtins/promise-then.tq index 3de6d277d840ca..00f9b0c80f3632 100644 --- a/deps/v8/src/builtins/promise-then.tq +++ b/deps/v8/src/builtins/promise-then.tq @@ -10,7 +10,7 @@ macro IsPromiseSpeciesLookupChainIntact( nativeContext: NativeContext, promiseMap: Map): bool { const promisePrototype = - nativeContext[NativeContextSlot::PROMISE_PROTOTYPE_INDEX]; + *NativeContextSlot(nativeContext, ContextSlot::PROMISE_PROTOTYPE_INDEX); if (IsForceSlowPath()) return false; if (promiseMap.prototype != promisePrototype) return false; return !IsPromiseSpeciesProtectorCellInvalid(); @@ -27,8 +27,7 @@ PromisePrototypeThen(js-implicit context: NativeContext, receiver: JSAny)( receiver); // 3. Let C be ? SpeciesConstructor(promise, %Promise%). - const promiseFun = UnsafeCast( - context[NativeContextSlot::PROMISE_FUNCTION_INDEX]); + const promiseFun = *NativeContextSlot(ContextSlot::PROMISE_FUNCTION_INDEX); // 4. Let resultCapability be ? NewPromiseCapability(C). let resultPromiseOrCapability: JSPromise|PromiseCapability; diff --git a/deps/v8/src/builtins/proxy-delete-property.tq b/deps/v8/src/builtins/proxy-delete-property.tq index 45914a6ed55950..a5925c2f7dcd46 100644 --- a/deps/v8/src/builtins/proxy-delete-property.tq +++ b/deps/v8/src/builtins/proxy-delete-property.tq @@ -16,7 +16,7 @@ ProxyDeleteProperty(implicit context: Context)( PerformStackCheck(); // 1. Assert: IsPropertyKey(P) is true. assert(TaggedIsNotSmi(name)); - assert(IsName(name)); + assert(Is(name)); assert(!IsPrivateSymbol(name)); try { diff --git a/deps/v8/src/builtins/proxy-get-property.tq b/deps/v8/src/builtins/proxy-get-property.tq index 2d6a1edee68869..563b38be37162d 100644 --- a/deps/v8/src/builtins/proxy-get-property.tq +++ b/deps/v8/src/builtins/proxy-get-property.tq @@ -18,7 +18,7 @@ ProxyGetProperty(implicit context: Context)( PerformStackCheck(); // 1. 
Assert: IsPropertyKey(P) is true. assert(TaggedIsNotSmi(name)); - assert(IsName(name)); + assert(Is(name)); assert(!IsPrivateSymbol(name)); // 2. Let handler be O.[[ProxyHandler]]. diff --git a/deps/v8/src/builtins/proxy-has-property.tq b/deps/v8/src/builtins/proxy-has-property.tq index 488f6fabb31cc6..fc81d5dcc9d3b2 100644 --- a/deps/v8/src/builtins/proxy-has-property.tq +++ b/deps/v8/src/builtins/proxy-has-property.tq @@ -10,12 +10,12 @@ namespace proxy { // https://tc39.github.io/ecma262/#sec-proxy-object-internal-methods-and-internal-slots-hasproperty-p transitioning builtin ProxyHasProperty(implicit context: Context)( proxy: JSProxy, name: PropertyKey): JSAny { - assert(IsJSProxy(proxy)); + assert(Is(proxy)); PerformStackCheck(); // 1. Assert: IsPropertyKey(P) is true. - assert(IsName(name)); + assert(Is(name)); assert(!IsPrivateSymbol(name)); try { diff --git a/deps/v8/src/builtins/proxy-revoke.tq b/deps/v8/src/builtins/proxy-revoke.tq index 5d2071b931bc8b..d031bb9f1d9777 100644 --- a/deps/v8/src/builtins/proxy-revoke.tq +++ b/deps/v8/src/builtins/proxy-revoke.tq @@ -9,28 +9,34 @@ namespace proxy { // Proxy Revocation Functions // https://tc39.github.io/ecma262/#sec-proxy-revocation-functions transitioning javascript builtin -ProxyRevoke(js-implicit context: NativeContext)(): Undefined { - // 1. Let p be F.[[RevocableProxy]]. - const proxyObject: Object = context[PROXY_SLOT]; +ProxyRevoke(js-implicit context: Context)(): Undefined { + const context = %RawDownCast(context); - // 2. If p is null, return undefined - if (proxyObject == Null) { - return Undefined; + // 1. Let p be F.[[RevocableProxy]]. + const proxySlot:&(JSProxy | Null) = + ContextSlot(context, ProxyRevokeFunctionContextSlot::kProxySlot); + + typeswitch (*proxySlot) { + case (Null): { + // 2. If p is null, return undefined + return Undefined; + } + case (proxy: JSProxy): { + // 3. Set F.[[RevocableProxy]] to null. + *proxySlot = Null; + + // 4. Assert: p is a Proxy object. 
+ assert(Is(proxy)); + + // 5. Set p.[[ProxyTarget]] to null. + proxy.target = Null; + + // 6. Set p.[[ProxyHandler]] to null. + proxy.handler = Null; + + // 7. Return undefined. + return Undefined; + } } - - // 3. Set F.[[RevocableProxy]] to null. - context[PROXY_SLOT] = Null; - - // 4. Assert: p is a Proxy object. - const proxy: JSProxy = UnsafeCast(proxyObject); - - // 5. Set p.[[ProxyTarget]] to null. - proxy.target = Null; - - // 6. Set p.[[ProxyHandler]] to null. - proxy.handler = Null; - - // 7. Return undefined. - return Undefined; } } diff --git a/deps/v8/src/builtins/proxy-set-property.tq b/deps/v8/src/builtins/proxy-set-property.tq index 49f55fcd336527..441a5d418d08dd 100644 --- a/deps/v8/src/builtins/proxy-set-property.tq +++ b/deps/v8/src/builtins/proxy-set-property.tq @@ -23,7 +23,7 @@ ProxySetProperty(implicit context: Context)( receiverValue: JSAny): JSAny { // 1. Assert: IsPropertyKey(P) is true. assert(TaggedIsNotSmi(name)); - assert(IsName(name)); + assert(Is(name)); let key: PropertyKey; typeswitch (name) { diff --git a/deps/v8/src/builtins/proxy.tq b/deps/v8/src/builtins/proxy.tq index 8f662a9f4d15ac..e80ed3619210b5 100644 --- a/deps/v8/src/builtins/proxy.tq +++ b/deps/v8/src/builtins/proxy.tq @@ -23,4 +23,11 @@ const kProxyGet: constexpr int31 generates 'JSProxy::AccessKind::kGet'; const kProxySet: constexpr int31 generates 'JSProxy::AccessKind::kSet'; + +type ProxyRevokeFunctionContext extends FunctionContext; +extern enum ProxyRevokeFunctionContextSlot extends intptr +constexpr 'ProxiesCodeStubAssembler::ProxyRevokeFunctionContextSlot' { + kProxySlot: Slot, + kProxyContextLength +} } diff --git a/deps/v8/src/builtins/regexp-match.tq b/deps/v8/src/builtins/regexp-match.tq index d5581e050936f9..48fd8a42bf7a85 100644 --- a/deps/v8/src/builtins/regexp-match.tq +++ b/deps/v8/src/builtins/regexp-match.tq @@ -110,7 +110,7 @@ transitioning macro RegExpPrototypeMatchBody(implicit context: Context)( // length is less than the maximal Smi value. 
const kMaxStringLengthFitsSmi: constexpr bool = kStringMaxLengthUintptr < kSmiMaxValue; - StaticAssert(kMaxStringLengthFitsSmi); + static_assert(kMaxStringLengthFitsSmi); assert(TaggedIsPositiveSmi(newLastIndex)); } diff --git a/deps/v8/src/builtins/regexp.tq b/deps/v8/src/builtins/regexp.tq index 21577b47634569..e09ddf3d7c3beb 100644 --- a/deps/v8/src/builtins/regexp.tq +++ b/deps/v8/src/builtins/regexp.tq @@ -155,10 +155,8 @@ transitioning macro RegExpPrototypeExecBody(implicit context: Context)( regexp, matchIndices, string, lastIndex); } -macro LoadRegExpFunction(implicit context: Context)( - nativeContext: NativeContext): JSFunction { - return UnsafeCast( - nativeContext[NativeContextSlot::REGEXP_FUNCTION_INDEX]); +macro LoadRegExpFunction(nativeContext: NativeContext): JSFunction { + return *NativeContextSlot(nativeContext, ContextSlot::REGEXP_FUNCTION_INDEX); } // Note this doesn't guarantee const-ness of object properties, just @@ -186,8 +184,7 @@ extern enum Flag constexpr 'JSRegExp::Flag' { kMultiline, kSticky, kUnicode, - kDotAll, - kInvalid + kDotAll } const kRegExpPrototypeOldFlagGetter: constexpr int31 diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 88bb80891e2530..b0c15825445ee4 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -3014,6 +3014,11 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ Ret(); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // TODO(v8:10701): Implement for this platform. 
+ __ Trap(); +} + namespace { static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 4739e18c57f629..2d7e93c9bb8a12 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -2,9 +2,8 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. -#include "src/init/setup-isolate.h" - #include "src/builtins/builtins.h" +#include "src/builtins/profile-data-reader.h" #include "src/codegen/assembler-inl.h" #include "src/codegen/interface-descriptors.h" #include "src/codegen/macro-assembler-inl.h" @@ -12,7 +11,8 @@ #include "src/compiler/code-assembler.h" #include "src/execution/isolate.h" #include "src/handles/handles-inl.h" -#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range. +#include "src/heap/heap-inl.h" // For Heap::code_range. +#include "src/init/setup-isolate.h" #include "src/interpreter/bytecodes.h" #include "src/interpreter/interpreter-generator.h" #include "src/interpreter/interpreter.h" @@ -75,7 +75,7 @@ Handle BuildPlaceholder(Isolate* isolate, int32_t builtin_index) { } CodeDesc desc; masm.GetCode(isolate, &desc); - Handle code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN) + Handle code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_self_reference(masm.CodeObject()) .set_builtin_index(builtin_index) .Build(); @@ -116,7 +116,7 @@ Code BuildWithMacroAssembler(Isolate* isolate, int32_t builtin_index, masm.GetCode(isolate, &desc, MacroAssembler::kNoSafepointTable, handler_table_offset); - Handle code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN) + Handle code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_self_reference(masm.CodeObject()) .set_builtin_index(builtin_index) .Build(); @@ -142,7 +142,7 @@ Code BuildAdaptor(Isolate* isolate, int32_t builtin_index, 
Builtins::Generate_Adaptor(&masm, builtin_address); CodeDesc desc; masm.GetCode(isolate, &desc); - Handle code = Factory::CodeBuilder(isolate, desc, Code::BUILTIN) + Handle code = Factory::CodeBuilder(isolate, desc, CodeKind::BUILTIN) .set_self_reference(masm.CodeObject()) .set_builtin_index(builtin_index) .Build(); @@ -158,15 +158,16 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index, // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - Zone zone(isolate->allocator(), ZONE_NAME); + Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone); const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1; compiler::CodeAssemblerState state( - isolate, &zone, argc_with_recv, Code::BUILTIN, name, + isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name, PoisoningMitigationLevel::kDontPoison, builtin_index); generator(&state); Handle code = compiler::CodeAssembler::GenerateCode( - &state, BuiltinAssemblerOptions(isolate, builtin_index)); + &state, BuiltinAssemblerOptions(isolate, builtin_index), + ProfileDataFromFile::TryRead(name)); return *code; } @@ -179,18 +180,19 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index, // Canonicalize handles, so that we can share constant pool entries pointing // to code targets without dereferencing their handles. CanonicalHandleScope canonical(isolate); - Zone zone(isolate->allocator(), ZONE_NAME); + Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone); // The interface descriptor with given key must be initialized at this point // and this construction just queries the details from the descriptors table. CallInterfaceDescriptor descriptor(interface_descriptor); // Ensure descriptor is already initialized. 
DCHECK_LE(0, descriptor.GetRegisterParameterCount()); compiler::CodeAssemblerState state( - isolate, &zone, descriptor, Code::BUILTIN, name, + isolate, &zone, descriptor, CodeKind::BUILTIN, name, PoisoningMitigationLevel::kDontPoison, builtin_index); generator(&state); Handle code = compiler::CodeAssembler::GenerateCode( - &state, BuiltinAssemblerOptions(isolate, builtin_index)); + &state, BuiltinAssemblerOptions(isolate, builtin_index), + ProfileDataFromFile::TryRead(name)); return *code; } diff --git a/deps/v8/src/builtins/torque-csa-header-includes.h b/deps/v8/src/builtins/torque-csa-header-includes.h new file mode 100644 index 00000000000000..560f704d66c554 --- /dev/null +++ b/deps/v8/src/builtins/torque-csa-header-includes.h @@ -0,0 +1,19 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_BUILTINS_TORQUE_CSA_HEADER_INCLUDES_H_ +#define V8_BUILTINS_TORQUE_CSA_HEADER_INCLUDES_H_ + +// This file is included by Torque-generated CSA headers and contains +// includes necessary for these headers. 
+ +#include "src/builtins/builtins-promise.h" +#include "src/builtins/builtins-proxy-gen.h" +#include "src/codegen/code-stub-assembler.h" +#include "src/compiler/code-assembler.h" +#include "src/utils/utils.h" +#include "torque-generated/csa-types-tq.h" +#include "torque-generated/field-offsets-tq.h" + +#endif // V8_BUILTINS_TORQUE_CSA_HEADER_INCLUDES_H_ diff --git a/deps/v8/src/builtins/torque-internal.tq b/deps/v8/src/builtins/torque-internal.tq index d2b107f932dcad..254663039caaa6 100644 --- a/deps/v8/src/builtins/torque-internal.tq +++ b/deps/v8/src/builtins/torque-internal.tq @@ -25,6 +25,11 @@ macro NewReference(object: HeapObject, offset: intptr):&T { return %RawDownCast<&T>( Reference{object: object, offset: offset, unsafeMarker: Unsafe {}}); } +macro ReferenceCast(ref:&U):&T { + const ref = NewReference(ref.object, ref.offset); + UnsafeCast(*ref); + return ref; +} } // namespace unsafe struct Slice { @@ -100,7 +105,7 @@ struct SliceIterator { } macro Next(): T labels NoMore { - return * this.NextReference() otherwise NoMore; + return *this.NextReference() otherwise NoMore; } macro NextReference():&T labels NoMore { @@ -147,12 +152,23 @@ macro ValidAllocationSize(sizeInBytes: intptr, map: Map): bool { type UninitializedHeapObject extends HeapObject; -extern macro AllocateAllowLOS(intptr): UninitializedHeapObject; extern macro GetInstanceTypeMap(constexpr InstanceType): Map; +extern macro Allocate( + intptr, constexpr AllocationFlag): UninitializedHeapObject; -macro Allocate(sizeInBytes: intptr, map: Map): UninitializedHeapObject { +const kAllocateBaseFlags: constexpr AllocationFlag = + AllocationFlag::kAllowLargeObjectAllocation; +macro AllocateFromNew( + sizeInBytes: intptr, map: Map, pretenured: bool): UninitializedHeapObject { assert(ValidAllocationSize(sizeInBytes, map)); - return AllocateAllowLOS(sizeInBytes); + if (pretenured) { + return Allocate( + sizeInBytes, + %RawConstexprCast( + kAllocateBaseFlags | AllocationFlag::kPretenured)); + } else { + 
return Allocate(sizeInBytes, kAllocateBaseFlags); + } } macro InitializeFieldsFromIterator( @@ -161,7 +177,7 @@ macro InitializeFieldsFromIterator( let originIterator = originIterator; while (true) { const ref:&T = targetIterator.NextReference() otherwise break; - * ref = originIterator.Next() otherwise unreachable; + *ref = originIterator.Next() otherwise unreachable; } } // Dummy implementations: do not initialize for UninitializedIterator. @@ -176,16 +192,40 @@ extern macro StoreDoubleHole(HeapObject, intptr); macro LoadFloat64OrHole(r:&float64_or_hole): float64_or_hole { return float64_or_hole{ is_hole: IsDoubleHole(r.object, r.offset - kHeapObjectTag), - value: * unsafe::NewReference(r.object, r.offset) + value: *unsafe::NewReference(r.object, r.offset) }; } macro StoreFloat64OrHole(r:&float64_or_hole, value: float64_or_hole) { if (value.is_hole) { StoreDoubleHole(r.object, r.offset - kHeapObjectTag); } else { - * unsafe::NewReference(r.object, r.offset) = value.value; + *unsafe::NewReference(r.object, r.offset) = value.value; } } + +macro DownCastForTorqueClass(o: HeapObject): + T labels CastError { + const map = o.map; + const minInstanceType = %MinInstanceType(); + const maxInstanceType = %MaxInstanceType(); + if constexpr (minInstanceType == maxInstanceType) { + if constexpr (%ClassHasMapConstant()) { + if (map != %GetClassMapConstant()) goto CastError; + } else { + if (map.instance_type != minInstanceType) goto CastError; + } + } else { + const diff: int32 = maxInstanceType - minInstanceType; + const offset = Convert(Convert(map.instance_type)) - + Convert(Convert( + FromConstexpr(minInstanceType))); + if (Unsigned(offset) > Unsigned(diff)) goto CastError; + } + return %RawDownCast(o); +} + +extern macro StaticAssert(bool, constexpr string); + } // namespace torque_internal // Indicates that an array-field should not be initialized. 
@@ -198,6 +238,12 @@ struct UninitializedIterator {} intrinsic %RawDownCast(x: From): To; intrinsic %RawConstexprCast(f: From): To; +intrinsic %MinInstanceType(): constexpr InstanceType; +intrinsic %MaxInstanceType(): constexpr InstanceType; + +intrinsic %ClassHasMapConstant(): constexpr bool; +intrinsic %GetClassMapConstant(): Map; + struct IteratorSequence { macro Empty(): bool { return this.first.Empty() && this.second.Empty(); diff --git a/deps/v8/src/builtins/typed-array-entries.tq b/deps/v8/src/builtins/typed-array-entries.tq new file mode 100644 index 00000000000000..6749a14e90ab0d --- /dev/null +++ b/deps/v8/src/builtins/typed-array-entries.tq @@ -0,0 +1,27 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-typed-array-gen.h' + +namespace typed_array { +const kBuiltinNameEntries: constexpr string = '%TypedArray%.prototype.entries'; + +// %TypedArray%.entries () +// https://tc39.github.io/ecma262/#sec-%typedarray%.entries +transitioning javascript builtin +TypedArrayPrototypeEntries(js-implicit context: NativeContext, receiver: JSAny)( + ...arguments): JSArrayIterator { + try { + const array: JSTypedArray = Cast(receiver) + otherwise NotTypedArray; + + EnsureAttached(array) otherwise IsDetached; + return CreateArrayIterator(array, IterationKind::kEntries); + } label NotTypedArray deferred { + ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameEntries); + } label IsDetached deferred { + ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameEntries); + } +} +} diff --git a/deps/v8/src/builtins/typed-array-keys.tq b/deps/v8/src/builtins/typed-array-keys.tq new file mode 100644 index 00000000000000..24c53c71052647 --- /dev/null +++ b/deps/v8/src/builtins/typed-array-keys.tq @@ -0,0 +1,27 @@ +// Copyright 2019 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include 'src/builtins/builtins-typed-array-gen.h' + +namespace typed_array { +const kBuiltinNameKeys: constexpr string = '%TypedArray%.prototype.keys'; + +// %TypedArray%.keys () +// https://tc39.github.io/ecma262/#sec-%typedarray%.keys +transitioning javascript builtin +TypedArrayPrototypeKeys(js-implicit context: NativeContext, receiver: JSAny)( + ...arguments): JSArrayIterator { + try { + const array: JSTypedArray = Cast(receiver) + otherwise NotTypedArray; + + EnsureAttached(array) otherwise IsDetached; + return CreateArrayIterator(array, IterationKind::kKeys); + } label NotTypedArray deferred { + ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameKeys); + } label IsDetached deferred { + ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameKeys); + } +} +} diff --git a/deps/v8/src/builtins/typed-array-subarray.tq b/deps/v8/src/builtins/typed-array-subarray.tq index 73d9e80c619500..46ce383b503619 100644 --- a/deps/v8/src/builtins/typed-array-subarray.tq +++ b/deps/v8/src/builtins/typed-array-subarray.tq @@ -17,7 +17,7 @@ transitioning javascript builtin TypedArrayPrototypeSubArray( MessageTemplate::kIncompatibleMethodReceiver, methodName); // 5. Let buffer be O.[[ViewedArrayBuffer]]. - const buffer = typed_array::GetBuffer(source); + const buffer = typed_array::GetTypedArrayBuffer(source); // 6. Let srcLength be O.[[ArrayLength]]. const srcLength: uintptr = source.length; diff --git a/deps/v8/src/builtins/typed-array-values.tq b/deps/v8/src/builtins/typed-array-values.tq new file mode 100644 index 00000000000000..a60aaaf707ff60 --- /dev/null +++ b/deps/v8/src/builtins/typed-array-values.tq @@ -0,0 +1,27 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include 'src/builtins/builtins-typed-array-gen.h' + +namespace typed_array { +const kBuiltinNameValues: constexpr string = '%TypedArray%.prototype.values'; + +// %TypedArray%.values () +// https://tc39.github.io/ecma262/#sec-%typedarray%.values +transitioning javascript builtin +TypedArrayPrototypeValues(js-implicit context: NativeContext, receiver: JSAny)( + ...arguments): JSArrayIterator { + try { + const array: JSTypedArray = Cast(receiver) + otherwise NotTypedArray; + + EnsureAttached(array) otherwise IsDetached; + return CreateArrayIterator(array, IterationKind::kValues); + } label NotTypedArray deferred { + ThrowTypeError(MessageTemplate::kNotTypedArray, kBuiltinNameValues); + } label IsDetached deferred { + ThrowTypeError(MessageTemplate::kDetachedOperation, kBuiltinNameValues); + } +} +} diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq index 033de32a1d0314..ca18b432ab9656 100644 --- a/deps/v8/src/builtins/typed-array.tq +++ b/deps/v8/src/builtins/typed-array.tq @@ -63,8 +63,8 @@ extern macro TypedArrayBuiltinsAssembler::CallCMemmove( RawPtr, RawPtr, uintptr): void; extern macro TypedArrayBuiltinsAssembler::CallCMemset( RawPtr, intptr, uintptr): void; -extern macro TypedArrayBuiltinsAssembler::GetBuffer(implicit context: Context)( - JSTypedArray): JSArrayBuffer; +extern macro GetTypedArrayBuffer(implicit context: Context)(JSTypedArray): + JSArrayBuffer; extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo( JSTypedArray): TypedArrayElementsInfo; extern macro TypedArrayBuiltinsAssembler::GetTypedArrayElementsInfo(Map): diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq index 097e39d430e7d3..1ebc610b2ab510 100644 --- a/deps/v8/src/builtins/wasm.tq +++ b/deps/v8/src/builtins/wasm.tq @@ -7,6 +7,10 @@ namespace runtime { extern runtime WasmMemoryGrow(Context, WasmInstanceObject, Smi): Smi; extern runtime WasmRefFunc(Context, WasmInstanceObject, Smi): JSAny; +extern runtime 
WasmTableInit( + Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny; +extern runtime WasmTableCopy( + Context, WasmInstanceObject, Object, Object, Smi, Smi, Smi): JSAny; extern runtime WasmFunctionTableGet( Context, WasmInstanceObject, Smi, Smi): JSAny; extern runtime WasmFunctionTableSet( @@ -14,14 +18,28 @@ extern runtime WasmFunctionTableSet( extern runtime ThrowWasmError(Context, Smi): JSAny; extern runtime Throw(Context, Object): JSAny; extern runtime ReThrow(Context, Object): JSAny; +extern runtime WasmTriggerTierUp(Context, WasmInstanceObject): JSAny; extern runtime WasmStackGuard(Context): JSAny; extern runtime ThrowWasmStackOverflow(Context): JSAny; extern runtime WasmTraceMemory(Context, Smi): JSAny; +extern runtime WasmTraceEnter(Context): JSAny; +extern runtime WasmTraceExit(Context, Smi): JSAny; +extern runtime WasmAtomicNotify( + Context, WasmInstanceObject, Number, Number): Smi; +extern runtime WasmI32AtomicWait( + Context, WasmInstanceObject, Number, Number, BigInt): Smi; +extern runtime WasmI64AtomicWait( + Context, WasmInstanceObject, Number, BigInt, BigInt): Smi; +extern runtime WasmAllocateRtt(Context, Smi, Map): Map; +} + +namespace unsafe { +extern macro TimesTaggedSize(intptr): intptr; +extern macro Allocate(intptr): HeapObject; } namespace wasm { -const kFuncTableType: - constexpr int31 generates 'wasm::ValueType::Kind::kFuncRef'; +const kFuncTableType: constexpr int31 generates 'wasm::HeapType::kFunc'; extern macro WasmBuiltinsAssembler::LoadInstanceFromFrame(): WasmInstanceObject; @@ -33,6 +51,8 @@ extern macro WasmBuiltinsAssembler::LoadTablesFromInstance(WasmInstanceObject): FixedArray; extern macro WasmBuiltinsAssembler::LoadExternalFunctionsFromInstance( WasmInstanceObject): FixedArray; +extern macro WasmBuiltinsAssembler::LoadManagedObjectMapsFromInstance( + WasmInstanceObject): FixedArray; macro LoadContextFromFrame(): NativeContext { return LoadContextFromInstance(LoadInstanceFromFrame()); @@ -61,6 +81,38 @@ builtin 
WasmMemoryGrow(numPages: int32): int32 { return SmiToInt32(result); } +builtin WasmTableInit( + dstRaw: uint32, srcRaw: uint32, sizeRaw: uint32, tableIndex: Smi, + segmentIndex: Smi): JSAny { + try { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + const dst: Smi = Convert(dstRaw) otherwise TableOutOfBounds; + const src: Smi = Convert(srcRaw) otherwise TableOutOfBounds; + const size: Smi = Convert(sizeRaw) otherwise TableOutOfBounds; + tail runtime::WasmTableInit( + LoadContextFromInstance(instance), instance, tableIndex, segmentIndex, + dst, src, size); + } label TableOutOfBounds deferred { + tail ThrowWasmTrapTableOutOfBounds(); + } +} + +builtin WasmTableCopy( + dstRaw: uint32, srcRaw: uint32, sizeRaw: uint32, dstTable: Smi, + srcTable: Smi): JSAny { + try { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + const dst: Smi = Convert(dstRaw) otherwise TableOutOfBounds; + const src: Smi = Convert(srcRaw) otherwise TableOutOfBounds; + const size: Smi = Convert(sizeRaw) otherwise TableOutOfBounds; + tail runtime::WasmTableCopy( + LoadContextFromInstance(instance), instance, dstTable, srcTable, dst, + src, size); + } label TableOutOfBounds deferred { + tail ThrowWasmTrapTableOutOfBounds(); + } +} + builtin WasmTableGet(tableIndex: intptr, index: int32): Object { const instance: WasmInstanceObject = LoadInstanceFromFrame(); const entryIndex: intptr = ChangeInt32ToIntPtr(index); @@ -145,10 +197,15 @@ builtin WasmThrow(exception: Object): JSAny { } builtin WasmRethrow(exception: Object): JSAny { - if (exception == Null) tail ThrowWasmTrapRethrowNullRef(); + if (exception == Null) tail ThrowWasmTrapRethrowNull(); tail runtime::ReThrow(LoadContextFromFrame(), exception); } +builtin WasmTriggerTierUp(): JSAny { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + tail runtime::WasmTriggerTierUp(LoadContextFromFrame(), instance); +} + builtin WasmStackGuard(): JSAny { tail runtime::WasmStackGuard(LoadContextFromFrame()); } @@ 
-161,11 +218,108 @@ builtin WasmTraceMemory(info: Smi): JSAny { tail runtime::WasmTraceMemory(LoadContextFromFrame(), info); } +builtin WasmTraceEnter(): JSAny { + tail runtime::WasmTraceEnter(LoadContextFromFrame()); +} + +builtin WasmTraceExit(info: Smi): JSAny { + tail runtime::WasmTraceExit(LoadContextFromFrame(), info); +} + builtin WasmAllocateJSArray(implicit context: Context)(size: Smi): JSArray { const map: Map = GetFastPackedElementsJSArrayMap(); return AllocateJSArray(ElementsKind::PACKED_ELEMENTS, map, size, size); } +builtin WasmAllocateRtt(implicit context: Context)( + typeIndex: Smi, parent: Map): Map { + tail runtime::WasmAllocateRtt(context, typeIndex, parent); +} + +builtin WasmAllocateStructWithRtt(implicit context: Context)(rtt: Map): + HeapObject { + const instanceSize: intptr = + unsafe::TimesTaggedSize(Convert(rtt.instance_size_in_words)); + const result: HeapObject = unsafe::Allocate(instanceSize); + *UnsafeConstCast(&result.map) = rtt; + return result; +} + +builtin WasmIsRttSubtype(implicit context: Context)(sub: Map, super: Map): Smi { + let map = sub; + while (true) { + if (map == super) return SmiConstant(1); // "true" + // This code relies on the fact that we use a non-WasmObject map as the + // end of the chain, e.g. for "rtt any", which then doesn't have a + // WasmTypeInfo. + // TODO(7748): Use a more explicit sentinel mechanism? + const maybeTypeInfo = map.constructor_or_back_pointer_or_native_context; + if (!Is(maybeTypeInfo)) return SmiConstant(0); // "false" + const typeInfo = %RawDownCast(maybeTypeInfo); + map = typeInfo.parent; + } + unreachable; +} + +// Redeclaration with different typing (value is an Object, not JSAny). +extern transitioning runtime +CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object); + +transitioning builtin WasmAllocateObjectWrapper(implicit context: Context)( + obj: Object): JSObject { + // Note: {obj} can be null, or i31ref. The code below is agnostic to that. 
+ const wrapper = NewJSObject(); + const symbol = WasmWrappedObjectSymbolConstant(); + CreateDataProperty(wrapper, symbol, obj); + return wrapper; +} + +builtin WasmInt32ToNumber(value: int32): Number { + return ChangeInt32ToTagged(value); +} + +builtin WasmUint32ToNumber(value: uint32): Number { + return ChangeUint32ToTagged(value); +} + +extern builtin I64ToBigInt(intptr): BigInt; + +builtin WasmAtomicNotify(address: uint32, count: uint32): uint32 { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + const result: Smi = runtime::WasmAtomicNotify( + LoadContextFromInstance(instance), instance, WasmUint32ToNumber(address), + WasmUint32ToNumber(count)); + return Unsigned(SmiToInt32(result)); +} + +builtin WasmI32AtomicWait64( + address: uint32, expectedValue: int32, timeout: intptr): uint32 { + if constexpr (Is64()) { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + const result: Smi = runtime::WasmI32AtomicWait( + LoadContextFromInstance(instance), instance, + WasmUint32ToNumber(address), WasmInt32ToNumber(expectedValue), + I64ToBigInt(timeout)); + return Unsigned(SmiToInt32(result)); + } else { + unreachable; + } +} + +builtin WasmI64AtomicWait64( + address: uint32, expectedValue: intptr, timeout: intptr): uint32 { + if constexpr (Is64()) { + const instance: WasmInstanceObject = LoadInstanceFromFrame(); + const result: Smi = runtime::WasmI64AtomicWait( + LoadContextFromInstance(instance), instance, + WasmUint32ToNumber(address), I64ToBigInt(expectedValue), + I64ToBigInt(timeout)); + return Unsigned(SmiToInt32(result)); + } else { + unreachable; + } +} + extern macro TryHasOwnProperty(HeapObject, Map, InstanceType, Name): never labels Found, NotFound, Bailout; type OnNonExistent constexpr 'OnNonExistent'; @@ -251,12 +405,12 @@ builtin ThrowWasmTrapTableOutOfBounds(): JSAny { tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapTableOutOfBounds)); } -builtin ThrowWasmTrapBrOnExnNullRef(): JSAny { - tail 
WasmTrap(SmiConstant(MessageTemplate::kWasmTrapBrOnExnNullRef)); +builtin ThrowWasmTrapBrOnExnNull(): JSAny { + tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapBrOnExnNull)); } -builtin ThrowWasmTrapRethrowNullRef(): JSAny { - tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRethrowNullRef)); +builtin ThrowWasmTrapRethrowNull(): JSAny { + tail WasmTrap(SmiConstant(MessageTemplate::kWasmTrapRethrowNull)); } builtin ThrowWasmTrapNullDereference(): JSAny { diff --git a/deps/v8/src/builtins/weak-ref.tq b/deps/v8/src/builtins/weak-ref.tq new file mode 100644 index 00000000000000..18385e52db306f --- /dev/null +++ b/deps/v8/src/builtins/weak-ref.tq @@ -0,0 +1,59 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +namespace runtime { + +extern runtime JSWeakRefAddToKeptObjects(implicit context: Context)(JSReceiver); + +} // namespace runtime + +namespace weakref { + +transitioning javascript builtin +WeakRefConstructor( + js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny, + target: JSFunction)(weakTarget: JSAny): JSWeakRef { + // 1. If NewTarget is undefined, throw a TypeError exception. + if (newTarget == Undefined) { + ThrowTypeError(MessageTemplate::kConstructorNotFunction, 'WeakRef'); + } + // 2. If Type(target) is not Object, throw a TypeError exception. + const weakTarget = Cast(weakTarget) otherwise + ThrowTypeError( + MessageTemplate::kWeakRefsWeakRefConstructorTargetMustBeObject); + // 3. Let weakRef be ? OrdinaryCreateFromConstructor(NewTarget, + // "%WeakRefPrototype%", « [[WeakRefTarget]] »). + const map = GetDerivedMap(target, UnsafeCast(newTarget)); + const weakRef = UnsafeCast(AllocateFastOrSlowJSObjectFromMap(map)); + // 4. Perfom ! AddToKeptObjects(target). + runtime::JSWeakRefAddToKeptObjects(weakTarget); + // 5. Set weakRef.[[WeakRefTarget]] to target. + weakRef.target = weakTarget; + // 6. Return weakRef. 
+ return weakRef; +} + +transitioning javascript builtin +WeakRefDeref(js-implicit context: NativeContext, receiver: JSAny)(): JSAny { + // 1. Let weakRef be the this value. + // 2. Perform ? RequireInternalSlot(weakRef, [[WeakRefTarget]]). + const weakRef = Cast(receiver) otherwise + ThrowTypeError( + MessageTemplate::kIncompatibleMethodReceiver, 'WeakRef.prototype.deref', + receiver); + // 3. Let target be the value of weakRef.[[WeakRefTarget]]. + const target = weakRef.target; + // 4. If target is not empty, + // a. Perform ! AddToKeptObjects(target). + // b. Return target. + // 5. Return undefined. + if (target != Undefined) { + // JSWeakRefAddToKeptObjects might allocate and cause a GC, but it + // won't clear `target` since we hold it here on the stack. + runtime::JSWeakRefAddToKeptObjects(UnsafeCast(target)); + } + return target; +} + +} // namespace weakrefs diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index bfabe26292b8d3..e12ff0bcbef975 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -2,6 +2,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include "src/codegen/x64/register-x64.h" #if V8_TARGET_ARCH_X64 #include "src/api/api-arguments.h" @@ -25,6 +26,7 @@ #include "src/objects/objects-inl.h" #include "src/objects/smi.h" #include "src/wasm/baseline/liftoff-assembler-defs.h" +#include "src/wasm/object-access.h" #include "src/wasm/wasm-linkage.h" #include "src/wasm/wasm-objects.h" @@ -1634,6 +1636,20 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, const RegisterConfiguration* config(RegisterConfiguration::Default()); int allocatable_register_count = config->num_allocatable_general_registers(); if (with_result) { +#ifdef V8_REVERSE_JSARGS + if (java_script_builtin) { + // kScratchRegister is not included in the allocateable registers. 
+ __ movq(kScratchRegister, rax); + } else { + // Overwrite the hole inserted by the deoptimizer with the return value + // from the LAZY deopt point. + __ movq( + Operand(rsp, config->num_allocatable_general_registers() * + kSystemPointerSize + + BuiltinContinuationFrameConstants::kFixedFrameSize), + rax); + } +#else // Overwrite the hole inserted by the deoptimizer with the return value from // the LAZY deopt point. __ movq( @@ -1641,6 +1657,7 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, kSystemPointerSize + BuiltinContinuationFrameConstants::kFixedFrameSize), rax); +#endif } for (int i = allocatable_register_count - 1; i >= 0; --i) { int code = config->GetAllocatableGeneralCode(i); @@ -1649,6 +1666,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm, __ SmiUntag(Register::from_code(code)); } } +#ifdef V8_REVERSE_JSARGS + if (with_result && java_script_builtin) { + // Overwrite the hole inserted by the deoptimizer with the return value from + // the LAZY deopt point. rax contains the arguments count, the return value + // from LAZY is always the last argument. + __ movq(Operand(rsp, rax, times_system_pointer_size, + BuiltinContinuationFrameConstants::kFixedFrameSize), + kScratchRegister); + } +#endif __ movq( rbp, Operand(rsp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp)); @@ -2352,13 +2379,27 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm, Label loop; __ addl(rax, r8); __ PopReturnAddressTo(rcx); +#ifdef V8_REVERSE_JSARGS + // The new receiver is already on the stack. Save it to push it later. + __ Pop(kScratchRegister); +#endif __ bind(&loop); { __ decl(r8); +#ifdef V8_REVERSE_JSARGS + // Skips the old receiver. 
+ __ Push(Operand(rbx, r8, times_system_pointer_size, + kFPOnStackSize + kPCOnStackSize + kSystemPointerSize)); +#else __ Push(Operand(rbx, r8, times_system_pointer_size, kFPOnStackSize + kPCOnStackSize)); +#endif __ j(not_zero, &loop); } +#ifdef V8_REVERSE_JSARGS + // Recover the new receiver. + __ Push(kScratchRegister); +#endif __ PushReturnAddressFrom(rcx); } } @@ -3159,6 +3200,141 @@ void Builtins::Generate_DoubleToI(MacroAssembler* masm) { __ ret(0); } +void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) { + // Set up the stackframe. + __ EnterFrame(StackFrame::JS_TO_WASM); + + Register closure = rdi; + Register shared_function_info = rbx; + __ LoadAnyTaggedField( + shared_function_info, + MemOperand( + closure, + wasm::ObjectAccess::SharedFunctionInfoOffsetInTaggedJSFunction())); + closure = no_reg; + + Register function_data = shared_function_info; + __ LoadAnyTaggedField( + function_data, + MemOperand(shared_function_info, + SharedFunctionInfo::kFunctionDataOffset - kHeapObjectTag)); + shared_function_info = no_reg; + + Register wasm_instance = rsi; + __ LoadAnyTaggedField( + wasm_instance, + MemOperand(function_data, + WasmExportedFunctionData::kInstanceOffset - kHeapObjectTag)); + + // Int signature_type gives the number of int32 params (can be only 0 or 1). + Register signature_type = r9; + __ SmiUntagField( + signature_type, + MemOperand(function_data, WasmExportedFunctionData::kSignatureTypeOffset - + kHeapObjectTag)); + + __ cmpl(signature_type, Immediate(0)); + + // In 0 param case jump through parameter handling. + Label params_done; + __ j(equal, ¶ms_done); + + // Param handling. + Register param = rax; + const int firstParamOffset = 16; + __ movq(param, MemOperand(rbp, firstParamOffset)); + + Label not_smi; + __ JumpIfNotSmi(param, ¬_smi); + + // Change from smi to int32. + __ SmiUntag(param); + + __ bind(¶ms_done); + + // Set thread_in_wasm_flag. 
+ Register thread_in_wasm_flag_addr = rdx; + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(1)); + + Register jump_table_start = thread_in_wasm_flag_addr; + __ movq(jump_table_start, + MemOperand(wasm_instance, + wasm::ObjectAccess::ToTagged( + WasmInstanceObject::kJumpTableStartOffset))); + thread_in_wasm_flag_addr = no_reg; + + Register jump_table_offset = function_data; + __ DecompressTaggedSigned( + jump_table_offset, + MemOperand( + function_data, + WasmExportedFunctionData::kJumpTableOffsetOffset - kHeapObjectTag)); + + // Change from smi to integer. + __ SmiUntag(jump_table_offset); + + Register function_entry = jump_table_offset; + __ addq(function_entry, jump_table_start); + jump_table_offset = no_reg; + jump_table_start = no_reg; + + __ pushq(signature_type); + + __ call(function_entry); + function_entry = no_reg; + + __ popq(signature_type); + + // Unset thread_in_wasm_flag. + thread_in_wasm_flag_addr = r8; + __ movq( + thread_in_wasm_flag_addr, + MemOperand(kRootRegister, Isolate::thread_in_wasm_flag_address_offset())); + __ movl(MemOperand(thread_in_wasm_flag_addr, 0), Immediate(0)); + + Register return_reg = rax; + __ LoadRoot(return_reg, RootIndex::kUndefinedValue); + + // Deconstrunct the stack frame. + __ LeaveFrame(StackFrame::JS_TO_WASM); + + __ cmpl(signature_type, Immediate(0)); + + Label ret_0_param; + __ j(equal, &ret_0_param); + + __ ret(16); + + __ bind(&ret_0_param); + __ ret(8); + + // Handle the conversion to int32 when the param is not a smi. + __ bind(¬_smi); + + // The order of pushes is important. We want the heap objects, that should be + // scanned by GC, to be on the top of the stack. 
+ __ pushq(signature_type); + __ pushq(wasm_instance); + __ pushq(function_data); + __ LoadAnyTaggedField( + rsi, + MemOperand(wasm_instance, wasm::ObjectAccess::ToTagged( + WasmInstanceObject::kNativeContextOffset))); + // We had to prepare the parameters for the Call: + // put the value into rax, and the context to rsi. + __ Call(BUILTIN_CODE(masm->isolate(), WasmTaggedNonSmiToInt32), + RelocInfo::CODE_TARGET); + + __ popq(function_data); + __ popq(wasm_instance); + __ popq(signature_type); + + __ jmp(¶ms_done); +} + namespace { int Offset(ExternalReference ref0, ExternalReference ref1) { diff --git a/deps/v8/src/codegen/DEPS b/deps/v8/src/codegen/DEPS index ca53b615417e61..67e29bc97a2efe 100644 --- a/deps/v8/src/codegen/DEPS +++ b/deps/v8/src/codegen/DEPS @@ -4,8 +4,10 @@ specific_include_rules = { "external-reference.cc": [ - # Required to call IrregexpInterpreter::NativeMatch from builtin. + # Required to call into IrregexpInterpreter and RegexpExperimental from + # builtin. "+src/regexp/regexp-interpreter.h", + "+src/regexp/experimental/experimental.h", "+src/regexp/regexp-macro-assembler-arch.h", ], } diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc index 9032714f574f0f..00d0644f73494e 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.cc +++ b/deps/v8/src/codegen/arm/assembler-arm.cc @@ -2621,16 +2621,28 @@ static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) { *hi = i >> 32; } +static void WriteVmovIntImmEncoding(uint8_t imm, uint32_t* encoding) { + // Integer promotion from uint8_t to int makes these all okay. + *encoding = ((imm & 0x80) << (24 - 7)); // a + *encoding |= ((imm & 0x70) << (16 - 4)); // bcd + *encoding |= (imm & 0x0f); // efgh +} + // This checks if imm can be encoded into an immediate for vmov. // See Table A7-15 in ARM DDI 0406C.d. -// Currently only supports the first row of the table. 
-static bool FitsVmovImm64(uint64_t imm, uint32_t* encoding) { +// Currently only supports the first row and op=0 && cmode=1110. +static bool FitsVmovIntImm(uint64_t imm, uint32_t* encoding, uint8_t* cmode) { uint32_t lo = imm & 0xFFFFFFFF; uint32_t hi = imm >> 32; - if (lo == hi && ((lo & 0xffffff00) == 0)) { - *encoding = ((lo & 0x80) << (24 - 7)); // a - *encoding |= ((lo & 0x70) << (16 - 4)); // bcd - *encoding |= (lo & 0x0f); // efgh + if ((lo == hi && ((lo & 0xffffff00) == 0))) { + WriteVmovIntImmEncoding(imm & 0xff, encoding); + *cmode = 0; + return true; + } else if ((lo == hi) && ((lo & 0xffff) == (lo >> 16)) && + ((lo & 0xff) == (lo >> 24))) { + // Check that all bytes in imm are the same. + WriteVmovIntImmEncoding(imm & 0xff, encoding); + *cmode = 0xe; return true; } @@ -2639,15 +2651,17 @@ static bool FitsVmovImm64(uint64_t imm, uint32_t* encoding) { void Assembler::vmov(const QwNeonRegister dst, uint64_t imm) { uint32_t enc; - if (CpuFeatures::IsSupported(VFPv3) && FitsVmovImm64(imm, &enc)) { + uint8_t cmode; + uint8_t op = 0; + if (CpuFeatures::IsSupported(VFPv3) && FitsVmovIntImm(imm, &enc, &cmode)) { CpuFeatureScope scope(this, VFPv3); // Instruction details available in ARM DDI 0406C.b, A8-937. 
// 001i1(27-23) | D(22) | 000(21-19) | imm3(18-16) | Vd(15-12) | cmode(11-8) // | 0(7) | Q(6) | op(5) | 4(1) | imm4(3-0) int vd, d; dst.split_code(&vd, &d); - emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | 0x1 * B6 | - 0x1 * B4 | enc); + emit(kSpecialCondition | 0x05 * B23 | d * B22 | vd * B12 | cmode * B8 | + 0x1 * B6 | op * B5 | 0x1 * B4 | enc); } else { UNIMPLEMENTED(); } @@ -3892,7 +3906,18 @@ void Assembler::vcvt_u32_f32(QwNeonRegister dst, QwNeonRegister src) { emit(EncodeNeonVCVT(U32, dst, F32, src)); } -enum UnaryOp { VMVN, VSWP, VABS, VABSF, VNEG, VNEGF }; +enum UnaryOp { + VMVN, + VSWP, + VABS, + VABSF, + VNEG, + VNEGF, + VRINTM, + VRINTN, + VRINTP, + VRINTZ +}; static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size, int dst_code, int src_code) { @@ -3920,6 +3945,18 @@ static Instr EncodeNeonUnaryOp(UnaryOp op, NeonRegType reg_type, NeonSize size, DCHECK_EQ(Neon32, size); op_encoding = B16 | B10 | 0x7 * B7; break; + case VRINTM: + op_encoding = B17 | 0xD * B7; + break; + case VRINTN: + op_encoding = B17 | 0x8 * B7; + break; + case VRINTP: + op_encoding = B17 | 0xF * B7; + break; + case VRINTZ: + op_encoding = B17 | 0xB * B7; + break; default: UNREACHABLE(); } @@ -4315,7 +4352,6 @@ void Assembler::vmull(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src1, src2.split_code(&vm, &m); int size = NeonSz(dt); int u = NeonU(dt); - if (!u) UNIMPLEMENTED(); emit(0xFU * B28 | B25 | u * B24 | B23 | d * B22 | size * B20 | vn * B16 | vd * B12 | 0xC * B8 | n * B7 | m * B5 | vm); } @@ -4575,6 +4611,38 @@ void Assembler::vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1, emit(EncodeNeonPairwiseOp(VPMAX, dt, dst, src1, src2)); } +void Assembler::vrintm(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src) { + // SIMD vector round floating-point to integer towards -Infinity. + // See ARM DDI 0487F.b, F6-5493. 
+ DCHECK(IsEnabled(ARMv8)); + emit(EncodeNeonUnaryOp(VRINTM, NEON_Q, NeonSize(dt), dst.code(), src.code())); +} + +void Assembler::vrintn(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src) { + // SIMD vector round floating-point to integer to Nearest. + // See ARM DDI 0487F.b, F6-5497. + DCHECK(IsEnabled(ARMv8)); + emit(EncodeNeonUnaryOp(VRINTN, NEON_Q, NeonSize(dt), dst.code(), src.code())); +} + +void Assembler::vrintp(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src) { + // SIMD vector round floating-point to integer towards +Infinity. + // See ARM DDI 0487F.b, F6-5501. + DCHECK(IsEnabled(ARMv8)); + emit(EncodeNeonUnaryOp(VRINTP, NEON_Q, NeonSize(dt), dst.code(), src.code())); +} + +void Assembler::vrintz(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src) { + // SIMD vector round floating-point to integer towards Zero. + // See ARM DDI 0487F.b, F6-5511. + DCHECK(IsEnabled(ARMv8)); + emit(EncodeNeonUnaryOp(VRINTZ, NEON_Q, NeonSize(dt), dst.code(), src.code())); +} + void Assembler::vtst(NeonSize size, QwNeonRegister dst, QwNeonRegister src1, QwNeonRegister src2) { DCHECK(IsEnabled(NEON)); diff --git a/deps/v8/src/codegen/arm/assembler-arm.h b/deps/v8/src/codegen/arm/assembler-arm.h index 61205760df046b..18631e2ece5422 100644 --- a/deps/v8/src/codegen/arm/assembler-arm.h +++ b/deps/v8/src/codegen/arm/assembler-arm.h @@ -820,7 +820,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vsqrt(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond = al); - // ARMv8 rounding instructions. + // ARMv8 rounding instructions (Scalar). 
void vrinta(const SwVfpRegister dst, const SwVfpRegister src); void vrinta(const DwVfpRegister dst, const DwVfpRegister src); void vrintn(const SwVfpRegister dst, const SwVfpRegister src); @@ -908,6 +908,17 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { DwVfpRegister src2); void vpmax(NeonDataType dt, DwVfpRegister dst, DwVfpRegister src1, DwVfpRegister src2); + + // ARMv8 rounding instructions (NEON). + void vrintm(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src); + void vrintn(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src); + void vrintp(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src); + void vrintz(NeonDataType dt, const QwNeonRegister dst, + const QwNeonRegister src); + void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, int shift); void vshl(NeonDataType dt, QwNeonRegister dst, QwNeonRegister src, QwNeonRegister shift); diff --git a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc index 5a4e08dc77c0bd..b457376610c0d0 100644 --- a/deps/v8/src/codegen/arm/interface-descriptors-arm.cc +++ b/deps/v8/src/codegen/arm/interface-descriptors-arm.cc @@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(kParameterCount, default_stub_registers); } -const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { - return r1; -} -const Register FastNewFunctionContextDescriptor::SlotsRegister() { return r0; } - const Register LoadDescriptor::ReceiverRegister() { return r1; } const Register LoadDescriptor::NameRegister() { return r2; } const Register LoadDescriptor::SlotRegister() { return r0; } @@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void AllocateHeapNumberDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - 
data->InitializePlatformSpecific(0, nullptr); -} - void CompareDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { Register registers[] = {r1, r0}; @@ -295,6 +285,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific( DefaultInitializePlatformSpecific(data, 4); } +void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + void Compare_WithFeedbackDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // TODO(v8:8888): Implement on this platform. 
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 7e5fa8cef1c1c2..7b9e73e1d911ce 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -2455,7 +2455,7 @@ void TurboAssembler::CheckPageFlag(Register object, int mask, Condition cc, Register scratch = temps.Acquire(); DCHECK(cc == eq || cc == ne); Bfc(scratch, object, 0, kPageSizeBits); - ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); tst(scratch, Operand(mask)); b(cc, condition_met); } diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.cc b/deps/v8/src/codegen/arm64/assembler-arm64.cc index 97a57d6f3c6d5a..2e21ab913d7b5e 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/assembler-arm64.cc @@ -41,19 +41,66 @@ namespace v8 { namespace internal { +namespace { + +#ifdef USE_SIMULATOR +static unsigned SimulatorFeaturesFromCommandLine() { + if (strcmp(FLAG_sim_arm64_optional_features, "none") == 0) { + return 0; + } + if (strcmp(FLAG_sim_arm64_optional_features, "all") == 0) { + return (1u << NUMBER_OF_CPU_FEATURES) - 1; + } + fprintf( + stderr, + "Error: unrecognised value for --sim-arm64-optional-features ('%s').\n", + FLAG_sim_arm64_optional_features); + fprintf(stderr, + "Supported values are: none\n" + " all\n"); + FATAL("sim-arm64-optional-features"); +} +#endif // USE_SIMULATOR + +static constexpr unsigned CpuFeaturesFromCompiler() { + unsigned features = 0; +#if defined(__ARM_FEATURE_JCVT) + features |= 1u << JSCVT; +#endif + return features; +} + +} // namespace + // ----------------------------------------------------------------------------- // CpuFeatures implementation. void CpuFeatures::ProbeImpl(bool cross_compile) { - // AArch64 has no configuration options, no further probing is required. 
- supported_ = 0; - // Only use statically determined features for cross compile (snapshot). - if (cross_compile) return; + if (cross_compile) { + supported_ |= CpuFeaturesFromCompiler(); + return; + } // We used to probe for coherent cache support, but on older CPUs it // causes crashes (crbug.com/524337), and newer CPUs don't even have // the feature any more. + +#ifdef USE_SIMULATOR + supported_ |= SimulatorFeaturesFromCommandLine(); +#else + // Probe for additional features at runtime. + base::CPU cpu; + unsigned runtime = 0; + if (cpu.has_jscvt()) { + runtime |= 1u << JSCVT; + } + + // Use the best of the features found by CPU detection and those inferred from + // the build system. + supported_ |= CpuFeaturesFromCompiler(); + supported_ |= runtime; +#endif // USE_SIMULATOR } void CpuFeatures::PrintTarget() {} @@ -1115,10 +1162,10 @@ void Assembler::cls(const Register& rd, const Register& rn) { DataProcessing1Source(rd, rn, CLS); } -void Assembler::pacia1716() { Emit(PACIA1716); } -void Assembler::autia1716() { Emit(AUTIA1716); } -void Assembler::paciasp() { Emit(PACIASP); } -void Assembler::autiasp() { Emit(AUTIASP); } +void Assembler::pacib1716() { Emit(PACIB1716); } +void Assembler::autib1716() { Emit(AUTIB1716); } +void Assembler::pacibsp() { Emit(PACIBSP); } +void Assembler::autibsp() { Emit(AUTIBSP); } void Assembler::bti(BranchTargetIdentifier id) { SystemHint op; @@ -1136,9 +1183,9 @@ void Assembler::bti(BranchTargetIdentifier id) { op = BTI_jc; break; case BranchTargetIdentifier::kNone: - case BranchTargetIdentifier::kPaciasp: + case BranchTargetIdentifier::kPacibsp: // We always want to generate a BTI instruction here, so disallow - // skipping its generation or generating a PACIASP instead. + // skipping its generation or generating a PACIBSP instead. 
UNREACHABLE(); } hint(op); @@ -2714,6 +2761,11 @@ void Assembler::fcvtxn2(const VRegister& vd, const VRegister& vn) { Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd)); } +void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) { + DCHECK(rd.IsW() && vn.Is1D()); + Emit(FJCVTZS | Rn(vn) | Rd(rd)); +} + #define NEON_FP2REGMISC_FCVT_LIST(V) \ V(fcvtnu, NEON_FCVTNU, FCVTNU) \ V(fcvtns, NEON_FCVTNS, FCVTNS) \ diff --git a/deps/v8/src/codegen/arm64/assembler-arm64.h b/deps/v8/src/codegen/arm64/assembler-arm64.h index a9e8a5e85ada45..f787bad464f82d 100644 --- a/deps/v8/src/codegen/arm64/assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/assembler-arm64.h @@ -780,21 +780,21 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void clz(const Register& rd, const Register& rn); void cls(const Register& rd, const Register& rn); - // Pointer Authentication Code for Instruction address, using key A, with + // Pointer Authentication Code for Instruction address, using key B, with // address in x17 and modifier in x16 [Armv8.3]. - void pacia1716(); + void pacib1716(); - // Pointer Authentication Code for Instruction address, using key A, with + // Pointer Authentication Code for Instruction address, using key B, with // address in LR and modifier in SP [Armv8.3]. - void paciasp(); + void pacibsp(); - // Authenticate Instruction address, using key A, with address in x17 and + // Authenticate Instruction address, using key B, with address in x17 and // modifier in x16 [Armv8.3]. - void autia1716(); + void autib1716(); - // Authenticate Instruction address, using key A, with address in LR and + // Authenticate Instruction address, using key B, with address in LR and // modifier in SP [Armv8.3]. - void autiasp(); + void autibsp(); // Memory instructions. @@ -1750,6 +1750,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { // FP convert to signed integer, nearest with ties to even. 
void fcvtns(const Register& rd, const VRegister& vn); + // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3]. + void fjcvtzs(const Register& rd, const VRegister& vn); + // FP convert to unsigned integer, nearest with ties to even. void fcvtnu(const Register& rd, const VRegister& vn); diff --git a/deps/v8/src/codegen/arm64/constants-arm64.h b/deps/v8/src/codegen/arm64/constants-arm64.h index e63962993a7d41..52790b9faf4a87 100644 --- a/deps/v8/src/codegen/arm64/constants-arm64.h +++ b/deps/v8/src/codegen/arm64/constants-arm64.h @@ -412,9 +412,9 @@ enum class BranchTargetIdentifier { // Emit a "BTI jc" instruction, which is a combination of "BTI j" and "BTI c". kBtiJumpCall, - // Emit a PACIASP instruction, which acts like a "BTI c" or a "BTI jc", based - // on the value of SCTLR_EL1.BT0. - kPaciasp + // Emit a PACIBSP instruction, which acts like a "BTI c" or a "BTI jc", + // based on the value of SCTLR_EL1.BT0. + kPacibsp }; enum BarrierDomain { @@ -793,10 +793,10 @@ enum SystemPAuthOp : uint32_t { SystemPAuthFixed = 0xD503211F, SystemPAuthFMask = 0xFFFFFD1F, SystemPAuthMask = 0xFFFFFFFF, - PACIA1716 = SystemPAuthFixed | 0x00000100, - AUTIA1716 = SystemPAuthFixed | 0x00000180, - PACIASP = SystemPAuthFixed | 0x00000320, - AUTIASP = SystemPAuthFixed | 0x000003A0 + PACIB1716 = SystemPAuthFixed | 0x00000140, + AUTIB1716 = SystemPAuthFixed | 0x000001C0, + PACIBSP = SystemPAuthFixed | 0x00000360, + AUTIBSP = SystemPAuthFixed | 0x000003E0 }; // Any load or store (including pair). @@ -1325,7 +1325,8 @@ enum FPIntegerConvertOp : uint32_t { FMOV_xd = FMOV_ws | SixtyFourBits | FP64, FMOV_dx = FMOV_sw | SixtyFourBits | FP64, FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000, - FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000 + FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000, + FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000 }; // Conversion between fixed point and floating point. 
diff --git a/deps/v8/src/codegen/arm64/cpu-arm64.cc b/deps/v8/src/codegen/arm64/cpu-arm64.cc index 32bcc6f268ea10..d7bd4834b0ea0a 100644 --- a/deps/v8/src/codegen/arm64/cpu-arm64.cc +++ b/deps/v8/src/codegen/arm64/cpu-arm64.cc @@ -9,6 +9,10 @@ #include "src/codegen/arm64/utils-arm64.h" #include "src/codegen/cpu-features.h" +#if V8_OS_MACOSX +#include +#endif + namespace v8 { namespace internal { @@ -41,6 +45,8 @@ void CpuFeatures::FlushICache(void* address, size_t length) { #if defined(V8_HOST_ARCH_ARM64) #if defined(V8_OS_WIN) ::FlushInstructionCache(GetCurrentProcess(), address, length); +#elif defined(V8_OS_MACOSX) + sys_icache_invalidate(address, length); #else // The code below assumes user space cache operations are allowed. The goal // of this routine is to make sure the code generated is visible to the I diff --git a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h index 25d69b38983567..1a7d483dea9960 100644 --- a/deps/v8/src/codegen/arm64/decoder-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/decoder-arm64-inl.h @@ -538,7 +538,6 @@ void Decoder::DecodeFP(Instruction* instr) { (instr->Mask(0x20C60000) == 0x00840000) || (instr->Mask(0xA0C60000) == 0x80060000) || (instr->Mask(0xA0C60000) == 0x00860000) || - (instr->Mask(0xA0C60000) == 0x00460000) || (instr->Mask(0xA0CE0000) == 0x80860000) || (instr->Mask(0xA0CE0000) == 0x804E0000) || (instr->Mask(0xA0CE0000) == 0x000E0000) || diff --git a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc index 9f0592244491be..61c8947bd44692 100644 --- a/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc +++ b/deps/v8/src/codegen/arm64/interface-descriptors-arm64.cc @@ -46,11 +46,6 @@ void EphemeronKeyBarrierDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(kParameterCount, default_stub_registers); } -const Register FastNewFunctionContextDescriptor::ScopeInfoRegister() { - return x1; -} 
-const Register FastNewFunctionContextDescriptor::SlotsRegister() { return x0; } - const Register LoadDescriptor::ReceiverRegister() { return x1; } const Register LoadDescriptor::NameRegister() { return x2; } const Register LoadDescriptor::SlotRegister() { return x0; } @@ -191,11 +186,6 @@ void AbortDescriptor::InitializePlatformSpecific( data->InitializePlatformSpecific(arraysize(registers), registers); } -void AllocateHeapNumberDescriptor::InitializePlatformSpecific( - CallInterfaceDescriptorData* data) { - data->InitializePlatformSpecific(0, nullptr); -} - void CompareDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // x1: left operand @@ -299,6 +289,30 @@ void CallTrampoline_WithFeedbackDescriptor::InitializePlatformSpecific( DefaultInitializePlatformSpecific(data, 4); } +void CallWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void CallWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void ConstructWithArrayLike_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + +void ConstructWithSpread_WithFeedbackDescriptor::InitializePlatformSpecific( + CallInterfaceDescriptorData* data) { + // TODO(v8:8888): Implement on this platform. + DefaultInitializePlatformSpecific(data, 4); +} + void Compare_WithFeedbackDescriptor::InitializePlatformSpecific( CallInterfaceDescriptorData* data) { // TODO(v8:8888): Implement on this platform. 
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h index 93b8136d9a997a..56be64693d968c 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64-inl.h @@ -318,23 +318,15 @@ void TurboAssembler::Bind(Label* label, BranchTargetIdentifier id) { // instructions between the bind and the target identifier instruction. InstructionAccurateScope scope(this, 1); bind(label); - if (id == BranchTargetIdentifier::kPaciasp) { - paciasp(); + if (id == BranchTargetIdentifier::kPacibsp) { + pacibsp(); } else { bti(id); } } } -void TurboAssembler::CodeEntry() { - // Since `kJavaScriptCallCodeStartRegister` is the target register for tail - // calls, we have to allow for jumps too, with "BTI jc". We also allow the - // register allocator to pick the target register for calls made from - // WebAssembly. - // TODO(v8:10026): Consider changing this so that we can use CallTarget(), - // which maps to "BTI c", here instead. 
- JumpOrCallTarget(); -} +void TurboAssembler::CodeEntry() { CallTarget(); } void TurboAssembler::ExceptionHandler() { JumpTarget(); } @@ -1136,7 +1128,7 @@ void TurboAssembler::Push(const CPURegister& src0, const CPURegister& src1, #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kSignLR) { - Paciasp(); + Pacibsp(); } #endif @@ -1153,7 +1145,7 @@ void TurboAssembler::Push(const Register& src0, const VRegister& src1) { DCHECK_IMPLIES((lr_mode == kDontStoreLR), ((src0 != lr) && (src1 != lr))); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kSignLR) { - Paciasp(); + Pacibsp(); } #endif @@ -1188,7 +1180,7 @@ void TurboAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kAuthLR) { - Autiasp(); + Autibsp(); } #endif } @@ -1199,7 +1191,7 @@ void TurboAssembler::Poke(const CPURegister& src, const Operand& offset) { DCHECK_IMPLIES((lr_mode == kDontStoreLR), (src != lr)); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kSignLR) { - Paciasp(); + Pacibsp(); } #endif @@ -1228,7 +1220,7 @@ void TurboAssembler::Peek(const CPURegister& dst, const Operand& offset) { DCHECK_IMPLIES((lr_mode == kDontLoadLR), (dst != lr)); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kAuthLR) { - Autiasp(); + Autibsp(); } #endif } @@ -1238,7 +1230,7 @@ void TurboAssembler::PushCPURegList(CPURegList registers) { DCHECK_IMPLIES((lr_mode == kDontStoreLR), !registers.IncludesAliasOf(lr)); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kSignLR && registers.IncludesAliasOf(lr)) { - Paciasp(); + Pacibsp(); } #endif @@ -1280,7 +1272,7 @@ void TurboAssembler::PopCPURegList(CPURegList registers) { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY if (lr_mode == kAuthLR && contains_lr) { - Autiasp(); + Autibsp(); } #endif } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index c157df29966975..2d3e27e5302f40 100644 --- 
a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -1197,7 +1197,7 @@ void MacroAssembler::PeekPair(const CPURegister& dst1, const CPURegister& dst2, void MacroAssembler::PushCalleeSavedRegisters() { #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY - Paciasp(); + Pacibsp(); #endif { @@ -1249,7 +1249,7 @@ void MacroAssembler::PopCalleeSavedRegisters() { } #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY - Autiasp(); + Autibsp(); #endif } @@ -1953,7 +1953,13 @@ void TurboAssembler::CallCodeObject(Register code_object) { void TurboAssembler::JumpCodeObject(Register code_object) { LoadCodeObjectEntry(code_object, code_object); - Jump(code_object); + + UseScratchRegisterScope temps(this); + if (code_object != x17) { + temps.Exclude(x17); + Mov(x17, code_object); + } + Jump(x17); } void TurboAssembler::StoreReturnAddressAndCall(Register target) { @@ -1971,7 +1977,7 @@ void TurboAssembler::StoreReturnAddressAndCall(Register target) { Adr(x17, &return_location); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Add(x16, sp, kSystemPointerSize); - Pacia1716(); + Pacib1716(); #endif Poke(x17, 0); @@ -2263,6 +2269,11 @@ void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone, DoubleRegister double_input, StubCallMode stub_mode, LinkRegisterStatus lr_status) { + if (CpuFeatures::IsSupported(JSCVT)) { + Fjcvtzs(result.W(), double_input); + return; + } + Label done; // Try to convert the double to an int64. 
If successful, the bottom 32 bits @@ -2650,7 +2661,7 @@ void TurboAssembler::CheckPageFlag(const Register& object, int mask, UseScratchRegisterScope temps(this); Register scratch = temps.AcquireX(); And(scratch, object, ~kPageAlignmentMask); - Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); + Ldr(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset)); if (cc == eq) { TestAndBranchIfAnySet(scratch, mask, condition_met); } else { @@ -3243,7 +3254,7 @@ void TurboAssembler::RestoreFPAndLR() { // We can load the return address directly into x17. Add(x16, fp, StandardFrameConstants::kCallerSPOffset); Ldp(fp, x17, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); - Autia1716(); + Autib1716(); Mov(lr, x17); #else Ldp(fp, lr, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); @@ -3256,7 +3267,7 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) { Adr(x17, return_location); #ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY Add(x16, fp, WasmExitFrameConstants::kCallingPCOffset + kSystemPointerSize); - Pacia1716(); + Pacib1716(); #endif Str(x17, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset)); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 109e73c3c229d4..0cb9e823198006 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -503,13 +503,13 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void Cbnz(const Register& rt, Label* label); void Cbz(const Register& rt, Label* label); - void Paciasp() { + void Pacibsp() { DCHECK(allow_macro_instructions_); - paciasp(); + pacibsp(); } - void Autiasp() { + void Autibsp() { DCHECK(allow_macro_instructions_); - autiasp(); + autibsp(); } // The 1716 pac and aut instructions encourage people to use x16 and x17 @@ -519,7 +519,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // Register temp 
= temps.AcquireX(); // temp will be x16 // __ Mov(x17, ptr); // __ Mov(x16, modifier); // Will override temp! - // __ Pacia1716(); + // __ Pacib1716(); // // To work around this issue, you must exclude x16 and x17 from the scratch // register list. You may need to replace them with other registers: @@ -529,18 +529,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // temps.Include(x10, x11); // __ Mov(x17, ptr); // __ Mov(x16, modifier); - // __ Pacia1716(); - void Pacia1716() { + // __ Pacib1716(); + void Pacib1716() { DCHECK(allow_macro_instructions_); DCHECK(!TmpList()->IncludesAliasOf(x16)); DCHECK(!TmpList()->IncludesAliasOf(x17)); - pacia1716(); + pacib1716(); } - void Autia1716() { + void Autib1716() { DCHECK(allow_macro_instructions_); DCHECK(!TmpList()->IncludesAliasOf(x16)); DCHECK(!TmpList()->IncludesAliasOf(x17)); - autia1716(); + autib1716(); } inline void Dmb(BarrierDomain domain, BarrierType type); @@ -1009,6 +1009,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { fcvtzs(vd, vn, fbits); } + void Fjcvtzs(const Register& rd, const VRegister& vn) { + DCHECK(allow_macro_instructions()); + DCHECK(!rd.IsZero()); + fjcvtzs(rd, vn); + } + inline void Fcvtzu(const Register& rd, const VRegister& fn); void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) { DCHECK(allow_macro_instructions()); diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index 090d5424b3a5ec..28bbed9ee09cb1 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -92,9 +92,7 @@ class CPURegister : public RegisterBase { } static constexpr CPURegister Create(int code, int size, RegisterType type) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK(IsValid(code, size, type)); -#endif + CONSTEXPR_DCHECK(IsValid(code, size, type)); return CPURegister{code, size, type}; } @@ -304,9 +302,7 @@ class VRegister : public CPURegister { } static 
constexpr VRegister Create(int code, int size, int lane_count = 1) { -#if V8_HAS_CXX14_CONSTEXPR - DCHECK(IsValidLaneCount(lane_count)); -#endif + CONSTEXPR_DCHECK(IsValidLaneCount(lane_count)); return VRegister(CPURegister::Create(code, size, CPURegister::kVRegister), lane_count); } diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc index 3b27bf5db9eb52..3d0b7d28e4723f 100644 --- a/deps/v8/src/codegen/assembler.cc +++ b/deps/v8/src/codegen/assembler.cc @@ -81,7 +81,7 @@ namespace { class DefaultAssemblerBuffer : public AssemblerBuffer { public: explicit DefaultAssemblerBuffer(int size) - : buffer_(OwnedVector::New(size)) { + : buffer_(OwnedVector::NewForOverwrite(size)) { #ifdef DEBUG ZapCode(reinterpret_cast
(buffer_.start()), size); #endif diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h index 1c287222e96758..6419e55cec76fa 100644 --- a/deps/v8/src/codegen/assembler.h +++ b/deps/v8/src/codegen/assembler.h @@ -78,10 +78,16 @@ class JumpOptimizationInfo { public: bool is_collecting() const { return stage_ == kCollection; } bool is_optimizing() const { return stage_ == kOptimization; } - void set_optimizing() { stage_ = kOptimization; } + void set_optimizing() { + DCHECK(is_optimizable()); + stage_ = kOptimization; + } bool is_optimizable() const { return optimizable_; } - void set_optimizable() { optimizable_ = true; } + void set_optimizable() { + DCHECK(is_collecting()); + optimizable_ = true; + } // Used to verify the instruction sequence is always the same in two stages. size_t hash_code() const { return hash_code_; } @@ -251,6 +257,15 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced { int pc_offset() const { return static_cast(pc_ - buffer_start_); } + int pc_offset_for_safepoint() { +#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) + // Mips needs it's own implementation to avoid trampoline's influence. 
+ UNREACHABLE(); +#else + return pc_offset(); +#endif + } + byte* buffer_start() const { return buffer_->start(); } int buffer_size() const { return buffer_->size(); } int instruction_size() const { return pc_offset(); } diff --git a/deps/v8/src/codegen/code-comments.h b/deps/v8/src/codegen/code-comments.h index f366cd5547885d..5866296051ea50 100644 --- a/deps/v8/src/codegen/code-comments.h +++ b/deps/v8/src/codegen/code-comments.h @@ -20,7 +20,7 @@ class Assembler; // Code comments section layout: // byte count content // ------------------------------------------------------------------------ -// 4 size as uint32_t (only for sanity check) +// 4 size as uint32_t (only for a check) // [Inline array of CodeCommentEntry in increasing pc_offset order] // ┌ 4 pc_offset of entry as uint32_t // ├ 4 length of the comment including terminating '\0' diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc index 060a66edc7ad8a..006b6bee1673ff 100644 --- a/deps/v8/src/codegen/code-factory.cc +++ b/deps/v8/src/codegen/code-factory.cc @@ -267,6 +267,23 @@ Callable CodeFactory::Call(Isolate* isolate, ConvertReceiverMode mode) { return Callable(isolate->builtins()->Call(mode), CallTrampolineDescriptor{}); } +// static +Callable CodeFactory::Call_WithFeedback(Isolate* isolate, + ConvertReceiverMode mode) { + switch (mode) { + case ConvertReceiverMode::kNullOrUndefined: + return Builtins::CallableFor( + isolate, Builtins::kCall_ReceiverIsNullOrUndefined_WithFeedback); + case ConvertReceiverMode::kNotNullOrUndefined: + return Builtins::CallableFor( + isolate, Builtins::kCall_ReceiverIsNotNullOrUndefined_WithFeedback); + case ConvertReceiverMode::kAny: + return Builtins::CallableFor(isolate, + Builtins::kCall_ReceiverIsAny_WithFeedback); + } + UNREACHABLE(); +} + // static Callable CodeFactory::CallWithArrayLike(Isolate* isolate) { return Builtins::CallableFor(isolate, Builtins::kCallWithArrayLike); diff --git a/deps/v8/src/codegen/code-factory.h 
b/deps/v8/src/codegen/code-factory.h index b8d294ce714d27..02fc7e4b236c56 100644 --- a/deps/v8/src/codegen/code-factory.h +++ b/deps/v8/src/codegen/code-factory.h @@ -71,6 +71,7 @@ class V8_EXPORT_PRIVATE CodeFactory final { static Callable ArgumentAdaptor(Isolate* isolate); static Callable Call(Isolate* isolate, ConvertReceiverMode mode = ConvertReceiverMode::kAny); + static Callable Call_WithFeedback(Isolate* isolate, ConvertReceiverMode mode); static Callable CallWithArrayLike(Isolate* isolate); static Callable CallWithSpread(Isolate* isolate); static Callable CallFunction( diff --git a/deps/v8/src/codegen/code-reference.h b/deps/v8/src/codegen/code-reference.h index 4326cf0b96b4a3..8ff3581689f5ae 100644 --- a/deps/v8/src/codegen/code-reference.h +++ b/deps/v8/src/codegen/code-reference.h @@ -16,7 +16,7 @@ class CodeDesc; namespace wasm { class WasmCode; -} +} // namespace wasm class CodeReference { public: diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 901ce0c7b49410..5a8d0bad030a4e 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -20,7 +20,6 @@ #include "src/objects/descriptor-array.h" #include "src/objects/function-kind.h" #include "src/objects/heap-number.h" -#include "src/objects/js-aggregate-error.h" #include "src/objects/js-generator.h" #include "src/objects/oddball.h" #include "src/objects/ordered-hash-table-inl.h" @@ -109,7 +108,11 @@ void CodeStubAssembler::Check(const BranchGenerator& branch, branch(&ok, ¬_ok); BIND(¬_ok); - FailAssert(message, file, line, extra_nodes); + std::vector file_and_line; + if (file != nullptr) { + file_and_line.push_back({file, line}); + } + FailAssert(message, file_and_line, extra_nodes); BIND(&ok); Comment("] Assert"); @@ -136,17 +139,6 @@ void CodeStubAssembler::Check(SloppyTNode condition_node, Check(branch, message, file, line, extra_nodes); } -template <> -TNode 
CodeStubAssembler::IntPtrToParameter(TNode value) { - return SmiTag(value); -} -template <> -TNode CodeStubAssembler::IntPtrToParameter( - TNode value) { - return value; -} - - void CodeStubAssembler::IncrementCallCount( TNode feedback_vector, TNode slot_id) { Comment("increment call count"); @@ -171,12 +163,24 @@ void CodeStubAssembler::FastCheck(TNode condition) { } void CodeStubAssembler::FailAssert( - const char* message, const char* file, int line, + const char* message, const std::vector& files_and_lines, std::initializer_list extra_nodes) { DCHECK_NOT_NULL(message); EmbeddedVector chars; - if (file != nullptr) { - SNPrintF(chars, "%s [%s:%d]", message, file, line); + std::stringstream stream; + for (auto it = files_and_lines.rbegin(); it != files_and_lines.rend(); ++it) { + if (it->first != nullptr) { + stream << " [" << it->first << ":" << it->second << "]"; +#ifndef DEBUG + // To limit the size of these strings in release builds, we include only + // the innermost macro's file name and line number. 
+ break; +#endif + } + } + std::string files_and_lines_text = stream.str(); + if (files_and_lines_text.size() != 0) { + SNPrintF(chars, "%s%s", message, files_and_lines_text.c_str()); message = chars.begin(); } TNode message_node = StringConstant(message); @@ -283,42 +287,6 @@ TNode CodeStubAssembler::IntPtrOrSmiConstant(int value) { return ReinterpretCast(IntPtrConstant(value)); } -Node* CodeStubAssembler::IntPtrOrSmiConstant(int value, ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiConstant(value); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return IntPtrConstant(value); - } -} - -bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode test) { - Smi smi_test; - if (ToSmiConstant(test, &smi_test) && smi_test.value() == 0) { - return true; - } - return false; -} - -bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(TNode test) { - int32_t constant_test; - if (ToInt32Constant(test, &constant_test) && constant_test == 0) { - return true; - } - return false; -} - -bool CodeStubAssembler::IsIntPtrOrSmiConstantZero(Node* test, - ParameterMode mode) { - if (mode == INTPTR_PARAMETERS) { - return IsIntPtrOrSmiConstantZero(UncheckedCast(test)); - } else { - DCHECK_EQ(mode, SMI_PARAMETERS); - return IsIntPtrOrSmiConstantZero(UncheckedCast(test)); - } - return false; -} - bool CodeStubAssembler::TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, ParameterMode mode) { @@ -586,22 +554,14 @@ TNode CodeStubAssembler::Float64Trunc(SloppyTNode x) { return TNode::UncheckedCast(var_x.value()); } -TNode CodeStubAssembler::IsValidSmi(TNode smi) { - if (SmiValuesAre32Bits() && kSystemPointerSize == kInt64Size) { - // Check that the Smi value is zero in the lower bits. 
- TNode value = BitcastTaggedToWordForTagAndSmiBits(smi); - return Word32Equal(Int32Constant(0), TruncateIntPtrToInt32(value)); - } - return Int32TrueConstant(); +template <> +TNode CodeStubAssembler::TaggedToParameter(TNode value) { + return value; } -TNode CodeStubAssembler::IsValidSmiIndex(TNode smi) { - if (COMPRESS_POINTERS_BOOL) { - return WordEqual( - BitcastTaggedToWordForTagAndSmiBits(smi), - BitcastTaggedToWordForTagAndSmiBits(NormalizeSmiIndex(smi))); - } - return Int32TrueConstant(); +template <> +TNode CodeStubAssembler::TaggedToParameter(TNode value) { + return SmiUntag(value); } TNode CodeStubAssembler::TaggedIndexToIntPtr( @@ -1098,16 +1058,9 @@ void CodeStubAssembler::Bind(Label* label, AssemblerDebugInfo debug_info) { void CodeStubAssembler::Bind(Label* label) { CodeAssembler::Bind(label); } -TNode CodeStubAssembler::LoadDoubleWithHoleCheck( - TNode array, TNode index, Label* if_hole) { - return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0, - SMI_PARAMETERS, if_hole); -} - TNode CodeStubAssembler::LoadDoubleWithHoleCheck( TNode array, TNode index, Label* if_hole) { - return LoadFixedDoubleArrayElement(array, index, MachineType::Float64(), 0, - INTPTR_PARAMETERS, if_hole); + return LoadFixedDoubleArrayElement(array, index, if_hole); } void CodeStubAssembler::BranchIfJSReceiver(SloppyTNode object, @@ -1430,14 +1383,14 @@ TNode CodeStubAssembler::LoadFromParentFrame(int offset) { return LoadFullTagged(frame_pointer, IntPtrConstant(offset)); } -Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, - int offset, MachineType type) { +Node* CodeStubAssembler::LoadObjectField(TNode object, int offset, + MachineType type) { CSA_ASSERT(this, IsStrong(object)); return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag)); } -Node* CodeStubAssembler::LoadObjectField(SloppyTNode object, - SloppyTNode offset, +Node* CodeStubAssembler::LoadObjectField(TNode object, + TNode offset, MachineType type) { CSA_ASSERT(this, 
IsStrong(object)); return LoadFromObject(type, object, @@ -2006,6 +1959,43 @@ CodeStubAssembler::LoadArrayElement(TNode, ParameterMode, LoadSensitivity); +template +TNode CodeStubAssembler::LoadFixedArrayElement( + TNode object, TNode index, int additional_offset, + LoadSensitivity needs_poisoning, CheckBounds check_bounds) { + // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? + static_assert(std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only Smi, UintPtrT or IntPtrT indexes are allowed"); + CSA_ASSERT(this, IsFixedArraySubclass(object)); + CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object)); + + ParameterMode parameter_mode = + std::is_same::value ? SMI_PARAMETERS : INTPTR_PARAMETERS; + if (NeedsBoundsCheck(check_bounds)) { + FixedArrayBoundsCheck(object, index, additional_offset, parameter_mode); + } + TNode element = + LoadArrayElement(object, FixedArray::kHeaderSize, index, + additional_offset, parameter_mode, needs_poisoning); + return CAST(element); +} + +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, + int, LoadSensitivity, + CheckBounds); +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::LoadFixedArrayElement(TNode, + TNode, int, + LoadSensitivity, + CheckBounds); +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::LoadFixedArrayElement(TNode, + TNode, int, + LoadSensitivity, CheckBounds); + void CodeStubAssembler::FixedArrayBoundsCheck(TNode array, Node* index, int additional_offset, @@ -2036,22 +2026,6 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode array, } } -TNode CodeStubAssembler::LoadFixedArrayElement( - TNode object, Node* index_node, int additional_offset, - ParameterMode parameter_mode, LoadSensitivity needs_poisoning, - CheckBounds check_bounds) { - CSA_ASSERT(this, IsFixedArraySubclass(object)); - CSA_ASSERT(this, IsNotWeakFixedArraySubclass(object)); - if (NeedsBoundsCheck(check_bounds)) { - FixedArrayBoundsCheck(object, index_node, 
additional_offset, - parameter_mode); - } - TNode element = - LoadArrayElement(object, FixedArray::kHeaderSize, index_node, - additional_offset, parameter_mode, needs_poisoning); - return CAST(element); -} - TNode CodeStubAssembler::LoadPropertyArrayElement( TNode object, SloppyTNode index) { int additional_offset = 0; @@ -2382,9 +2356,8 @@ template TNode CodeStubAssembler::LoadFeedbackVectorSlot( template TNode CodeStubAssembler::LoadAndUntagToWord32ArrayElement( - TNode object, int array_header_size, Node* index_node, - int additional_offset, ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode)); + TNode object, int array_header_size, TNode index, + int additional_offset) { DCHECK(IsAligned(additional_offset, kTaggedSize)); int endian_correction = 0; #if V8_TARGET_LITTLE_ENDIAN @@ -2392,8 +2365,8 @@ TNode CodeStubAssembler::LoadAndUntagToWord32ArrayElement( #endif int32_t header_size = array_header_size + additional_offset - kHeapObjectTag + endian_correction; - TNode offset = ElementOffsetFromIndex(index_node, HOLEY_ELEMENTS, - parameter_mode, header_size); + TNode offset = + ElementOffsetFromIndex(index, HOLEY_ELEMENTS, header_size); CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(object), array_header_size + endian_correction)); if (SmiValuesAre32Bits()) { @@ -2404,32 +2377,25 @@ TNode CodeStubAssembler::LoadAndUntagToWord32ArrayElement( } TNode CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( - TNode object, Node* index_node, int additional_offset, - ParameterMode parameter_mode) { + TNode object, TNode index, int additional_offset) { CSA_SLOW_ASSERT(this, IsFixedArraySubclass(object)); return LoadAndUntagToWord32ArrayElement(object, FixedArray::kHeaderSize, - index_node, additional_offset, - parameter_mode); + index, additional_offset); } TNode CodeStubAssembler::LoadWeakFixedArrayElement( - TNode object, Node* index, int additional_offset, - ParameterMode parameter_mode, LoadSensitivity 
needs_poisoning) { + TNode object, TNode index, int additional_offset) { return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index, - additional_offset, parameter_mode, needs_poisoning); + additional_offset, INTPTR_PARAMETERS, + LoadSensitivity::kSafe); } TNode CodeStubAssembler::LoadFixedDoubleArrayElement( - SloppyTNode object, Node* index_node, - MachineType machine_type, int additional_offset, - ParameterMode parameter_mode, Label* if_hole) { - CSA_ASSERT(this, IsFixedDoubleArray(object)); - DCHECK(IsAligned(additional_offset, kTaggedSize)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(index_node, parameter_mode)); - int32_t header_size = - FixedDoubleArray::kHeaderSize + additional_offset - kHeapObjectTag; - TNode offset = ElementOffsetFromIndex( - index_node, HOLEY_DOUBLE_ELEMENTS, parameter_mode, header_size); + TNode object, TNode index, Label* if_hole, + MachineType machine_type) { + int32_t header_size = FixedDoubleArray::kHeaderSize - kHeapObjectTag; + TNode offset = + ElementOffsetFromIndex(index, HOLEY_DOUBLE_ELEMENTS, header_size); CSA_ASSERT(this, IsOffsetInBounds( offset, LoadAndUntagFixedArrayBaseLength(object), FixedDoubleArray::kHeaderSize, HOLEY_DOUBLE_ELEMENTS)); @@ -2478,16 +2444,15 @@ TNode CodeStubAssembler::LoadFixedArrayBaseElementAsTagged( BIND(&if_packed_double); { - var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement( - CAST(elements), index, MachineType::Float64())); + var_result = AllocateHeapNumberWithValue( + LoadFixedDoubleArrayElement(CAST(elements), index)); Goto(&done); } BIND(&if_holey_double); { - var_result = AllocateHeapNumberWithValue(LoadFixedDoubleArrayElement( - CAST(elements), index, MachineType::Float64(), 0, INTPTR_PARAMETERS, - if_hole)); + var_result = AllocateHeapNumberWithValue( + LoadFixedDoubleArrayElement(CAST(elements), index, if_hole)); Goto(&done); } @@ -2519,7 +2484,7 @@ TNode CodeStubAssembler::IsDoubleHole(TNode base, } TNode CodeStubAssembler::LoadDoubleWithHoleCheck( - 
SloppyTNode base, SloppyTNode offset, Label* if_hole, + TNode base, TNode offset, Label* if_hole, MachineType machine_type) { if (if_hole) { GotoIf(IsDoubleHole(base, offset), if_hole); @@ -2542,41 +2507,6 @@ TNode CodeStubAssembler::LoadScopeInfoHasExtensionField( return IsSetWord(value); } -TNode CodeStubAssembler::LoadContextElement( - SloppyTNode context, int slot_index) { - int offset = Context::SlotOffset(slot_index); - return Load(context, IntPtrConstant(offset)); -} - -TNode CodeStubAssembler::LoadContextElement( - SloppyTNode context, SloppyTNode slot_index) { - TNode offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, - Context::SlotOffset(0)); - return Load(context, offset); -} - -TNode CodeStubAssembler::LoadContextElement(TNode context, - TNode slot_index) { - TNode offset = ElementOffsetFromIndex(slot_index, PACKED_ELEMENTS, - Context::SlotOffset(0)); - return Load(context, offset); -} - -void CodeStubAssembler::StoreContextElement(SloppyTNode context, - int slot_index, - SloppyTNode value) { - int offset = Context::SlotOffset(slot_index); - Store(context, IntPtrConstant(offset), value); -} - -void CodeStubAssembler::StoreContextElement(SloppyTNode context, - SloppyTNode slot_index, - SloppyTNode value) { - TNode offset = IntPtrAdd(TimesTaggedSize(slot_index), - IntPtrConstant(Context::SlotOffset(0))); - Store(context, offset, value); -} - void CodeStubAssembler::StoreContextElementNoWriteBarrier( SloppyTNode context, int slot_index, SloppyTNode value) { int offset = Context::SlotOffset(slot_index); @@ -2893,19 +2823,18 @@ TNode CodeStubAssembler::EnsureArrayPushable(TNode context, } void CodeStubAssembler::PossiblyGrowElementsCapacity( - ParameterMode mode, ElementsKind kind, TNode array, - Node* length, TVariable* var_elements, Node* growth, + ElementsKind kind, TNode array, TNode length, + TVariable* var_elements, TNode growth, Label* bailout) { Label fits(this, var_elements); - Node* capacity = - 
TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value()), mode); - // length and growth nodes are already in a ParameterMode appropriate - // representation. - Node* new_length = IntPtrOrSmiAdd(growth, length, mode); - GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity, mode), &fits); - Node* new_capacity = CalculateNewElementsCapacity(new_length, mode); + TNode capacity = + TaggedToParameter(LoadFixedArrayBaseLength(var_elements->value())); + + TNode new_length = IntPtrOrSmiAdd(growth, length); + GotoIfNot(IntPtrOrSmiGreaterThan(new_length, capacity), &fits); + TNode new_capacity = CalculateNewElementsCapacity(new_length); *var_elements = GrowElementsCapacity(array, var_elements->value(), kind, kind, - capacity, new_capacity, mode, bailout); + capacity, new_capacity, bailout); Goto(&fits); BIND(&fits); } @@ -2919,15 +2848,14 @@ TNode CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, Label pre_bailout(this); Label success(this); TVARIABLE(Smi, var_tagged_length); - ParameterMode mode = OptimalParameterMode(); TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); TVARIABLE(FixedArrayBase, var_elements, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. TNode first = arg_index->value(); TNode growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first)); - PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), - &var_elements, growth, &pre_bailout); + PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements, + growth, &pre_bailout); // Push each argument onto the end of the array now that there is enough // capacity. 
@@ -2936,8 +2864,8 @@ TNode CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, args->ForEach( push_vars, [&](TNode arg) { - TryStoreArrayElement(kind, mode, &pre_bailout, elements, - var_length.value(), arg); + TryStoreArrayElement(kind, &pre_bailout, elements, var_length.value(), + arg); Increment(&var_length); }, first); @@ -2950,7 +2878,7 @@ TNode CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, BIND(&pre_bailout); { - TNode length = ParameterToTagged(var_length.value(), mode); + TNode length = ParameterToTagged(var_length.value()); var_tagged_length = length; TNode diff = SmiSub(length, LoadFastJSArrayLength(array)); StoreObjectFieldNoWriteBarrier(array, JSArray::kLengthOffset, length); @@ -2962,15 +2890,17 @@ TNode CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, return var_tagged_length.value(); } -void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, - ParameterMode mode, Label* bailout, +void CodeStubAssembler::TryStoreArrayElement(ElementsKind kind, Label* bailout, TNode elements, - Node* index, TNode value) { + TNode index, + TNode value) { if (IsSmiElementsKind(kind)) { GotoIf(TaggedIsNotSmi(value), bailout); } else if (IsDoubleElementsKind(kind)) { GotoIfNotNumber(value, bailout); } + + ParameterMode mode = OptimalParameterMode(); if (IsDoubleElementsKind(kind)) { StoreElement(elements, kind, index, ChangeNumberToFloat64(CAST(value)), mode); @@ -2984,19 +2914,18 @@ void CodeStubAssembler::BuildAppendJSArray(ElementsKind kind, TNode value, Label* bailout) { Comment("BuildAppendJSArray: ", ElementsKindToString(kind)); - ParameterMode mode = OptimalParameterMode(); TVARIABLE(BInt, var_length, SmiToBInt(LoadFastJSArrayLength(array))); TVARIABLE(FixedArrayBase, var_elements, LoadElements(array)); // Resize the capacity of the fixed array if it doesn't fit. 
- Node* growth = IntPtrOrSmiConstant(1, mode); - PossiblyGrowElementsCapacity(mode, kind, array, var_length.value(), - &var_elements, growth, bailout); + TNode growth = IntPtrOrSmiConstant(1); + PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements, + growth, bailout); // Push each argument onto the end of the array now that there is enough // capacity. - TryStoreArrayElement(kind, mode, bailout, var_elements.value(), - var_length.value(), value); + TryStoreArrayElement(kind, bailout, var_elements.value(), var_length.value(), + value); Increment(&var_length); TNode length = BIntToSmi(var_length.value()); @@ -3335,7 +3264,7 @@ TNode CodeStubAssembler::CopyNameDictionary( AllocateNameDictionaryWithCapacity(capacity); TNode length = SmiUntag(LoadFixedArrayBaseLength(dictionary)); CopyFixedArrayElements(PACKED_ELEMENTS, dictionary, properties, length, - SKIP_WRITE_BARRIER, INTPTR_PARAMETERS); + SKIP_WRITE_BARRIER); return properties; } @@ -3552,12 +3481,13 @@ TNode CodeStubAssembler::IsValidFastJSArrayCapacity( TNode CodeStubAssembler::AllocateJSArray( TNode array_map, TNode elements, TNode length, - TNode allocation_site, int array_header_size) { + base::Optional> allocation_site, + int array_header_size) { Comment("begin allocation of JSArray passing in elements"); CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); int base_size = array_header_size; - if (!allocation_site.is_null()) { + if (allocation_site) { base_size += AllocationMemento::kSize; } @@ -3571,8 +3501,9 @@ TNode CodeStubAssembler::AllocateJSArray( std::pair, TNode> CodeStubAssembler::AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode array_map, TNode length, - TNode allocation_site, TNode capacity, - AllocationFlags allocation_flags, int array_header_size) { + base::Optional> allocation_site, + TNode capacity, AllocationFlags allocation_flags, + int array_header_size) { Comment("begin allocation of JSArray with elements"); CHECK_EQ(allocation_flags & 
~kAllowLargeObjectAllocation, 0); CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); @@ -3608,7 +3539,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( BIND(&nonempty); { int base_size = array_header_size; - if (!allocation_site.is_null()) { + if (allocation_site) { base_size += AllocationMemento::kSize; } @@ -3680,7 +3611,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements( TNode CodeStubAssembler::AllocateUninitializedJSArray( TNode array_map, TNode length, - TNode allocation_site, TNode size_in_bytes) { + base::Optional> allocation_site, + TNode size_in_bytes) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); // Allocate space for the JSArray and the elements FixedArray in one go. @@ -3691,9 +3623,9 @@ TNode CodeStubAssembler::AllocateUninitializedJSArray( StoreObjectFieldRoot(array, JSArray::kPropertiesOrHashOffset, RootIndex::kEmptyFixedArray); - if (!allocation_site.is_null()) { + if (allocation_site) { InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize), - allocation_site); + *allocation_site); } return CAST(array); @@ -3701,11 +3633,10 @@ TNode CodeStubAssembler::AllocateUninitializedJSArray( TNode CodeStubAssembler::AllocateJSArray( ElementsKind kind, TNode array_map, TNode capacity, - TNode length, TNode allocation_site, + TNode length, base::Optional> allocation_site, AllocationFlags allocation_flags) { CSA_SLOW_ASSERT(this, TaggedIsPositiveSmi(length)); - ParameterMode capacity_mode = INTPTR_PARAMETERS; TNode array; TNode elements; @@ -3718,9 +3649,8 @@ TNode CodeStubAssembler::AllocateJSArray( BIND(&nonempty); { - FillFixedArrayWithValue(kind, elements, - IntPtrOrSmiConstant(0, capacity_mode), capacity, - RootIndex::kTheHoleValue, capacity_mode); + FillFixedArrayWithValue(kind, elements, IntPtrConstant(0), capacity, + RootIndex::kTheHoleValue, INTPTR_PARAMETERS); Goto(&out); } @@ -3728,9 +3658,10 @@ TNode CodeStubAssembler::AllocateJSArray( return array; } -TNode 
CodeStubAssembler::ExtractFastJSArray( - TNode context, TNode array, Node* begin, Node* count, - ParameterMode mode, Node* capacity, TNode allocation_site) { +TNode CodeStubAssembler::ExtractFastJSArray(TNode context, + TNode array, + TNode begin, + TNode count) { TNode original_array_map = LoadMap(array); TNode elements_kind = LoadMapElementsKind(original_array_map); @@ -3739,23 +3670,24 @@ TNode CodeStubAssembler::ExtractFastJSArray( TNode array_map = LoadJSArrayElementsMap(elements_kind, native_context); TNode new_elements = ExtractFixedArray( - LoadElements(array), begin, count, capacity, - ExtractFixedArrayFlag::kAllFixedArrays, mode, nullptr, elements_kind); + LoadElements(array), base::Optional>(begin), + base::Optional>(count), + base::Optional>(base::nullopt), + ExtractFixedArrayFlag::kAllFixedArrays, nullptr, elements_kind); TNode result = AllocateJSArray( - array_map, new_elements, ParameterToTagged(count, mode), allocation_site); + array_map, new_elements, ParameterToTagged(count), base::nullopt); return result; } TNode CodeStubAssembler::CloneFastJSArray( TNode context, TNode array, - TNode allocation_site, HoleConversionMode convert_holes) { + base::Optional> allocation_site, + HoleConversionMode convert_holes) { // TODO(dhai): we should be able to assert IsFastJSArray(array) here, but this // function is also used to copy boilerplates even when the no-elements // protector is invalid. This function should be renamed to reflect its uses. - // TODO(v8:9708): remove ParameterMode - ParameterMode mode = OptimalParameterMode(); TNode length = LoadJSArrayLength(array); TNode new_elements; TVARIABLE(FixedArrayBase, var_new_elements); @@ -3773,11 +3705,13 @@ TNode CodeStubAssembler::CloneFastJSArray( } // Simple extraction that preserves holes. 
- new_elements = - ExtractFixedArray(LoadElements(array), IntPtrOrSmiConstant(0, mode), - TaggedToParameter(CAST(length), mode), nullptr, - ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, mode, - nullptr, var_elements_kind.value()); + new_elements = ExtractFixedArray( + LoadElements(array), + base::Optional>(IntPtrOrSmiConstant(0)), + base::Optional>(TaggedToParameter(CAST(length))), + base::Optional>(base::nullopt), + ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW, nullptr, + var_elements_kind.value()); var_new_elements = new_elements; Goto(&allocate_jsarray); @@ -3792,9 +3726,11 @@ TNode CodeStubAssembler::CloneFastJSArray( // PACKED_ELEMENTS. Also, if we want to replace holes, we must not use // ExtractFixedArrayFlag::kDontCopyCOW. new_elements = ExtractFixedArray( - LoadElements(array), IntPtrOrSmiConstant(0, mode), - TaggedToParameter(CAST(length), mode), nullptr, - ExtractFixedArrayFlag::kAllFixedArrays, mode, &var_holes_converted); + LoadElements(array), + base::Optional>(IntPtrOrSmiConstant(0)), + base::Optional>(TaggedToParameter(CAST(length))), + base::Optional>(base::nullopt), + ExtractFixedArrayFlag::kAllFixedArrays, &var_holes_converted); var_new_elements = new_elements; // If the array type didn't change, use the original elements kind. 
GotoIfNot(var_holes_converted.value(), &allocate_jsarray); @@ -3826,25 +3762,29 @@ TNode CodeStubAssembler::CloneFastJSArray( return result; } +template TNode CodeStubAssembler::AllocateFixedArray( - ElementsKind kind, Node* capacity, ParameterMode mode, - AllocationFlags flags, SloppyTNode fixed_array_map) { + ElementsKind kind, TNode capacity, AllocationFlags flags, + base::Optional> fixed_array_map) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT capacity is allowed"); Comment("AllocateFixedArray"); - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode)); - CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity, - IntPtrOrSmiConstant(0, mode), mode)); + CSA_ASSERT(this, + IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant(0))); const intptr_t kMaxLength = IsDoubleElementsKind(kind) ? FixedDoubleArray::kMaxLength : FixedArray::kMaxLength; + const ParameterMode parameter_mode = + std::is_same::value ? SMI_PARAMETERS : INTPTR_PARAMETERS; intptr_t capacity_constant; - if (ToParameterConstant(capacity, &capacity_constant, mode)) { + if (ToParameterConstant(capacity, &capacity_constant, parameter_mode)) { CHECK_LE(capacity_constant, kMaxLength); } else { Label if_out_of_memory(this, Label::kDeferred), next(this); - Branch(IntPtrOrSmiGreaterThan( - capacity, - IntPtrOrSmiConstant(static_cast(kMaxLength), mode), mode), + Branch(IntPtrOrSmiGreaterThan(capacity, IntPtrOrSmiConstant( + static_cast(kMaxLength))), &if_out_of_memory, &next); BIND(&if_out_of_memory); @@ -3855,12 +3795,12 @@ TNode CodeStubAssembler::AllocateFixedArray( BIND(&next); } - TNode total_size = GetFixedArrayAllocationSize(capacity, kind, mode); + TNode total_size = GetFixedArrayAllocationSize(capacity, kind); if (IsDoubleElementsKind(kind)) flags |= kDoubleAlignment; // Allocate both array and elements object, and initialize the JSArray. 
TNode array = Allocate(total_size, flags); - if (fixed_array_map != nullptr) { + if (fixed_array_map) { // Conservatively only skip the write barrier if there are no allocation // flags, this ensures that the object hasn't ended up in LOS. Note that the // fixed array map is currently always immortal and technically wouldn't @@ -3868,9 +3808,9 @@ TNode CodeStubAssembler::AllocateFixedArray( // in case this invariant changes later, since it's difficult to enforce // locally here. if (flags == CodeStubAssembler::kNone) { - StoreMapNoWriteBarrier(array, fixed_array_map); + StoreMapNoWriteBarrier(array, *fixed_array_map); } else { - StoreMap(array, fixed_array_map); + StoreMap(array, *fixed_array_map); } } else { RootIndex map_index = IsDoubleElementsKind(kind) @@ -3880,23 +3820,32 @@ TNode CodeStubAssembler::AllocateFixedArray( StoreMapNoWriteBarrier(array, map_index); } StoreObjectFieldNoWriteBarrier(array, FixedArrayBase::kLengthOffset, - ParameterToTagged(capacity, mode)); + ParameterToTagged(capacity)); return UncheckedCast(array); } +// There is no need to export the Smi version since it is only used inside +// code-stub-assembler. 
+template V8_EXPORT_PRIVATE TNode + CodeStubAssembler::AllocateFixedArray(ElementsKind, TNode, + AllocationFlags, + base::Optional>); + +template TNode CodeStubAssembler::ExtractToFixedArray( - SloppyTNode source, Node* first, Node* count, - Node* capacity, SloppyTNode source_map, ElementsKind from_kind, - AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags, - ParameterMode parameter_mode, HoleConversionMode convert_holes, + SloppyTNode source, TNode first, + TNode count, TNode capacity, TNode source_map, + ElementsKind from_kind, AllocationFlags allocation_flags, + ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes, TVariable* var_holes_converted, base::Optional> source_elements_kind) { - DCHECK_NE(first, nullptr); - DCHECK_NE(count, nullptr); - DCHECK_NE(capacity, nullptr); + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT first, count, and capacity are allowed"); + DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays); - CSA_ASSERT(this, IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode), - capacity, parameter_mode)); + CSA_ASSERT(this, + IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0), capacity)); CSA_ASSERT(this, TaggedEqual(source_map, LoadMap(source))); TVARIABLE(FixedArrayBase, var_result); @@ -3924,8 +3873,7 @@ TNode CodeStubAssembler::ExtractToFixedArray( // 1) |extract_flags| forces us to, or // 2) we're asked to extract only part of the |source| (|first| != 0). if (extract_flags & ExtractFixedArrayFlag::kDontCopyCOW) { - Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0, parameter_mode), - first, parameter_mode), + Branch(IntPtrOrSmiNotEqual(IntPtrOrSmiConstant(0), first), &new_space_check, [&] { var_result = source; Goto(&done); @@ -3937,6 +3885,9 @@ TNode CodeStubAssembler::ExtractToFixedArray( } } + const ParameterMode parameter_mode = + std::is_same::value ? 
SMI_PARAMETERS : INTPTR_PARAMETERS; + BIND(&new_space_check); { bool handle_old_space = !FLAG_young_generation_large_objects; @@ -3944,7 +3895,7 @@ TNode CodeStubAssembler::ExtractToFixedArray( if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) { handle_old_space = false; CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace( - count, FixedArray::kHeaderSize, parameter_mode))); + count, FixedArray::kHeaderSize))); } else { int constant_count; handle_old_space = @@ -3957,17 +3908,16 @@ TNode CodeStubAssembler::ExtractToFixedArray( Label old_space(this, Label::kDeferred); if (handle_old_space) { - GotoIfFixedArraySizeDoesntFitInNewSpace( - capacity, &old_space, FixedArray::kHeaderSize, parameter_mode); + GotoIfFixedArraySizeDoesntFitInNewSpace(capacity, &old_space, + FixedArray::kHeaderSize); } Comment("Copy FixedArray in young generation"); // We use PACKED_ELEMENTS to tell AllocateFixedArray and // CopyFixedArrayElements that we want a FixedArray. const ElementsKind to_kind = PACKED_ELEMENTS; - TNode to_elements = - AllocateFixedArray(to_kind, capacity, parameter_mode, allocation_flags, - var_target_map.value()); + TNode to_elements = AllocateFixedArray( + to_kind, capacity, allocation_flags, var_target_map.value()); var_result = to_elements; #ifndef V8_ENABLE_SINGLE_GENERATION @@ -3993,13 +3943,11 @@ TNode CodeStubAssembler::ExtractToFixedArray( FillFixedArrayWithValue(to_kind, to_elements, count, capacity, RootIndex::kTheHoleValue, parameter_mode); CopyElements(to_kind, to_elements, IntPtrConstant(0), source, - ParameterToIntPtr(first, parameter_mode), - ParameterToIntPtr(count, parameter_mode), + ParameterToIntPtr(first), ParameterToIntPtr(count), SKIP_WRITE_BARRIER); } else { CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first, - count, capacity, SKIP_WRITE_BARRIER, - parameter_mode, convert_holes, + count, capacity, SKIP_WRITE_BARRIER, convert_holes, var_holes_converted); } Goto(&done); @@ -4018,9 +3966,8 @@ 
TNode CodeStubAssembler::ExtractToFixedArray( ©_one_by_one); const ElementsKind to_smi_kind = PACKED_SMI_ELEMENTS; - to_elements = - AllocateFixedArray(to_smi_kind, capacity, parameter_mode, - allocation_flags, var_target_map.value()); + to_elements = AllocateFixedArray( + to_smi_kind, capacity, allocation_flags, var_target_map.value()); var_result = to_elements; FillFixedArrayWithValue(to_smi_kind, to_elements, count, capacity, @@ -4029,8 +3976,7 @@ TNode CodeStubAssembler::ExtractToFixedArray( // GC. Otherwise it will copy elements by elements, but skip write // barriers (since we're copying smis to smis). CopyElements(to_smi_kind, to_elements, IntPtrConstant(0), source, - ParameterToIntPtr(first, parameter_mode), - ParameterToIntPtr(count, parameter_mode), + ParameterToIntPtr(first), ParameterToIntPtr(count), SKIP_WRITE_BARRIER); Goto(&done); } else { @@ -4039,14 +3985,12 @@ TNode CodeStubAssembler::ExtractToFixedArray( BIND(©_one_by_one); { - to_elements = - AllocateFixedArray(to_kind, capacity, parameter_mode, - allocation_flags, var_target_map.value()); + to_elements = AllocateFixedArray(to_kind, capacity, allocation_flags, + var_target_map.value()); var_result = to_elements; CopyFixedArrayElements(from_kind, source, to_kind, to_elements, first, count, capacity, UPDATE_WRITE_BARRIER, - parameter_mode, convert_holes, - var_holes_converted); + convert_holes, var_holes_converted); Goto(&done); } } @@ -4057,21 +4001,26 @@ TNode CodeStubAssembler::ExtractToFixedArray( return UncheckedCast(var_result.value()); } +template TNode CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( - TNode from_array, Node* first, Node* count, Node* capacity, - TNode fixed_array_map, TVariable* var_holes_converted, - AllocationFlags allocation_flags, ExtractFixedArrayFlags extract_flags, - ParameterMode mode) { - DCHECK_NE(first, nullptr); - DCHECK_NE(count, nullptr); - DCHECK_NE(capacity, nullptr); + TNode from_array, TNode first, TNode count, + TNode capacity, TNode 
fixed_array_map, + TVariable* var_holes_converted, AllocationFlags allocation_flags, + ExtractFixedArrayFlags extract_flags) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT first, count, and capacity are allowed"); + DCHECK_NE(var_holes_converted, nullptr); CSA_ASSERT(this, IsFixedDoubleArrayMap(fixed_array_map)); + const ParameterMode parameter_mode = + std::is_same::value ? SMI_PARAMETERS : INTPTR_PARAMETERS; + TVARIABLE(FixedArrayBase, var_result); const ElementsKind kind = PACKED_DOUBLE_ELEMENTS; - TNode to_elements = AllocateFixedArray( - kind, capacity, mode, allocation_flags, fixed_array_map); + TNode to_elements = + AllocateFixedArray(kind, capacity, allocation_flags, fixed_array_map); var_result = to_elements; // We first try to copy the FixedDoubleArray to a new FixedDoubleArray. // |var_holes_converted| is set to False preliminarily. @@ -4079,25 +4028,23 @@ TNode CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( // The construction of the loop and the offsets for double elements is // extracted from CopyFixedArrayElements. - CSA_SLOW_ASSERT(this, MatchesParameterMode(count, mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, kind)); STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); Comment("[ ExtractFixedDoubleArrayFillingHoles"); // This copy can trigger GC, so we pre-initialize the array with holes. 
- FillFixedArrayWithValue(kind, to_elements, IntPtrOrSmiConstant(0, mode), - capacity, RootIndex::kTheHoleValue, mode); + FillFixedArrayWithValue(kind, to_elements, IntPtrOrSmiConstant(0), + capacity, RootIndex::kTheHoleValue, parameter_mode); const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; TNode first_from_element_offset = - ElementOffsetFromIndex(first, kind, mode, 0); + ElementOffsetFromIndex(first, kind, 0); TNode limit_offset = IntPtrAdd(first_from_element_offset, IntPtrConstant(first_element_offset)); TVARIABLE(IntPtrT, var_from_offset, - ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count, mode), kind, - mode, first_element_offset)); + ElementOffsetFromIndex(IntPtrOrSmiAdd(first, count), kind, + first_element_offset)); Label decrement(this, {&var_from_offset}), done(this); TNode to_array_adjusted = @@ -4132,7 +4079,7 @@ TNode CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( *var_holes_converted = Int32TrueConstant(); to_elements = ExtractToFixedArray(from_array, first, count, capacity, fixed_array_map, - kind, allocation_flags, extract_flags, mode, + kind, allocation_flags, extract_flags, HoleConversionMode::kConvertToUndefined); var_result = to_elements; Goto(&done); @@ -4143,15 +4090,19 @@ TNode CodeStubAssembler::ExtractFixedDoubleArrayFillingHoles( return var_result.value(); } +template TNode CodeStubAssembler::ExtractFixedArray( - TNode source, Node* first, Node* count, Node* capacity, - ExtractFixedArrayFlags extract_flags, ParameterMode parameter_mode, - TVariable* var_holes_converted, - base::Optional> source_runtime_kind) { + TNode source, base::Optional> first, + base::Optional> count, base::Optional> capacity, + ExtractFixedArrayFlags extract_flags, TVariable* var_holes_converted, + base::Optional> source_elements_kind) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT first, count, and capacity are allowed"); DCHECK(extract_flags & ExtractFixedArrayFlag::kFixedArrays || 
extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays); - // If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should not - // be used, because that disables the iteration which detects holes. + // If we want to replace holes, ExtractFixedArrayFlag::kDontCopyCOW should + // not be used, because that disables the iteration which detects holes. DCHECK_IMPLIES(var_holes_converted != nullptr, !(extract_flags & ExtractFixedArrayFlag::kDontCopyCOW)); HoleConversionMode convert_holes = @@ -4162,31 +4113,26 @@ TNode CodeStubAssembler::ExtractFixedArray( (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) ? CodeStubAssembler::kNone : CodeStubAssembler::kAllowLargeObjectAllocation; - if (first == nullptr) { - first = IntPtrOrSmiConstant(0, parameter_mode); + if (!first) { + first = IntPtrOrSmiConstant(0); } - if (count == nullptr) { + if (!count) { count = IntPtrOrSmiSub( - TaggedToParameter(LoadFixedArrayBaseLength(source), parameter_mode), - first, parameter_mode); + TaggedToParameter(LoadFixedArrayBaseLength(source)), *first); - CSA_ASSERT( - this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0, parameter_mode), - count, parameter_mode)); + CSA_ASSERT(this, IntPtrOrSmiLessThanOrEqual(IntPtrOrSmiConstant(0), + *count)); } - if (capacity == nullptr) { - capacity = count; + if (!capacity) { + capacity = *count; } else { CSA_ASSERT(this, Word32BinaryNot(IntPtrOrSmiGreaterThan( - IntPtrOrSmiAdd(first, count, parameter_mode), capacity, - parameter_mode))); + IntPtrOrSmiAdd(*first, *count), *capacity))); } Label if_fixed_double_array(this), empty(this), done(this, &var_result); TNode source_map = LoadMap(source); - GotoIf(IntPtrOrSmiEqual(IntPtrOrSmiConstant(0, parameter_mode), capacity, - parameter_mode), - &empty); + GotoIf(IntPtrOrSmiEqual(IntPtrOrSmiConstant(0), *capacity), &empty); if (extract_flags & ExtractFixedArrayFlag::kFixedDoubleArrays) { if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { @@ -4196,13 +4142,15 @@ TNode 
CodeStubAssembler::ExtractFixedArray( } } + const ParameterMode parameter_mode = + std::is_same::value ? SMI_PARAMETERS : INTPTR_PARAMETERS; if (extract_flags & ExtractFixedArrayFlag::kFixedArrays) { // Here we can only get |source| as FixedArray, never FixedDoubleArray. // PACKED_ELEMENTS is used to signify that the source is a FixedArray. TNode to_elements = ExtractToFixedArray( - source, first, count, capacity, source_map, PACKED_ELEMENTS, - allocation_flags, extract_flags, parameter_mode, convert_holes, - var_holes_converted, source_runtime_kind); + source, *first, *count, *capacity, source_map, PACKED_ELEMENTS, + allocation_flags, extract_flags, convert_holes, var_holes_converted, + source_elements_kind); var_result = to_elements; Goto(&done); } @@ -4213,21 +4161,21 @@ TNode CodeStubAssembler::ExtractFixedArray( if (convert_holes == HoleConversionMode::kConvertToUndefined) { TNode to_elements = ExtractFixedDoubleArrayFillingHoles( - source, first, count, capacity, source_map, var_holes_converted, - allocation_flags, extract_flags, parameter_mode); + source, *first, *count, *capacity, source_map, var_holes_converted, + allocation_flags, extract_flags); var_result = to_elements; } else { // We use PACKED_DOUBLE_ELEMENTS to signify that both the source and // the target are FixedDoubleArray. That it is PACKED or HOLEY does not // matter. 
ElementsKind kind = PACKED_DOUBLE_ELEMENTS; - TNode to_elements = AllocateFixedArray( - kind, capacity, parameter_mode, allocation_flags, source_map); - FillFixedArrayWithValue(kind, to_elements, count, capacity, + TNode to_elements = + AllocateFixedArray(kind, *capacity, allocation_flags, source_map); + FillFixedArrayWithValue(kind, to_elements, *count, *capacity, RootIndex::kTheHoleValue, parameter_mode); CopyElements(kind, to_elements, IntPtrConstant(0), source, - ParameterToIntPtr(first, parameter_mode), - ParameterToIntPtr(count, parameter_mode)); + ParameterToIntPtr(*first, parameter_mode), + ParameterToIntPtr(*count, parameter_mode)); var_result = to_elements; } @@ -4246,51 +4194,54 @@ TNode CodeStubAssembler::ExtractFixedArray( return var_result.value(); } +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::ExtractFixedArray( + TNode, base::Optional>, + base::Optional>, base::Optional>, + ExtractFixedArrayFlags, TVariable*, base::Optional>); + +template V8_EXPORT_PRIVATE TNode +CodeStubAssembler::ExtractFixedArray( + TNode, base::Optional>, + base::Optional>, base::Optional>, + ExtractFixedArrayFlags, TVariable*, base::Optional>); + void CodeStubAssembler::InitializePropertyArrayLength( - TNode property_array, Node* length, ParameterMode mode) { - CSA_ASSERT( - this, IntPtrOrSmiGreaterThan(length, IntPtrOrSmiConstant(0, mode), mode)); - CSA_ASSERT( - this, - IntPtrOrSmiLessThanOrEqual( - length, IntPtrOrSmiConstant(PropertyArray::LengthField::kMax, mode), - mode)); - StoreObjectFieldNoWriteBarrier(property_array, - PropertyArray::kLengthAndHashOffset, - ParameterToTagged(length, mode)); + TNode property_array, TNode length) { + CSA_ASSERT(this, IntPtrGreaterThan(length, IntPtrConstant(0))); + CSA_ASSERT(this, + IntPtrLessThanOrEqual( + length, IntPtrConstant(PropertyArray::LengthField::kMax))); + StoreObjectFieldNoWriteBarrier( + property_array, PropertyArray::kLengthAndHashOffset, SmiTag(length)); } TNode CodeStubAssembler::AllocatePropertyArray( - Node* 
capacity_node, ParameterMode mode, AllocationFlags flags) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity_node, mode)); - CSA_ASSERT(this, IntPtrOrSmiGreaterThan(capacity_node, - IntPtrOrSmiConstant(0, mode), mode)); - TNode total_size = - GetPropertyArrayAllocationSize(capacity_node, mode); + TNode capacity) { + CSA_ASSERT(this, IntPtrGreaterThan(capacity, IntPtrConstant(0))); + TNode total_size = GetPropertyArrayAllocationSize(capacity); - TNode array = Allocate(total_size, flags); + TNode array = Allocate(total_size, kNone); RootIndex map_index = RootIndex::kPropertyArrayMap; DCHECK(RootsTable::IsImmortalImmovable(map_index)); StoreMapNoWriteBarrier(array, map_index); TNode property_array = CAST(array); - InitializePropertyArrayLength(property_array, capacity_node, mode); + InitializePropertyArrayLength(property_array, capacity); return property_array; } void CodeStubAssembler::FillPropertyArrayWithUndefined( - TNode array, Node* from_node, Node* to_node, - ParameterMode mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(from_node, mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(to_node, mode)); + TNode array, TNode from_index, + TNode to_index) { ElementsKind kind = PACKED_ELEMENTS; TNode value = UndefinedConstant(); - BuildFastFixedArrayForEach( - array, kind, from_node, to_node, - [this, value](Node* array, Node* offset) { + BuildFastArrayForEach( + array, kind, from_index, to_index, + [this, value](TNode array, TNode offset) { StoreNoWriteBarrier(MachineRepresentation::kTagged, array, offset, value); }, - mode); + INTPTR_PARAMETERS); } void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, @@ -4312,9 +4263,10 @@ void CodeStubAssembler::FillFixedArrayWithValue(ElementsKind kind, float_value = LoadHeapNumberValue(CAST(value)); } - BuildFastFixedArrayForEach( + BuildFastArrayForEach( array, kind, from_node, to_node, - [this, value, float_value, kind](Node* array, Node* offset) { + [this, value, float_value, kind](TNode array, + TNode 
offset) { if (IsDoubleElementsKind(kind)) { StoreNoWriteBarrier(MachineRepresentation::kFloat64, array, offset, float_value); @@ -4346,12 +4298,10 @@ void CodeStubAssembler::StoreDoubleHole(TNode object, } } -void CodeStubAssembler::StoreFixedDoubleArrayHole( - TNode array, Node* index, ParameterMode parameter_mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(index, parameter_mode)); - TNode offset = - ElementOffsetFromIndex(index, PACKED_DOUBLE_ELEMENTS, parameter_mode, - FixedArray::kHeaderSize - kHeapObjectTag); +void CodeStubAssembler::StoreFixedDoubleArrayHole(TNode array, + TNode index) { + TNode offset = ElementOffsetFromIndex( + index, PACKED_DOUBLE_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag); CSA_ASSERT(this, IsOffsetInBounds( offset, LoadAndUntagFixedArrayBaseLength(array), FixedDoubleArray::kHeaderSize, PACKED_DOUBLE_ELEMENTS)); @@ -4472,7 +4422,7 @@ void CodeStubAssembler::MoveElements(ElementsKind kind, const TNode delta = IntPtrMul(IntPtrSub(dst_index, begin), IntPtrConstant(ElementsKindToByteSize(kind))); - auto loop_body = [&](Node* array, Node* offset) { + auto loop_body = [&](TNode array, TNode offset) { const TNode element = Load(array, offset); const TNode delta_offset = IntPtrAdd(offset, delta); Store(array, delta_offset, element); @@ -4485,17 +4435,15 @@ void CodeStubAssembler::MoveElements(ElementsKind kind, BIND(&iterate_forward); { // Make a loop for the stores. 
- BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body, - INTPTR_PARAMETERS, - ForEachDirection::kForward); + BuildFastArrayForEach(elements, kind, begin, end, loop_body, + INTPTR_PARAMETERS, ForEachDirection::kForward); Goto(&finished); } BIND(&iterate_backward); { - BuildFastFixedArrayForEach(elements, kind, begin, end, loop_body, - INTPTR_PARAMETERS, - ForEachDirection::kReverse); + BuildFastArrayForEach(elements, kind, begin, end, loop_body, + INTPTR_PARAMETERS, ForEachDirection::kReverse); Goto(&finished); } } @@ -4563,9 +4511,9 @@ void CodeStubAssembler::CopyElements(ElementsKind kind, const TNode delta = IntPtrMul(IntPtrSub(dst_index, src_index), IntPtrConstant(ElementsKindToByteSize(kind))); - BuildFastFixedArrayForEach( + BuildFastArrayForEach( src_elements, kind, begin, end, - [&](Node* array, Node* offset) { + [&](TNode array, TNode offset) { const TNode element = Load(array, offset); const TNode delta_offset = IntPtrAdd(offset, delta); if (write_barrier == SKIP_WRITE_BARRIER) { @@ -4582,19 +4530,22 @@ void CodeStubAssembler::CopyElements(ElementsKind kind, } } +template void CodeStubAssembler::CopyFixedArrayElements( ElementsKind from_kind, TNode from_array, - ElementsKind to_kind, TNode to_array, Node* first_element, - Node* element_count, Node* capacity, WriteBarrierMode barrier_mode, - ParameterMode mode, HoleConversionMode convert_holes, - TVariable* var_holes_converted) { + ElementsKind to_kind, TNode to_array, + TNode first_element, TNode element_count, + TNode capacity, WriteBarrierMode barrier_mode, + HoleConversionMode convert_holes, TVariable* var_holes_converted) { DCHECK_IMPLIES(var_holes_converted != nullptr, convert_holes == HoleConversionMode::kConvertToUndefined); - CSA_SLOW_ASSERT(this, MatchesParameterMode(element_count, mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(from_array, from_kind)); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(to_array, 
to_kind)); STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT indices are allowed"); + const int first_element_offset = FixedArray::kHeaderSize - kHeapObjectTag; Comment("[ CopyFixedArrayElements"); @@ -4618,6 +4569,8 @@ void CodeStubAssembler::CopyFixedArrayElements( Is64() ? ReinterpretCast(Int64Constant(kHoleNanInt64)) : ReinterpretCast(Int32Constant(kHoleNanLower32)); + const ParameterMode mode = + std::is_same::value ? SMI_PARAMETERS : INTPTR_PARAMETERS; // If copying might trigger a GC, we pre-initialize the FixedArray such that // it's always in a consistent state. if (convert_holes == HoleConversionMode::kConvertToUndefined) { @@ -4625,14 +4578,14 @@ void CodeStubAssembler::CopyFixedArrayElements( // Use undefined for the part that we copy and holes for the rest. // Later if we run into a hole in the source we can just skip the writing // to the target and are still guaranteed that we get an undefined. - FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode), + FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0), element_count, RootIndex::kUndefinedValue, mode); FillFixedArrayWithValue(to_kind, to_array, element_count, capacity, RootIndex::kTheHoleValue, mode); } else if (doubles_to_objects_conversion) { // Pre-initialized the target with holes so later if we run into a hole in // the source we can just skip the writing to the target. 
- FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0, mode), + FillFixedArrayWithValue(to_kind, to_array, IntPtrOrSmiConstant(0), capacity, RootIndex::kTheHoleValue, mode); } else if (element_count != capacity) { FillFixedArrayWithValue(to_kind, to_array, element_count, capacity, @@ -4640,27 +4593,25 @@ void CodeStubAssembler::CopyFixedArrayElements( } TNode first_from_element_offset = - ElementOffsetFromIndex(first_element, from_kind, mode, 0); + ElementOffsetFromIndex(first_element, from_kind, 0); TNode limit_offset = Signed(IntPtrAdd( first_from_element_offset, IntPtrConstant(first_element_offset))); - TVARIABLE( - IntPtrT, var_from_offset, - ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count, mode), - from_kind, mode, first_element_offset)); + TVARIABLE(IntPtrT, var_from_offset, + ElementOffsetFromIndex(IntPtrOrSmiAdd(first_element, element_count), + from_kind, first_element_offset)); // This second variable is used only when the element sizes of source and // destination arrays do not match. TVARIABLE(IntPtrT, var_to_offset); if (element_offset_matches) { var_to_offset = var_from_offset.value(); } else { - var_to_offset = ElementOffsetFromIndex(element_count, to_kind, mode, - first_element_offset); + var_to_offset = + ElementOffsetFromIndex(element_count, to_kind, first_element_offset); } - Variable* vars[] = {&var_from_offset, &var_to_offset, var_holes_converted}; - int num_vars = - var_holes_converted != nullptr ? 
arraysize(vars) : arraysize(vars) - 1; - Label decrement(this, num_vars, vars); + VariableList vars({&var_from_offset, &var_to_offset}, zone()); + if (var_holes_converted != nullptr) vars.push_back(var_holes_converted); + Label decrement(this, vars); TNode to_array_adjusted = element_offset_matches @@ -4757,12 +4708,6 @@ void CodeStubAssembler::CopyFixedArrayElements( Comment("] CopyFixedArrayElements"); } -TNode CodeStubAssembler::HeapObjectToJSAggregateError( - TNode heap_object, Label* fail) { - GotoIfNot(IsJSAggregateError(heap_object), fail); - return UncheckedCast(heap_object); -} - TNode CodeStubAssembler::HeapObjectToFixedArray( TNode base, Label* cast_fail) { Label fixed_array(this); @@ -4776,11 +4721,9 @@ TNode CodeStubAssembler::HeapObjectToFixedArray( void CodeStubAssembler::CopyPropertyArrayValues(TNode from_array, TNode to_array, - Node* property_count, + TNode property_count, WriteBarrierMode barrier_mode, - ParameterMode mode, DestroySource destroy_source) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(property_count, mode)); CSA_SLOW_ASSERT(this, Word32Or(IsPropertyArray(from_array), IsEmptyFixedArray(from_array))); Comment("[ CopyPropertyArrayValues"); @@ -4793,12 +4736,12 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode from_array, needs_write_barrier = true; } - Node* start = IntPtrOrSmiConstant(0, mode); + TNode start = IntPtrConstant(0); ElementsKind kind = PACKED_ELEMENTS; - BuildFastFixedArrayForEach( + BuildFastArrayForEach( from_array, kind, start, property_count, - [this, to_array, needs_write_barrier, destroy_source](Node* array, - Node* offset) { + [this, to_array, needs_write_barrier, destroy_source]( + TNode array, TNode offset) { TNode value = Load(array, offset); if (destroy_source == DestroySource::kNo) { @@ -4812,15 +4755,14 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode from_array, value); } }, - mode); + INTPTR_PARAMETERS); #ifdef DEBUG // Zap {from_array} if the copying above has made it invalid. 
if (destroy_source == DestroySource::kYes) { Label did_zap(this); GotoIf(IsEmptyFixedArray(from_array), &did_zap); - FillPropertyArrayWithUndefined(CAST(from_array), start, property_count, - mode); + FillPropertyArrayWithUndefined(CAST(from_array), start, property_count); Goto(&did_zap); BIND(&did_zap); @@ -4829,11 +4771,17 @@ void CodeStubAssembler::CopyPropertyArrayValues(TNode from_array, Comment("] CopyPropertyArrayValues"); } -Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, - Node* offset, - ElementsKind from_kind, - ElementsKind to_kind, - Label* if_hole) { +TNode CodeStubAssembler::CloneFixedArray( + TNode source, ExtractFixedArrayFlags flags) { + return ExtractFixedArray( + source, base::Optional>(IntPtrOrSmiConstant(0)), + base::Optional>(base::nullopt), + base::Optional>(base::nullopt), flags); +} + +Node* CodeStubAssembler::LoadElementAndPrepareForStore( + TNode array, TNode offset, ElementsKind from_kind, + ElementsKind to_kind, Label* if_hole) { CSA_ASSERT(this, IsFixedArrayWithKind(array, from_kind)); if (IsDoubleElementsKind(from_kind)) { TNode value = @@ -4858,75 +4806,86 @@ Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array, } } -Node* CodeStubAssembler::CalculateNewElementsCapacity(Node* old_capacity, - ParameterMode mode) { - CSA_SLOW_ASSERT(this, MatchesParameterMode(old_capacity, mode)); - Node* half_old_capacity = WordOrSmiShr(old_capacity, 1, mode); - Node* new_capacity = IntPtrOrSmiAdd(half_old_capacity, old_capacity, mode); - Node* padding = - IntPtrOrSmiConstant(JSObject::kMinAddedElementsCapacity, mode); - return IntPtrOrSmiAdd(new_capacity, padding, mode); +template +TNode CodeStubAssembler::CalculateNewElementsCapacity( + TNode old_capacity) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT old_capacity is allowed"); + Comment("TryGrowElementsCapacity"); + TNode half_old_capacity = WordOrSmiShr(old_capacity, 1); + TNode new_capacity = 
IntPtrOrSmiAdd(half_old_capacity, old_capacity); + TNode padding = + IntPtrOrSmiConstant(JSObject::kMinAddedElementsCapacity); + return IntPtrOrSmiAdd(new_capacity, padding); } +template V8_EXPORT_PRIVATE TNode + CodeStubAssembler::CalculateNewElementsCapacity(TNode); +template V8_EXPORT_PRIVATE TNode + CodeStubAssembler::CalculateNewElementsCapacity(TNode); + TNode CodeStubAssembler::TryGrowElementsCapacity( TNode object, TNode elements, ElementsKind kind, TNode key, Label* bailout) { CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); TNode capacity = LoadFixedArrayBaseLength(elements); - ParameterMode mode = OptimalParameterMode(); - return TryGrowElementsCapacity( - object, elements, kind, TaggedToParameter(key, mode), - TaggedToParameter(capacity, mode), mode, bailout); + return TryGrowElementsCapacity(object, elements, kind, + TaggedToParameter(key), + TaggedToParameter(capacity), bailout); } +template TNode CodeStubAssembler::TryGrowElementsCapacity( TNode object, TNode elements, ElementsKind kind, - Node* key, Node* capacity, ParameterMode mode, Label* bailout) { + TNode key, TNode capacity, Label* bailout) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT key and capacity nodes are allowed"); Comment("TryGrowElementsCapacity"); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, kind)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(key, mode)); // If the gap growth is too big, fall back to the runtime. 
- Node* max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap, mode); - Node* max_capacity = IntPtrOrSmiAdd(capacity, max_gap, mode); - GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity, mode), bailout); + TNode max_gap = IntPtrOrSmiConstant(JSObject::kMaxGap); + TNode max_capacity = IntPtrOrSmiAdd(capacity, max_gap); + GotoIf(UintPtrOrSmiGreaterThanOrEqual(key, max_capacity), bailout); // Calculate the capacity of the new backing store. - Node* new_capacity = CalculateNewElementsCapacity( - IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1, mode), mode), mode); + TNode new_capacity = CalculateNewElementsCapacity( + IntPtrOrSmiAdd(key, IntPtrOrSmiConstant(1))); + return GrowElementsCapacity(object, elements, kind, kind, capacity, - new_capacity, mode, bailout); + new_capacity, bailout); } +template TNode CodeStubAssembler::GrowElementsCapacity( TNode object, TNode elements, - ElementsKind from_kind, ElementsKind to_kind, Node* capacity, - Node* new_capacity, ParameterMode mode, Label* bailout) { + ElementsKind from_kind, ElementsKind to_kind, TNode capacity, + TNode new_capacity, Label* bailout) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT capacities are allowed"); Comment("[ GrowElementsCapacity"); CSA_SLOW_ASSERT(this, IsFixedArrayWithKindOrEmpty(elements, from_kind)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(capacity, mode)); - CSA_SLOW_ASSERT(this, MatchesParameterMode(new_capacity, mode)); // If size of the allocation for the new capacity doesn't fit in a page // that we can bump-pointer allocate from, fall back to the runtime. int max_size = FixedArrayBase::GetMaxLengthForNewSpaceAllocation(to_kind); - GotoIf(UintPtrOrSmiGreaterThanOrEqual( - new_capacity, IntPtrOrSmiConstant(max_size, mode), mode), + GotoIf(UintPtrOrSmiGreaterThanOrEqual(new_capacity, + IntPtrOrSmiConstant(max_size)), bailout); // Allocate the new backing store. 
TNode new_elements = - AllocateFixedArray(to_kind, new_capacity, mode); + AllocateFixedArray(to_kind, new_capacity); // Copy the elements from the old elements store to the new. // The size-check above guarantees that the |new_elements| is allocated // in new space so we can skip the write barrier. - CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, - UncheckedCast(capacity), - UncheckedCast(new_capacity), - SKIP_WRITE_BARRIER, mode); + CopyFixedArrayElements(from_kind, elements, to_kind, new_elements, capacity, + new_capacity, SKIP_WRITE_BARRIER); StoreObjectField(object, JSObject::kElementsOffset, new_elements); Comment("] GrowElementsCapacity"); @@ -5033,10 +4992,9 @@ void CodeStubAssembler::TaggedToWord32OrBigIntImpl( // We might need to loop after conversion. TVARIABLE(Object, var_value, value); OverwriteFeedback(var_feedback, BinaryOperationFeedback::kNone); - Variable* loop_vars[] = {&var_value, var_feedback}; - int num_vars = - var_feedback != nullptr ? arraysize(loop_vars) : arraysize(loop_vars) - 1; - Label loop(this, num_vars, loop_vars); + VariableList loop_vars({&var_value}, zone()); + if (var_feedback != nullptr) loop_vars.push_back(var_feedback); + Label loop(this, loop_vars); Goto(&loop); BIND(&loop); { @@ -5664,14 +5622,6 @@ TNode CodeStubAssembler::IsCallableMap(SloppyTNode map) { return IsSetWord32(LoadMapBitField(map)); } -TNode CodeStubAssembler::IsCoverageInfo(TNode object) { - return IsCoverageInfoMap(LoadMap(object)); -} - -TNode CodeStubAssembler::IsDebugInfo(TNode object) { - return HasInstanceType(object, DEBUG_INFO_TYPE); -} - TNode CodeStubAssembler::IsDeprecatedMap(SloppyTNode map) { CSA_ASSERT(this, IsMap(map)); return IsSetWord32(LoadMapBitField3(map)); @@ -5803,14 +5753,6 @@ TNode CodeStubAssembler::IsCallable(SloppyTNode object) { return IsCallableMap(LoadMap(object)); } -TNode CodeStubAssembler::IsCell(SloppyTNode object) { - return TaggedEqual(LoadMap(object), CellMapConstant()); -} - -TNode 
CodeStubAssembler::IsCode(SloppyTNode object) { - return HasInstanceType(object, CODE_TYPE); -} - TNode CodeStubAssembler::IsConstructorMap(SloppyTNode map) { CSA_ASSERT(this, IsMap(map)); return IsSetWord32(LoadMapBitField(map)); @@ -5861,6 +5803,15 @@ TNode CodeStubAssembler::IsSequentialStringInstanceType( Int32Constant(kSeqStringTag)); } +TNode CodeStubAssembler::IsSeqOneByteStringInstanceType( + TNode instance_type) { + CSA_ASSERT(this, IsStringInstanceType(instance_type)); + return Word32Equal( + Word32And(instance_type, + Int32Constant(kStringRepresentationMask | kStringEncodingMask)), + Int32Constant(kSeqStringTag | kOneByteStringTag)); +} + TNode CodeStubAssembler::IsConsStringInstanceType( SloppyTNode instance_type) { CSA_ASSERT(this, IsStringInstanceType(instance_type)); @@ -6002,10 +5953,6 @@ TNode CodeStubAssembler::IsJSPrimitiveWrapperMap(SloppyTNode map) { return IsJSPrimitiveWrapperInstanceType(LoadMapInstanceType(map)); } -TNode CodeStubAssembler::IsJSAggregateError(TNode object) { - return HasInstanceType(object, JS_AGGREGATE_ERROR_TYPE); -} - TNode CodeStubAssembler::IsJSArrayInstanceType( SloppyTNode instance_type) { return InstanceTypeEqual(instance_type, JS_ARRAY_TYPE); @@ -6029,13 +5976,6 @@ TNode CodeStubAssembler::IsJSAsyncGeneratorObject( return HasInstanceType(object, JS_ASYNC_GENERATOR_OBJECT_TYPE); } -TNode CodeStubAssembler::IsContext(SloppyTNode object) { - TNode instance_type = LoadInstanceType(object); - return UncheckedCast(Word32And( - Int32GreaterThanOrEqual(instance_type, Int32Constant(FIRST_CONTEXT_TYPE)), - Int32LessThanOrEqual(instance_type, Int32Constant(LAST_CONTEXT_TYPE)))); -} - TNode CodeStubAssembler::IsFixedArray(SloppyTNode object) { return HasInstanceType(object, FIXED_ARRAY_TYPE); } @@ -6059,21 +5999,11 @@ TNode CodeStubAssembler::IsNotWeakFixedArraySubclass( Int32Constant(LAST_WEAK_FIXED_ARRAY_TYPE)))); } -TNode CodeStubAssembler::IsPromiseCapability( - SloppyTNode object) { - return HasInstanceType(object, 
PROMISE_CAPABILITY_TYPE); -} - TNode CodeStubAssembler::IsPropertyArray( SloppyTNode object) { return HasInstanceType(object, PROPERTY_ARRAY_TYPE); } -TNode CodeStubAssembler::IsPromiseReaction( - SloppyTNode object) { - return HasInstanceType(object, PROMISE_REACTION_TYPE); -} - TNode CodeStubAssembler::IsPromiseReactionJobTask( TNode object) { TNode instance_type = LoadInstanceType(object); @@ -6081,16 +6011,6 @@ TNode CodeStubAssembler::IsPromiseReactionJobTask( LAST_PROMISE_REACTION_JOB_TASK_TYPE); } -TNode CodeStubAssembler::IsPromiseRejectReactionJobTask( - SloppyTNode object) { - return HasInstanceType(object, PROMISE_REJECT_REACTION_JOB_TASK_TYPE); -} - -TNode CodeStubAssembler::IsPromiseFulfillReactionJobTask( - SloppyTNode object) { - return HasInstanceType(object, PROMISE_FULFILL_REACTION_JOB_TASK_TYPE); -} - // This complicated check is due to elements oddities. If a smi array is empty // after Array.p.shift, it is replaced by the empty array constant. If it is // later filled with a double element, we try to grow it but pass in a double @@ -6136,23 +6056,6 @@ TNode CodeStubAssembler::IsPropertyCell(SloppyTNode object) { return IsPropertyCellMap(LoadMap(object)); } -TNode CodeStubAssembler::IsAccessorInfo(SloppyTNode object) { - return IsAccessorInfoMap(LoadMap(object)); -} - -TNode CodeStubAssembler::IsAccessorPair(SloppyTNode object) { - return IsAccessorPairMap(LoadMap(object)); -} - -TNode CodeStubAssembler::IsAllocationSite( - SloppyTNode object) { - return IsAllocationSiteInstanceType(LoadInstanceType(object)); -} - -TNode CodeStubAssembler::IsHeapNumber(SloppyTNode object) { - return IsHeapNumberMap(LoadMap(object)); -} - TNode CodeStubAssembler::IsHeapNumberInstanceType( SloppyTNode instance_type) { return InstanceTypeEqual(instance_type, HEAP_NUMBER_TYPE); @@ -6167,15 +6070,6 @@ TNode CodeStubAssembler::IsOddballInstanceType( return InstanceTypeEqual(instance_type, ODDBALL_TYPE); } -TNode CodeStubAssembler::IsFeedbackCell(SloppyTNode object) { 
- return HasInstanceType(object, FEEDBACK_CELL_TYPE); -} - -TNode CodeStubAssembler::IsFeedbackVector( - SloppyTNode object) { - return IsFeedbackVectorMap(LoadMap(object)); -} - TNode CodeStubAssembler::IsName(SloppyTNode object) { return IsNameInstanceType(LoadInstanceType(object)); } @@ -6189,15 +6083,15 @@ TNode CodeStubAssembler::IsString(SloppyTNode object) { return IsStringInstanceType(LoadInstanceType(object)); } +TNode CodeStubAssembler::IsSeqOneByteString(TNode object) { + return IsSeqOneByteStringInstanceType(LoadInstanceType(object)); +} + TNode CodeStubAssembler::IsSymbolInstanceType( SloppyTNode instance_type) { return InstanceTypeEqual(instance_type, SYMBOL_TYPE); } -TNode CodeStubAssembler::IsSymbol(SloppyTNode object) { - return IsSymbolMap(LoadMap(object)); -} - TNode CodeStubAssembler::IsInternalizedStringInstanceType( TNode instance_type) { STATIC_ASSERT(kNotInternalizedTag != 0); @@ -6263,34 +6157,11 @@ TNode CodeStubAssembler::IsPrimitiveInstanceType( Int32Constant(LAST_PRIMITIVE_HEAP_OBJECT_TYPE)); } -TNode CodeStubAssembler::IsPrivateSymbol( - SloppyTNode object) { - return Select( - IsSymbol(object), - [=] { - TNode symbol = CAST(object); - TNode flags = - LoadObjectField(symbol, Symbol::kFlagsOffset); - return IsSetWord32(flags); - }, - [=] { return Int32FalseConstant(); }); -} - TNode CodeStubAssembler::IsPrivateName(SloppyTNode symbol) { TNode flags = LoadObjectField(symbol, Symbol::kFlagsOffset); return IsSetWord32(flags); } -TNode CodeStubAssembler::IsNativeContext( - SloppyTNode object) { - return HasInstanceType(object, NATIVE_CONTEXT_TYPE); -} - -TNode CodeStubAssembler::IsFixedDoubleArray( - SloppyTNode object) { - return TaggedEqual(LoadMap(object), FixedDoubleArrayMapConstant()); -} - TNode CodeStubAssembler::IsHashTable(SloppyTNode object) { TNode instance_type = LoadInstanceType(object); return UncheckedCast( @@ -6329,11 +6200,6 @@ TNode CodeStubAssembler::IsJSFunctionInstanceType( return InstanceTypeEqual(instance_type, 
JS_FUNCTION_TYPE); } -TNode CodeStubAssembler::IsAllocationSiteInstanceType( - SloppyTNode instance_type) { - return InstanceTypeEqual(instance_type, ALLOCATION_SITE_TYPE); -} - TNode CodeStubAssembler::IsJSFunction(SloppyTNode object) { return IsJSFunctionMap(LoadMap(object)); } @@ -6373,12 +6239,6 @@ TNode CodeStubAssembler::IsJSRegExp(SloppyTNode object) { return HasInstanceType(object, JS_REG_EXP_TYPE); } -TNode CodeStubAssembler::IsNumber(SloppyTNode object) { - return Select( - TaggedIsSmi(object), [=] { return Int32TrueConstant(); }, - [=] { return IsHeapNumber(CAST(object)); }); -} - TNode CodeStubAssembler::IsNumeric(SloppyTNode object) { return Select( TaggedIsSmi(object), [=] { return Int32TrueConstant(); }, @@ -6504,12 +6364,16 @@ TNode CodeStubAssembler::IsNumberArrayIndex(TNode number) { [=] { return IsHeapNumberUint32(CAST(number)); }); } +template TNode CodeStubAssembler::FixedArraySizeDoesntFitInNewSpace( - Node* element_count, int base_size, ParameterMode mode) { + TNode element_count, int base_size) { + static_assert( + std::is_same::value || std::is_same::value, + "Only Smi or IntPtrT element_count is allowed"); int max_newspace_elements = (kMaxRegularHeapObjectSize - base_size) / kTaggedSize; return IntPtrOrSmiGreaterThan( - element_count, IntPtrOrSmiConstant(max_newspace_elements, mode), mode); + element_count, IntPtrOrSmiConstant(max_newspace_elements)); } TNode CodeStubAssembler::StringCharCodeAt(TNode string, @@ -6844,14 +6708,42 @@ TNode CodeStubAssembler::NumberToString(TNode input, TNode hash = Word32And(SmiToInt32(smi_input.value()), mask); TNode entry_index = Signed(ChangeUint32ToWord(Int32Add(hash, hash))); - TNode smi_key = UnsafeLoadFixedArrayElement( - number_string_cache, entry_index, 0, INTPTR_PARAMETERS); - GotoIf(TaggedNotEqual(smi_key, smi_input.value()), bailout); + TNode smi_key = + UnsafeLoadFixedArrayElement(number_string_cache, entry_index); + Label if_smi_cache_missed(this); + GotoIf(TaggedNotEqual(smi_key, 
smi_input.value()), &if_smi_cache_missed); // Smi match, return value from cache entry. result = CAST(UnsafeLoadFixedArrayElement(number_string_cache, entry_index, - kTaggedSize, INTPTR_PARAMETERS)); + kTaggedSize)); Goto(&done); + + BIND(&if_smi_cache_missed); + { + Label store_to_cache(this); + + // Bailout when the cache is not full-size. + const int kFullCacheSize = + isolate()->heap()->MaxNumberToStringCacheSize(); + Branch(IntPtrLessThan(number_string_cache_length, + IntPtrConstant(kFullCacheSize)), + bailout, &store_to_cache); + + BIND(&store_to_cache); + { + // Generate string and update string hash field. + result = NumberToStringSmi(SmiToInt32(smi_input.value()), + Int32Constant(10), bailout); + + // Store string into cache. + StoreFixedArrayElement(number_string_cache, entry_index, + smi_input.value()); + StoreFixedArrayElement(number_string_cache, + IntPtrAdd(entry_index, IntPtrConstant(1)), + result.value()); + Goto(&done); + } + } } BIND(&done); return result.value(); @@ -6861,6 +6753,8 @@ TNode CodeStubAssembler::NumberToString(TNode input) { TVARIABLE(String, result); Label runtime(this, Label::kDeferred), done(this, &result); + GotoIfForceSlowPath(&runtime); + result = NumberToString(input, &runtime); Goto(&done); @@ -7149,7 +7043,7 @@ TNode CodeStubAssembler::ToUint32(SloppyTNode context, Label out(this); - VARIABLE(var_result, MachineRepresentation::kTagged, input); + TVARIABLE(Object, var_result, input); // Early exit for positive smis. { @@ -7161,7 +7055,7 @@ TNode CodeStubAssembler::ToUint32(SloppyTNode context, } const TNode number = ToNumber(context, input); - var_result.Bind(number); + var_result = number; // Perhaps we have a positive smi now. 
{ @@ -7177,7 +7071,7 @@ TNode CodeStubAssembler::ToUint32(SloppyTNode context, { const TNode uint32_value = SmiToInt32(CAST(number)); TNode float64_value = ChangeUint32ToFloat64(uint32_value); - var_result.Bind(AllocateHeapNumberWithValue(float64_value)); + var_result = AllocateHeapNumberWithValue(float64_value); Goto(&out); } @@ -7229,13 +7123,13 @@ TNode CodeStubAssembler::ToUint32(SloppyTNode context, x = Float64Mod(x, float_two_32); const TNode result = ChangeFloat64ToTagged(x); - var_result.Bind(result); + var_result = result; Goto(&out); } BIND(&return_zero); { - var_result.Bind(SmiConstant(0)); + var_result = SmiConstant(0); Goto(&out); } } @@ -7246,14 +7140,14 @@ TNode CodeStubAssembler::ToUint32(SloppyTNode context, TNode CodeStubAssembler::ToString_Inline(SloppyTNode context, SloppyTNode input) { - VARIABLE(var_result, MachineRepresentation::kTagged, input); + TVARIABLE(Object, var_result, input); Label stub_call(this, Label::kDeferred), out(this); GotoIf(TaggedIsSmi(input), &stub_call); Branch(IsString(CAST(input)), &out, &stub_call); BIND(&stub_call); - var_result.Bind(CallBuiltin(Builtins::kToString, context, input)); + var_result = CallBuiltin(Builtins::kToString, context, input); Goto(&out); BIND(&out); @@ -7297,6 +7191,12 @@ TNode CodeStubAssembler::ToLength_Inline(SloppyTNode context, [=] { return CAST(CallBuiltin(Builtins::kToLength, context, input)); }); } +TNode CodeStubAssembler::OrdinaryToPrimitive( + TNode context, TNode input, OrdinaryToPrimitiveHint hint) { + Callable callable = CodeFactory::OrdinaryToPrimitive(isolate(), hint); + return CallStub(callable, context, input); +} + TNode CodeStubAssembler::DecodeWord32(SloppyTNode word32, uint32_t shift, uint32_t mask) { DCHECK_EQ((mask >> shift) << shift, mask); @@ -7493,8 +7393,8 @@ void CodeStubAssembler::TryInternalizeString( SloppyTNode string, Label* if_index, TVariable* var_index, Label* if_internalized, TVariable* var_internalized, Label* if_not_internalized, Label* if_bailout) { - 
TNode function = - ExternalConstant(ExternalReference::try_internalize_string_function()); + TNode function = ExternalConstant( + ExternalReference::try_string_to_index_or_lookup_existing()); const TNode isolate_ptr = ExternalConstant(ExternalReference::isolate_address(isolate())); TNode result = @@ -7703,11 +7603,11 @@ void CodeStubAssembler::NameDictionaryLookup( TVARIABLE(IntPtrT, var_count, count); TVARIABLE(IntPtrT, var_entry, entry); - Variable* loop_vars[] = {&var_count, &var_entry, var_name_index}; - Label loop(this, arraysize(loop_vars), loop_vars); + Label loop(this, {&var_count, &var_entry, var_name_index}); Goto(&loop); BIND(&loop); { + Label next_probe(this); TNode entry = var_entry.value(); TNode index = EntryToIndex(entry); @@ -7717,13 +7617,18 @@ void CodeStubAssembler::NameDictionaryLookup( CAST(UnsafeLoadFixedArrayElement(dictionary, index)); GotoIf(TaggedEqual(current, undefined), if_not_found); if (mode == kFindExisting) { + if (Dictionary::ShapeT::kMatchNeedsHoleCheck) { + GotoIf(TaggedEqual(current, TheHoleConstant()), &next_probe); + } current = LoadName(current); GotoIf(TaggedEqual(current, unique_name), if_found); } else { DCHECK_EQ(kFindInsertionIndex, mode); GotoIf(TaggedEqual(current, TheHoleConstant()), if_not_found); } + Goto(&next_probe); + BIND(&next_probe); // See Dictionary::NextProbe(). 
Increment(&var_count); entry = Signed(WordAnd(IntPtrAdd(entry, var_count.value()), mask)); @@ -7779,8 +7684,7 @@ void CodeStubAssembler::NumberDictionaryLookup( TNode the_hole = TheHoleConstant(); TVARIABLE(IntPtrT, var_count, count); - Variable* loop_vars[] = {&var_count, var_entry}; - Label loop(this, 2, loop_vars); + Label loop(this, {&var_count, var_entry}); *var_entry = entry; Goto(&loop); BIND(&loop); @@ -8149,10 +8053,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty( &var_is_symbol_processing_loop, &var_start_key_index, &var_end_key_index}, zone()); - Label descriptor_array_loop( - this, {&var_descriptors, &var_stable, &var_has_symbol, - &var_is_symbol_processing_loop, &var_start_key_index, - &var_end_key_index}); + Label descriptor_array_loop(this, list); Goto(&descriptor_array_loop); BIND(&descriptor_array_loop); @@ -8422,16 +8323,6 @@ void CodeStubAssembler::Lookup(TNode unique_name, TNode array, } } -TNode CodeStubAssembler::IsSimpleObjectMap(TNode map) { - uint32_t mask = Map::Bits1::HasNamedInterceptorBit::kMask | - Map::Bits1::IsAccessCheckNeededBit::kMask; - // !IsSpecialReceiverType && !IsNamedInterceptor && !IsAccessCheckNeeded - return Select( - IsSpecialReceiverInstanceType(LoadMapInstanceType(map)), - [=] { return Int32FalseConstant(); }, - [=] { return IsClearWord32(LoadMapBitField(map), mask); }); -} - void CodeStubAssembler::TryLookupPropertyInSimpleObject( TNode object, TNode map, TNode unique_name, Label* if_found_fast, Label* if_found_dict, @@ -8960,9 +8851,8 @@ void CodeStubAssembler::TryLookupElement( GotoIfNot(UintPtrLessThan(intptr_index, length), &if_oob); // Check if the element is a double hole, but don't load it. 
- LoadFixedDoubleArrayElement(CAST(elements), intptr_index, - MachineType::None(), 0, INTPTR_PARAMETERS, - if_not_found); + LoadFixedDoubleArrayElement(CAST(elements), intptr_index, if_not_found, + MachineType::None()); Goto(if_found); } BIND(&if_isdictionary); @@ -9672,11 +9562,11 @@ TNode CodeStubAssembler::Float64ToUint8Clamped( return UncheckedCast(var_value.value()); } -Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( +template <> +TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( TNode input, ElementsKind elements_kind, TNode context) { DCHECK(IsTypedArrayElementsKind(elements_kind)); - MachineRepresentation rep; switch (elements_kind) { case UINT8_ELEMENTS: case INT8_ELEMENTS: @@ -9685,23 +9575,13 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( case UINT32_ELEMENTS: case INT32_ELEMENTS: case UINT8_CLAMPED_ELEMENTS: - rep = MachineRepresentation::kWord32; - break; - case FLOAT32_ELEMENTS: - rep = MachineRepresentation::kFloat32; - break; - case FLOAT64_ELEMENTS: - rep = MachineRepresentation::kFloat64; break; - case BIGINT64_ELEMENTS: - case BIGUINT64_ELEMENTS: - return ToBigInt(context, input); default: UNREACHABLE(); } - VARIABLE(var_result, rep); - VARIABLE(var_input, MachineRepresentation::kTagged, input); + TVARIABLE(Word32T, var_result); + TVARIABLE(Object, var_input, input); Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this), convert(this), loop(this, &var_input); Goto(&loop); @@ -9710,52 +9590,134 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( // We can handle both HeapNumber and Oddball here, since Oddball has the // same layout as the HeapNumber for the HeapNumber::value field. This // way we can also properly optimize stores of oddballs to typed arrays. 
- GotoIf(IsHeapNumber(var_input.value()), &if_heapnumber_or_oddball); + TNode heap_object = CAST(var_input.value()); + GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball); STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset, Oddball::kToNumberRawOffset); - Branch(HasInstanceType(var_input.value(), ODDBALL_TYPE), - &if_heapnumber_or_oddball, &convert); + Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball, + &convert); BIND(&if_heapnumber_or_oddball); { - TNode value = UncheckedCast(LoadObjectField( - var_input.value(), HeapNumber::kValueOffset, MachineType::Float64())); - if (rep == MachineRepresentation::kWord32) { - if (elements_kind == UINT8_CLAMPED_ELEMENTS) { - var_result.Bind(Float64ToUint8Clamped(value)); - } else { - var_result.Bind(TruncateFloat64ToWord32(value)); - } - } else if (rep == MachineRepresentation::kFloat32) { - var_result.Bind(TruncateFloat64ToFloat32(value)); + TNode value = + LoadObjectField(heap_object, HeapNumber::kValueOffset); + if (elements_kind == UINT8_CLAMPED_ELEMENTS) { + var_result = Float64ToUint8Clamped(value); } else { - DCHECK_EQ(MachineRepresentation::kFloat64, rep); - var_result.Bind(value); + var_result = TruncateFloat64ToWord32(value); } Goto(&done); } BIND(&if_smi); { - TNode value = SmiToInt32(var_input.value()); - if (rep == MachineRepresentation::kFloat32) { - var_result.Bind(RoundInt32ToFloat32(value)); - } else if (rep == MachineRepresentation::kFloat64) { - var_result.Bind(ChangeInt32ToFloat64(value)); + TNode value = SmiToInt32(CAST(var_input.value())); + if (elements_kind == UINT8_CLAMPED_ELEMENTS) { + var_result = Int32ToUint8Clamped(value); } else { - DCHECK_EQ(MachineRepresentation::kWord32, rep); - if (elements_kind == UINT8_CLAMPED_ELEMENTS) { - var_result.Bind(Int32ToUint8Clamped(value)); - } else { - var_result.Bind(value); - } + var_result = value; } Goto(&done); } BIND(&convert); { - var_input.Bind(CallBuiltin(Builtins::kNonNumberToNumber, context, input)); + 
var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input); + Goto(&loop); + } + + BIND(&done); + return var_result.value(); +} + +template <> +TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( + TNode input, ElementsKind elements_kind, TNode context) { + DCHECK(IsTypedArrayElementsKind(elements_kind)); + CHECK_EQ(elements_kind, FLOAT32_ELEMENTS); + + TVARIABLE(Float32T, var_result); + TVARIABLE(Object, var_input, input); + Label done(this, &var_result), if_smi(this), if_heapnumber_or_oddball(this), + convert(this), loop(this, &var_input); + Goto(&loop); + BIND(&loop); + GotoIf(TaggedIsSmi(var_input.value()), &if_smi); + // We can handle both HeapNumber and Oddball here, since Oddball has the + // same layout as the HeapNumber for the HeapNumber::value field. This + // way we can also properly optimize stores of oddballs to typed arrays. + TNode heap_object = CAST(var_input.value()); + GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball); + STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset, + Oddball::kToNumberRawOffset); + Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball, + &convert); + + BIND(&if_heapnumber_or_oddball); + { + TNode value = + LoadObjectField(heap_object, HeapNumber::kValueOffset); + var_result = TruncateFloat64ToFloat32(value); + Goto(&done); + } + + BIND(&if_smi); + { + TNode value = SmiToInt32(CAST(var_input.value())); + var_result = RoundInt32ToFloat32(value); + Goto(&done); + } + + BIND(&convert); + { + var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input); + Goto(&loop); + } + + BIND(&done); + return var_result.value(); +} + +template <> +TNode CodeStubAssembler::PrepareValueForWriteToTypedArray( + TNode input, ElementsKind elements_kind, TNode context) { + DCHECK(IsTypedArrayElementsKind(elements_kind)); + CHECK_EQ(elements_kind, FLOAT64_ELEMENTS); + + TVARIABLE(Float64T, var_result); + TVARIABLE(Object, var_input, input); + Label done(this, &var_result), 
if_smi(this), if_heapnumber_or_oddball(this), + convert(this), loop(this, &var_input); + Goto(&loop); + BIND(&loop); + GotoIf(TaggedIsSmi(var_input.value()), &if_smi); + // We can handle both HeapNumber and Oddball here, since Oddball has the + // same layout as the HeapNumber for the HeapNumber::value field. This + // way we can also properly optimize stores of oddballs to typed arrays. + TNode heap_object = CAST(var_input.value()); + GotoIf(IsHeapNumber(heap_object), &if_heapnumber_or_oddball); + STATIC_ASSERT_FIELD_OFFSETS_EQUAL(HeapNumber::kValueOffset, + Oddball::kToNumberRawOffset); + Branch(HasInstanceType(heap_object, ODDBALL_TYPE), &if_heapnumber_or_oddball, + &convert); + + BIND(&if_heapnumber_or_oddball); + { + var_result = + LoadObjectField(heap_object, HeapNumber::kValueOffset); + Goto(&done); + } + + BIND(&if_smi); + { + TNode value = SmiToInt32(CAST(var_input.value())); + var_result = ChangeInt32ToFloat64(value); + Goto(&done); + } + + BIND(&convert); + { + var_input = CallBuiltin(Builtins::kNonNumberToNumber, context, input); Goto(&loop); } @@ -9763,6 +9725,34 @@ Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( return var_result.value(); } +Node* CodeStubAssembler::PrepareValueForWriteToTypedArray( + TNode input, ElementsKind elements_kind, TNode context) { + DCHECK(IsTypedArrayElementsKind(elements_kind)); + + switch (elements_kind) { + case UINT8_ELEMENTS: + case INT8_ELEMENTS: + case UINT16_ELEMENTS: + case INT16_ELEMENTS: + case UINT32_ELEMENTS: + case INT32_ELEMENTS: + case UINT8_CLAMPED_ELEMENTS: + return PrepareValueForWriteToTypedArray(input, elements_kind, + context); + case FLOAT32_ELEMENTS: + return PrepareValueForWriteToTypedArray(input, elements_kind, + context); + case FLOAT64_ELEMENTS: + return PrepareValueForWriteToTypedArray(input, elements_kind, + context); + case BIGINT64_ELEMENTS: + case BIGUINT64_ELEMENTS: + return ToBigInt(context, input); + default: + UNREACHABLE(); + } +} + void 
CodeStubAssembler::BigIntToRawBytes(TNode bigint, TVariable* var_low, TVariable* var_high) { @@ -9952,8 +9942,8 @@ void CodeStubAssembler::EmitElementStore( if (IsGrowStoreMode(store_mode) && !(IsSealedElementsKind(elements_kind) || IsNonextensibleElementsKind(elements_kind))) { - elements = CAST(CheckForCapacityGrow(object, elements, elements_kind, - length, intptr_key, bailout)); + elements = CheckForCapacityGrow(object, elements, elements_kind, length, + intptr_key, bailout); } else { GotoIfNot(UintPtrLessThan(Unsigned(intptr_key), length), bailout); } @@ -9973,8 +9963,8 @@ void CodeStubAssembler::EmitElementStore( IsNonextensibleElementsKind(elements_kind))) { CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements)))); } else if (IsCOWHandlingStoreMode(store_mode)) { - elements = CopyElementsOnWrite(object, elements, elements_kind, length, - parameter_mode, bailout); + elements = CopyElementsOnWrite(object, elements, elements_kind, + Signed(length), bailout); } CSA_ASSERT(this, Word32BinaryNot(IsFixedCOWArrayMap(LoadMap(elements)))); @@ -9982,11 +9972,11 @@ void CodeStubAssembler::EmitElementStore( parameter_mode); } -Node* CodeStubAssembler::CheckForCapacityGrow( +TNode CodeStubAssembler::CheckForCapacityGrow( TNode object, TNode elements, ElementsKind kind, TNode length, TNode key, Label* bailout) { DCHECK(IsFastElementsKind(kind)); - VARIABLE(checked_elements, MachineRepresentation::kTagged); + TVARIABLE(FixedArrayBase, checked_elements); Label grow_case(this), no_grow_case(this), done(this), grow_bailout(this, Label::kDeferred); @@ -10003,16 +9993,15 @@ Node* CodeStubAssembler::CheckForCapacityGrow( { TNode current_capacity = SmiUntag(LoadFixedArrayBaseLength(elements)); - checked_elements.Bind(elements); + checked_elements = elements; Label fits_capacity(this); // If key is negative, we will notice in Runtime::kGrowArrayElements. 
GotoIf(UintPtrLessThan(key, current_capacity), &fits_capacity); { - Node* new_elements = - TryGrowElementsCapacity(object, elements, kind, key, current_capacity, - INTPTR_PARAMETERS, &grow_bailout); - checked_elements.Bind(new_elements); + TNode new_elements = TryGrowElementsCapacity( + object, elements, kind, key, current_capacity, &grow_bailout); + checked_elements = new_elements; Goto(&fits_capacity); } @@ -10023,8 +10012,9 @@ Node* CodeStubAssembler::CheckForCapacityGrow( TNode maybe_elements = CallRuntime( Runtime::kGrowArrayElements, NoContextConstant(), object, tagged_key); GotoIf(TaggedIsSmi(maybe_elements), bailout); - CSA_ASSERT(this, IsFixedArrayWithKind(CAST(maybe_elements), kind)); - checked_elements.Bind(maybe_elements); + TNode new_elements = CAST(maybe_elements); + CSA_ASSERT(this, IsFixedArrayWithKind(new_elements, kind)); + checked_elements = new_elements; Goto(&fits_capacity); } @@ -10040,7 +10030,7 @@ Node* CodeStubAssembler::CheckForCapacityGrow( BIND(&no_grow_case); { GotoIfNot(UintPtrLessThan(key, length), bailout); - checked_elements.Bind(elements); + checked_elements = elements; Goto(&done); } @@ -10050,16 +10040,15 @@ Node* CodeStubAssembler::CheckForCapacityGrow( TNode CodeStubAssembler::CopyElementsOnWrite( TNode object, TNode elements, ElementsKind kind, - Node* length, ParameterMode mode, Label* bailout) { + TNode length, Label* bailout) { TVARIABLE(FixedArrayBase, new_elements_var, elements); Label done(this); GotoIfNot(IsFixedCOWArrayMap(LoadMap(elements)), &done); { - Node* capacity = - TaggedToParameter(LoadFixedArrayBaseLength(elements), mode); + TNode capacity = SmiUntag(LoadFixedArrayBaseLength(elements)); TNode new_elements = GrowElementsCapacity( - object, elements, kind, kind, length, capacity, mode, bailout); + object, elements, kind, kind, length, capacity, bailout); new_elements_var = new_elements; Goto(&done); } @@ -10085,8 +10074,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode object, Label done(this); 
GotoIf(TaggedEqual(elements, EmptyFixedArrayConstant()), &done); - // TODO(ishell): Use OptimalParameterMode(). - ParameterMode mode = INTPTR_PARAMETERS; + // TODO(ishell): Use BInt for elements_length and array_length. TNode elements_length = SmiUntag(LoadFixedArrayBaseLength(elements)); TNode array_length = Select( @@ -10100,7 +10088,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode object, CSA_ASSERT(this, WordNotEqual(elements_length, IntPtrConstant(0))); GrowElementsCapacity(object, elements, from_kind, to_kind, array_length, - elements_length, mode, bailout); + elements_length, bailout); Goto(&done); BIND(&done); } @@ -10316,10 +10304,10 @@ template TNode CodeStubAssembler::BuildFastLoop( TNode end_index, const FastLoopBody& body, int increment, IndexAdvanceMode advance_mode); -void CodeStubAssembler::BuildFastFixedArrayForEach( +void CodeStubAssembler::BuildFastArrayForEach( const CodeStubAssembler::VariableList& vars, Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, - Node* last_element_exclusive, const FastFixedArrayForEachBody& body, + Node* last_element_exclusive, const FastArrayForEachBody& body, ParameterMode mode, ForEachDirection direction) { STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize); CSA_SLOW_ASSERT(this, MatchesParameterMode(first_element_inclusive, mode)); @@ -10339,14 +10327,14 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( TNode index = IntPtrConstant(i); TNode offset = ElementOffsetFromIndex( index, kind, FixedArray::kHeaderSize - kHeapObjectTag); - body(fixed_array, offset); + body(CAST(fixed_array), offset); } } else { for (int i = last_val - 1; i >= first_val; --i) { TNode index = IntPtrConstant(i); TNode offset = ElementOffsetFromIndex( index, kind, FixedArray::kHeaderSize - kHeapObjectTag); - body(fixed_array, offset); + body(CAST(fixed_array), offset); } } return; @@ -10364,15 +10352,16 @@ void CodeStubAssembler::BuildFastFixedArrayForEach( int increment = 
IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize; BuildFastLoop( vars, start, limit, - [&](TNode offset) { body(fixed_array, offset); }, + [&](TNode offset) { body(CAST(fixed_array), offset); }, direction == ForEachDirection::kReverse ? -increment : increment, direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre : IndexAdvanceMode::kPost); } +template void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace( - Node* element_count, Label* doesnt_fit, int base_size, ParameterMode mode) { - GotoIf(FixedArraySizeDoesntFitInNewSpace(element_count, base_size, mode), + TNode element_count, Label* doesnt_fit, int base_size) { + GotoIf(FixedArraySizeDoesntFitInNewSpace(element_count, base_size), doesnt_fit); } @@ -11013,7 +11002,7 @@ void CodeStubAssembler::GenerateEqual_Same(SloppyTNode value, BIND(&if_boolean); { - CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny); + CombineFeedback(var_type_feedback, CompareOperationFeedback::kBoolean); Goto(if_equal); } @@ -11095,60 +11084,75 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_smi); { Label if_right_smi(this), if_right_not_smi(this); + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kSignedSmall); Branch(TaggedIsSmi(right), &if_right_smi, &if_right_not_smi); BIND(&if_right_smi); { // We have already checked for {left} and {right} being the same value, // so when we get here they must be different Smis. - CombineFeedback(var_type_feedback, - CompareOperationFeedback::kSignedSmall); Goto(&if_notequal); } BIND(&if_right_not_smi); - TNode right_map = LoadMap(CAST(right)); - Label if_right_heapnumber(this), if_right_boolean(this), - if_right_bigint(this, Label::kDeferred), - if_right_receiver(this, Label::kDeferred); - GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber); - // {left} is Smi and {right} is not HeapNumber or Smi. 
- if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } - GotoIf(IsBooleanMap(right_map), &if_right_boolean); - TNode right_type = LoadMapInstanceType(right_map); - GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber); - GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint); - Branch(IsJSReceiverInstanceType(right_type), &if_right_receiver, - &if_notequal); - - BIND(&if_right_heapnumber); { - var_left_float = SmiToFloat64(CAST(left)); - var_right_float = LoadHeapNumberValue(CAST(right)); - CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); - Goto(&do_float_comparison); - } + TNode right_map = LoadMap(CAST(right)); + Label if_right_heapnumber(this), if_right_boolean(this), + if_right_oddball(this), if_right_bigint(this, Label::kDeferred), + if_right_receiver(this, Label::kDeferred); + GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber); - BIND(&if_right_boolean); - { - var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); - Goto(&loop); - } + // {left} is Smi and {right} is not HeapNumber or Smi. 
+ TNode right_type = LoadMapInstanceType(right_map); + GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber); + GotoIf(IsOddballInstanceType(right_type), &if_right_oddball); + GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint); + GotoIf(IsJSReceiverInstanceType(right_type), &if_right_receiver); + CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny); + Goto(&if_notequal); - BIND(&if_right_bigint); - { - result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, - NoContextConstant(), right, left)); - Goto(&end); - } + BIND(&if_right_heapnumber); + { + CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); + var_left_float = SmiToFloat64(CAST(left)); + var_right_float = LoadHeapNumberValue(CAST(right)); + Goto(&do_float_comparison); + } - BIND(&if_right_receiver); - { - Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); - var_right = CallStub(callable, context, right); - Goto(&loop); + BIND(&if_right_oddball); + { + Label if_right_boolean(this); + GotoIf(IsBooleanMap(right_map), &if_right_boolean); + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kOddball); + Goto(&if_notequal); + + BIND(&if_right_boolean); + { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kBoolean); + var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); + Goto(&loop); + } + } + + BIND(&if_right_bigint); + { + CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); + result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, + NoContextConstant(), right, left)); + Goto(&end); + } + + BIND(&if_right_receiver); + { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kReceiver); + Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); + var_right = CallStub(callable, context, right); + Goto(&loop); + } } } @@ -11187,29 +11191,41 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_number); { Label if_right_not_number(this); + + 
CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); GotoIf(Word32NotEqual(left_type, right_type), &if_right_not_number); var_left_float = LoadHeapNumberValue(CAST(left)); var_right_float = LoadHeapNumberValue(CAST(right)); - CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); Goto(&do_float_comparison); BIND(&if_right_not_number); { - Label if_right_boolean(this); - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + Label if_right_oddball(this); + GotoIf(IsStringInstanceType(right_type), &do_right_stringtonumber); - GotoIf(IsBooleanMap(right_map), &if_right_boolean); + GotoIf(IsOddballInstanceType(right_type), &if_right_oddball); GotoIf(IsBigIntInstanceType(right_type), &use_symmetry); - Branch(IsJSReceiverInstanceType(right_type), &use_symmetry, - &if_notequal); + GotoIf(IsJSReceiverInstanceType(right_type), &use_symmetry); + CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny); + Goto(&if_notequal); - BIND(&if_right_boolean); + BIND(&if_right_oddball); { - var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); - Goto(&loop); + Label if_right_boolean(this); + GotoIf(IsBooleanMap(right_map), &if_right_boolean); + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kOddball); + Goto(&if_notequal); + + BIND(&if_right_boolean); + { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kBoolean); + var_right = + LoadObjectField(CAST(right), Oddball::kToNumberOffset); + Goto(&loop); + } } } } @@ -11218,6 +11234,8 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, { Label if_right_heapnumber(this), if_right_bigint(this), if_right_string(this), if_right_boolean(this); + CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); + GotoIf(IsHeapNumberMap(right_map), &if_right_heapnumber); GotoIf(IsBigIntInstanceType(right_type), &if_right_bigint); GotoIf(IsStringInstanceType(right_type), &if_right_string); @@ -11227,9 
+11245,7 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_heapnumber); { - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + CombineFeedback(var_type_feedback, CompareOperationFeedback::kNumber); result = CAST(CallRuntime(Runtime::kBigIntEqualToNumber, NoContextConstant(), left, right)); Goto(&end); @@ -11237,7 +11253,7 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_bigint); { - CombineFeedback(var_type_feedback, CompareOperationFeedback::kBigInt); + // We already have BigInt feedback. result = CAST(CallRuntime(Runtime::kBigIntEqualToBigInt, NoContextConstant(), left, right)); Goto(&end); @@ -11245,9 +11261,7 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_string); { - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + CombineFeedback(var_type_feedback, CompareOperationFeedback::kString); result = CAST(CallRuntime(Runtime::kBigIntEqualToString, NoContextConstant(), left, right)); Goto(&end); @@ -11255,9 +11269,8 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_right_boolean); { - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kBoolean); var_right = LoadObjectField(CAST(right), Oddball::kToNumberOffset); Goto(&loop); } @@ -11266,35 +11279,60 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_oddball); { Label if_left_boolean(this), if_left_not_boolean(this); - Branch(IsBooleanMap(left_map), &if_left_boolean, &if_left_not_boolean); + GotoIf(IsBooleanMap(left_map), &if_left_boolean); + if (var_type_feedback != nullptr) { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kNullOrUndefined); + GotoIf(IsUndetectableMap(left_map), &if_left_not_boolean); + } + Goto(&if_left_not_boolean); BIND(&if_left_not_boolean); { // {left} is either 
Null or Undefined. Check if {right} is // undetectable (which includes Null and Undefined). - Label if_right_undetectable(this), if_right_not_undetectable(this); - Branch(IsUndetectableMap(right_map), &if_right_undetectable, - &if_right_not_undetectable); + Label if_right_undetectable(this), if_right_number(this), + if_right_oddball(this), + if_right_not_number_or_oddball_or_undetectable(this); + GotoIf(IsUndetectableMap(right_map), &if_right_undetectable); + GotoIf(IsHeapNumberInstanceType(right_type), &if_right_number); + GotoIf(IsOddballInstanceType(right_type), &if_right_oddball); + Goto(&if_right_not_number_or_oddball_or_undetectable); BIND(&if_right_undetectable); { - if (var_type_feedback != nullptr) { - // If {right} is undetectable, it must be either also - // Null or Undefined, or a Receiver (aka document.all). - *var_type_feedback = SmiConstant( - CompareOperationFeedback::kReceiverOrNullOrUndefined); - } + // If {right} is undetectable, it must be either also + // Null or Undefined, or a Receiver (aka document.all). + CombineFeedback( + var_type_feedback, + CompareOperationFeedback::kReceiverOrNullOrUndefined); Goto(&if_equal); } - BIND(&if_right_not_undetectable); + BIND(&if_right_number); + { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kNumber); + Goto(&if_notequal); + } + + BIND(&if_right_oddball); + { + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kOddball); + Goto(&if_notequal); + } + + BIND(&if_right_not_number_or_oddball_or_undetectable); { if (var_type_feedback != nullptr) { // Track whether {right} is Null, Undefined or Receiver. 
- *var_type_feedback = SmiConstant( + CombineFeedback( + var_type_feedback, CompareOperationFeedback::kReceiverOrNullOrUndefined); GotoIf(IsJSReceiverInstanceType(right_type), &if_notequal); - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kAny); } Goto(&if_notequal); } @@ -11302,9 +11340,8 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&if_left_boolean); { - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + CombineFeedback(var_type_feedback, + CompareOperationFeedback::kBoolean); // If {right} is a Boolean too, it must be a different Boolean. GotoIf(TaggedEqual(right_map, left_map), &if_notequal); @@ -11387,9 +11424,7 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, { // {right} is a Primitive, and neither Null or Undefined; // convert {left} to Primitive too. - if (var_type_feedback != nullptr) { - *var_type_feedback = SmiConstant(CompareOperationFeedback::kAny); - } + CombineFeedback(var_type_feedback, CompareOperationFeedback::kAny); Callable callable = CodeFactory::NonPrimitiveToPrimitive(isolate()); var_left = CallStub(callable, context, left); Goto(&loop); @@ -11400,6 +11435,12 @@ TNode CodeStubAssembler::Equal(SloppyTNode left, BIND(&do_right_stringtonumber); { + if (var_type_feedback != nullptr) { + TNode right_map = LoadMap(CAST(right)); + TNode right_type = LoadMapInstanceType(right_map); + CombineFeedback(var_type_feedback, + CollectFeedbackForString(right_type)); + } var_right = CallBuiltin(Builtins::kStringToNumber, context, right); Goto(&loop); } @@ -11678,15 +11719,47 @@ TNode CodeStubAssembler::StrictEqual( BIND(&if_lhsisoddball); { - STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == ODDBALL_TYPE); - GotoIf(IsBooleanMap(rhs_map), &if_not_equivalent_types); - GotoIf(Int32LessThan(rhs_instance_type, - Int32Constant(ODDBALL_TYPE)), - &if_not_equivalent_types); - OverwriteFeedback( - 
var_type_feedback, - CompareOperationFeedback::kReceiverOrNullOrUndefined); - Goto(&if_notequal); + Label if_lhsisboolean(this), if_lhsisnotboolean(this); + Branch(IsBooleanMap(lhs_map), &if_lhsisboolean, + &if_lhsisnotboolean); + + BIND(&if_lhsisboolean); + { + OverwriteFeedback(var_type_feedback, + CompareOperationFeedback::kNumberOrOddball); + GotoIf(IsBooleanMap(rhs_map), &if_notequal); + Goto(&if_not_equivalent_types); + } + + BIND(&if_lhsisnotboolean); + { + Label if_rhsisheapnumber(this), if_rhsisnotheapnumber(this); + + STATIC_ASSERT(LAST_PRIMITIVE_HEAP_OBJECT_TYPE == + ODDBALL_TYPE); + GotoIf(Int32LessThan(rhs_instance_type, + Int32Constant(ODDBALL_TYPE)), + &if_not_equivalent_types); + + Branch(IsHeapNumberMap(rhs_map), &if_rhsisheapnumber, + &if_rhsisnotheapnumber); + + BIND(&if_rhsisheapnumber); + { + OverwriteFeedback( + var_type_feedback, + CompareOperationFeedback::kNumberOrOddball); + Goto(&if_not_equivalent_types); + } + + BIND(&if_rhsisnotheapnumber); + { + OverwriteFeedback( + var_type_feedback, + CompareOperationFeedback::kReceiverOrNullOrUndefined); + Goto(&if_notequal); + } + } } BIND(&if_lhsissymbol); @@ -11742,7 +11815,14 @@ TNode CodeStubAssembler::StrictEqual( } BIND(&if_rhsisnotnumber); - Goto(&if_not_equivalent_types); + { + TNode rhs_instance_type = LoadMapInstanceType(rhs_map); + GotoIfNot(IsOddballInstanceType(rhs_instance_type), + &if_not_equivalent_types); + OverwriteFeedback(var_type_feedback, + CompareOperationFeedback::kNumberOrOddball); + Goto(&if_notequal); + } } } } @@ -12380,28 +12460,6 @@ TNode CodeStubAssembler::BitwiseOp(TNode left32, UNREACHABLE(); } -// ES #sec-createarrayiterator -TNode CodeStubAssembler::CreateArrayIterator( - TNode context, TNode object, IterationKind kind) { - TNode native_context = LoadNativeContext(context); - TNode iterator_map = CAST(LoadContextElement( - native_context, Context::INITIAL_ARRAY_ITERATOR_MAP_INDEX)); - TNode iterator = Allocate(JSArrayIterator::kHeaderSize); - 
StoreMapNoWriteBarrier(iterator, iterator_map); - StoreObjectFieldRoot(iterator, JSArrayIterator::kPropertiesOrHashOffset, - RootIndex::kEmptyFixedArray); - StoreObjectFieldRoot(iterator, JSArrayIterator::kElementsOffset, - RootIndex::kEmptyFixedArray); - StoreObjectFieldNoWriteBarrier( - iterator, JSArrayIterator::kIteratedObjectOffset, object); - StoreObjectFieldNoWriteBarrier(iterator, JSArrayIterator::kNextIndexOffset, - SmiConstant(0)); - StoreObjectFieldNoWriteBarrier( - iterator, JSArrayIterator::kKindOffset, - SmiConstant(Smi::FromInt(static_cast(kind)))); - return CAST(iterator); -} - TNode CodeStubAssembler::AllocateJSIteratorResult( SloppyTNode context, SloppyTNode value, SloppyTNode done) { @@ -12507,11 +12565,31 @@ TNode CodeStubAssembler::LoadJSTypedArrayLength( return LoadObjectField(typed_array, JSTypedArray::kLengthOffset); } +TNode CodeStubAssembler::GetTypedArrayBuffer( + TNode context, TNode array) { + Label call_runtime(this), done(this); + TVARIABLE(Object, var_result); + + TNode buffer = LoadJSArrayBufferViewBuffer(array); + GotoIf(IsDetachedBuffer(buffer), &call_runtime); + TNode backing_store = LoadJSArrayBufferBackingStorePtr(buffer); + GotoIf(WordEqual(backing_store, IntPtrConstant(0)), &call_runtime); + var_result = buffer; + Goto(&done); + + BIND(&call_runtime); + { + var_result = CallRuntime(Runtime::kTypedArrayGetBuffer, context, array); + Goto(&done); + } + + BIND(&done); + return CAST(var_result.value()); +} + CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, - TNode argc, TNode fp, - ReceiverMode receiver_mode) + TNode argc, TNode fp) : assembler_(assembler), - receiver_mode_(receiver_mode), argc_(argc), base_(), fp_(fp != nullptr ? 
fp : assembler_->LoadFramePointer()) { @@ -12531,7 +12609,6 @@ CodeStubArguments::CodeStubArguments(CodeStubAssembler* assembler, } TNode CodeStubArguments::GetReceiver() const { - DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver); #ifdef V8_REVERSE_JSARGS intptr_t offset = -kSystemPointerSize; #else @@ -12541,7 +12618,6 @@ TNode CodeStubArguments::GetReceiver() const { } void CodeStubArguments::SetReceiver(TNode object) const { - DCHECK_EQ(receiver_mode_, ReceiverMode::kHasReceiver); #ifdef V8_REVERSE_JSARGS intptr_t offset = -kSystemPointerSize; #else @@ -12574,26 +12650,6 @@ TNode CodeStubArguments::AtIndex(int index) const { return AtIndex(assembler_->IntPtrConstant(index)); } -TNode CodeStubArguments::GetOptionalArgumentValue( - int index, TNode default_value) { - CodeStubAssembler::TVariable result(assembler_); - CodeStubAssembler::Label argument_missing(assembler_), - argument_done(assembler_, &result); - - assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual( - assembler_->IntPtrConstant(index), argc_), - &argument_missing); - result = AtIndex(index); - assembler_->Goto(&argument_done); - - assembler_->BIND(&argument_missing); - result = default_value; - assembler_->Goto(&argument_done); - - assembler_->BIND(&argument_done); - return result.value(); -} - TNode CodeStubArguments::GetOptionalArgumentValue( TNode index, TNode default_value) { CodeStubAssembler::TVariable result(assembler_); @@ -12641,13 +12697,8 @@ void CodeStubArguments::ForEach( } void CodeStubArguments::PopAndReturn(TNode value) { - TNode pop_count; - if (receiver_mode_ == ReceiverMode::kHasReceiver) { - pop_count = assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)); - } else { - pop_count = argc_; - } - + TNode pop_count = + assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1)); assembler_->PopAndReturn(pop_count, value); } @@ -13045,35 +13096,6 @@ void CodeStubAssembler::PerformStackCheck(TNode context) { BIND(&ok); } -TNode 
CodeStubAssembler::AllocateSyntheticFunctionContext( - TNode native_context, int slots) { - DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); - TNode context_heap_object = - AllocateInNewSpace(FixedArray::SizeFor(slots)); - InitializeSyntheticFunctionContext(native_context, context_heap_object, - slots); - return CAST(context_heap_object); -} - -void CodeStubAssembler::InitializeSyntheticFunctionContext( - TNode native_context, TNode context_heap_object, - int slots) { - DCHECK_GE(slots, Context::MIN_CONTEXT_SLOTS); - TNode map = CAST( - LoadContextElement(native_context, Context::FUNCTION_CONTEXT_MAP_INDEX)); - StoreMapNoWriteBarrier(context_heap_object, map); - StoreObjectFieldNoWriteBarrier(context_heap_object, FixedArray::kLengthOffset, - SmiConstant(slots)); - - TNode context = CAST(context_heap_object); - const TNode empty_scope_info = - LoadContextElement(native_context, Context::SCOPE_INFO_INDEX); - StoreContextElementNoWriteBarrier(context, Context::SCOPE_INFO_INDEX, - empty_scope_info); - StoreContextElementNoWriteBarrier(context, Context::PREVIOUS_INDEX, - UndefinedConstant()); -} - TNode CodeStubAssembler::CallApiCallback( TNode context, TNode callback, TNode argc, TNode data, TNode holder, TNode receiver) { @@ -13085,17 +13107,9 @@ TNode CodeStubAssembler::CallApiCallback( TNode context, TNode callback, TNode argc, TNode data, TNode holder, TNode receiver, TNode value) { - // CallApiCallback receives the first four arguments in registers - // (callback, argc, data and holder). The last arguments are in the stack in - // JS ordering. See ApiCallbackDescriptor. 
Callable callable = CodeFactory::CallApiCallback(isolate()); -#ifdef V8_REVERSE_JSARGS - return CallStub(callable, context, callback, argc, data, holder, value, - receiver); -#else return CallStub(callable, context, callback, argc, data, holder, receiver, value); -#endif } TNode CodeStubAssembler::CallRuntimeNewArray( diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index b01729c73db8d4..de2c16d35f934f 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -15,6 +15,7 @@ #include "src/compiler/code-assembler.h" #include "src/objects/arguments.h" #include "src/objects/bigint.h" +#include "src/objects/js-function.h" #include "src/objects/objects.h" #include "src/objects/promise.h" #include "src/objects/shared-function-info.h" @@ -107,130 +108,104 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; V(TypedArraySpeciesProtector, typed_array_species_protector, \ TypedArraySpeciesProtector) -#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \ - V(AccessorInfoMap, accessor_info_map, AccessorInfoMap) \ - V(AccessorPairMap, accessor_pair_map, AccessorPairMap) \ - V(AllocationMementoMap, allocation_memento_map, AllocationMementoMap) \ - V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \ - AllocationSiteWithoutWeakNextMap) \ - V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \ - V(arguments_to_string, arguments_to_string, ArgumentsToString) \ - V(ArrayBoilerplateDescriptionMap, array_boilerplate_description_map, \ - ArrayBoilerplateDescriptionMap) \ - V(Array_string, Array_string, ArrayString) \ - V(array_to_string, array_to_string, ArrayToString) \ - V(BooleanMap, boolean_map, BooleanMap) \ - V(boolean_to_string, boolean_to_string, BooleanToString) \ - V(CellMap, cell_map, CellMap) \ - V(CodeMap, code_map, CodeMap) \ - V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \ - V(ConsStringMap, 
cons_string_map, ConsStringMap) \ - V(constructor_string, constructor_string, ConstructorString) \ - V(CoverageInfoMap, coverage_info_map, CoverageInfoMap) \ - V(date_to_string, date_to_string, DateToString) \ - V(default_string, default_string, DefaultString) \ - V(EmptyByteArray, empty_byte_array, EmptyByteArray) \ - V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \ - V(EmptyPropertyDictionary, empty_property_dictionary, \ - EmptyPropertyDictionary) \ - V(EmptySlowElementDictionary, empty_slow_element_dictionary, \ - EmptySlowElementDictionary) \ - V(empty_string, empty_string, EmptyString) \ - V(error_to_string, error_to_string, ErrorToString) \ - V(FalseValue, false_value, False) \ - V(FeedbackVectorMap, feedback_vector_map, FeedbackVectorMap) \ - V(FixedArrayMap, fixed_array_map, FixedArrayMap) \ - V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \ - V(FixedDoubleArrayMap, fixed_double_array_map, FixedDoubleArrayMap) \ - V(Function_string, function_string, FunctionString) \ - V(FunctionTemplateInfoMap, function_template_info_map, \ - FunctionTemplateInfoMap) \ - V(function_to_string, function_to_string, FunctionToString) \ - V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \ - V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \ - V(HeapNumberMap, heap_number_map, HeapNumberMap) \ - V(Infinity_string, Infinity_string, InfinityString) \ - V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \ - IsConcatSpreadableSymbol) \ - V(iterator_symbol, iterator_symbol, IteratorSymbol) \ - V(length_string, length_string, LengthString) \ - V(ManyClosuresCellMap, many_closures_cell_map, ManyClosuresCellMap) \ - V(match_symbol, match_symbol, MatchSymbol) \ - V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \ - V(message_string, message_string, MessageString) \ - V(MetaMap, meta_map, MetaMap) \ - V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \ - V(MinusZeroValue, minus_zero_value, 
MinusZero) \ - V(name_string, name_string, NameString) \ - V(NanValue, nan_value, Nan) \ - V(NaN_string, NaN_string, NaNString) \ - V(next_string, next_string, NextString) \ - V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \ - V(null_to_string, null_to_string, NullToString) \ - V(NullValue, null_value, Null) \ - V(number_string, number_string, numberString) \ - V(number_to_string, number_to_string, NumberToString) \ - V(Object_string, Object_string, ObjectString) \ - V(object_to_string, object_to_string, ObjectToString) \ - V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \ - V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \ - V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \ - V(PreparseDataMap, preparse_data_map, PreparseDataMap) \ - V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \ - V(promise_forwarding_handler_symbol, promise_forwarding_handler_symbol, \ - PromiseForwardingHandlerSymbol) \ - V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \ - PromiseFulfillReactionJobTaskMap) \ - V(promise_handled_by_symbol, promise_handled_by_symbol, \ - PromiseHandledBySymbol) \ - V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap) \ - V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map, \ - PromiseRejectReactionJobTaskMap) \ - V(PromiseResolveThenableJobTaskMap, promise_resolve_thenable_job_task_map, \ - PromiseResolveThenableJobTaskMap) \ - V(prototype_string, prototype_string, PrototypeString) \ - V(PrototypeInfoMap, prototype_info_map, PrototypeInfoMap) \ - V(replace_symbol, replace_symbol, ReplaceSymbol) \ - V(regexp_to_string, regexp_to_string, RegexpToString) \ - V(resolve_string, resolve_string, ResolveString) \ - V(return_string, return_string, ReturnString) \ - V(SharedFunctionInfoMap, shared_function_info_map, SharedFunctionInfoMap) \ - V(SloppyArgumentsElementsMap, sloppy_arguments_elements_map, \ - 
SloppyArgumentsElementsMap) \ - V(SmallOrderedHashSetMap, small_ordered_hash_set_map, \ - SmallOrderedHashSetMap) \ - V(SmallOrderedHashMapMap, small_ordered_hash_map_map, \ - SmallOrderedHashMapMap) \ - V(SmallOrderedNameDictionaryMap, small_ordered_name_dictionary_map, \ - SmallOrderedNameDictionaryMap) \ - V(species_symbol, species_symbol, SpeciesSymbol) \ - V(StaleRegister, stale_register, StaleRegister) \ - V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \ - V(string_string, string_string, StringString) \ - V(string_to_string, string_to_string, StringToString) \ - V(StringMap, string_map, StringMap) \ - V(SymbolMap, symbol_map, SymbolMap) \ - V(TheHoleValue, the_hole_value, TheHole) \ - V(then_string, then_string, ThenString) \ - V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \ - V(TransitionArrayMap, transition_array_map, TransitionArrayMap) \ - V(TrueValue, true_value, True) \ - V(Tuple2Map, tuple2_map, Tuple2Map) \ - V(BreakPointMap, break_point_map, BreakPointMap) \ - V(BreakPointInfoMap, break_point_info_map, BreakPointInfoMap) \ - V(CachedTemplateObjectMap, cached_template_object_map, \ - CachedTemplateObjectMap) \ - V(UncompiledDataWithoutPreparseDataMap, \ - uncompiled_data_without_preparse_data_map, \ - UncompiledDataWithoutPreparseDataMap) \ - V(UncompiledDataWithPreparseDataMap, uncompiled_data_with_preparse_data_map, \ - UncompiledDataWithPreparseDataMap) \ - V(undefined_to_string, undefined_to_string, UndefinedToString) \ - V(UndefinedValue, undefined_value, Undefined) \ - V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \ - V(WeakFixedArrayMap, weak_fixed_array_map, WeakFixedArrayMap) \ - V(zero_string, zero_string, ZeroString) \ - TORQUE_INTERNAL_MAP_CSA_LIST(V) +#define UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER( \ + V, rootIndexName, rootAccessorName, class_name) \ + V(rootIndexName, rootAccessorName, class_name##Map) + +#define HEAP_IMMUTABLE_IMMOVABLE_OBJECT_LIST(V) \ + 
V(AllocationSiteWithoutWeakNextMap, allocation_site_without_weaknext_map, \ + AllocationSiteWithoutWeakNextMap) \ + V(AllocationSiteWithWeakNextMap, allocation_site_map, AllocationSiteMap) \ + V(arguments_to_string, arguments_to_string, ArgumentsToString) \ + V(Array_string, Array_string, ArrayString) \ + V(array_to_string, array_to_string, ArrayToString) \ + V(BooleanMap, boolean_map, BooleanMap) \ + V(boolean_to_string, boolean_to_string, BooleanToString) \ + V(ConsOneByteStringMap, cons_one_byte_string_map, ConsOneByteStringMap) \ + V(ConsStringMap, cons_string_map, ConsStringMap) \ + V(constructor_string, constructor_string, ConstructorString) \ + V(date_to_string, date_to_string, DateToString) \ + V(default_string, default_string, DefaultString) \ + V(EmptyByteArray, empty_byte_array, EmptyByteArray) \ + V(EmptyFixedArray, empty_fixed_array, EmptyFixedArray) \ + V(EmptyScopeInfo, empty_scope_info, EmptyScopeInfo) \ + V(EmptyPropertyDictionary, empty_property_dictionary, \ + EmptyPropertyDictionary) \ + V(EmptySlowElementDictionary, empty_slow_element_dictionary, \ + EmptySlowElementDictionary) \ + V(empty_string, empty_string, EmptyString) \ + V(error_to_string, error_to_string, ErrorToString) \ + V(errors_string, errors_string, ErrorsString) \ + V(FalseValue, false_value, False) \ + V(FixedArrayMap, fixed_array_map, FixedArrayMap) \ + V(FixedCOWArrayMap, fixed_cow_array_map, FixedCOWArrayMap) \ + V(Function_string, function_string, FunctionString) \ + V(function_to_string, function_to_string, FunctionToString) \ + V(GlobalPropertyCellMap, global_property_cell_map, PropertyCellMap) \ + V(has_instance_symbol, has_instance_symbol, HasInstanceSymbol) \ + V(Infinity_string, Infinity_string, InfinityString) \ + V(is_concat_spreadable_symbol, is_concat_spreadable_symbol, \ + IsConcatSpreadableSymbol) \ + V(iterator_symbol, iterator_symbol, IteratorSymbol) \ + V(length_string, length_string, LengthString) \ + V(ManyClosuresCellMap, many_closures_cell_map, 
ManyClosuresCellMap) \ + V(match_symbol, match_symbol, MatchSymbol) \ + V(megamorphic_symbol, megamorphic_symbol, MegamorphicSymbol) \ + V(message_string, message_string, MessageString) \ + V(minus_Infinity_string, minus_Infinity_string, MinusInfinityString) \ + V(MinusZeroValue, minus_zero_value, MinusZero) \ + V(name_string, name_string, NameString) \ + V(NanValue, nan_value, Nan) \ + V(NaN_string, NaN_string, NaNString) \ + V(next_string, next_string, NextString) \ + V(NoClosuresCellMap, no_closures_cell_map, NoClosuresCellMap) \ + V(null_to_string, null_to_string, NullToString) \ + V(NullValue, null_value, Null) \ + V(number_string, number_string, NumberString) \ + V(number_to_string, number_to_string, NumberToString) \ + V(Object_string, Object_string, ObjectString) \ + V(object_to_string, object_to_string, ObjectToString) \ + V(OneByteStringMap, one_byte_string_map, OneByteStringMap) \ + V(OneClosureCellMap, one_closure_cell_map, OneClosureCellMap) \ + V(OnePointerFillerMap, one_pointer_filler_map, OnePointerFillerMap) \ + V(PromiseCapabilityMap, promise_capability_map, PromiseCapabilityMap) \ + V(promise_forwarding_handler_symbol, promise_forwarding_handler_symbol, \ + PromiseForwardingHandlerSymbol) \ + V(PromiseFulfillReactionJobTaskMap, promise_fulfill_reaction_job_task_map, \ + PromiseFulfillReactionJobTaskMap) \ + V(promise_handled_by_symbol, promise_handled_by_symbol, \ + PromiseHandledBySymbol) \ + V(PromiseReactionMap, promise_reaction_map, PromiseReactionMap) \ + V(PromiseRejectReactionJobTaskMap, promise_reject_reaction_job_task_map, \ + PromiseRejectReactionJobTaskMap) \ + V(PromiseResolveThenableJobTaskMap, promise_resolve_thenable_job_task_map, \ + PromiseResolveThenableJobTaskMap) \ + V(prototype_string, prototype_string, PrototypeString) \ + V(replace_symbol, replace_symbol, ReplaceSymbol) \ + V(regexp_to_string, regexp_to_string, RegexpToString) \ + V(resolve_string, resolve_string, ResolveString) \ + V(return_string, return_string, 
ReturnString) \ + V(species_symbol, species_symbol, SpeciesSymbol) \ + V(StaleRegister, stale_register, StaleRegister) \ + V(StoreHandler0Map, store_handler0_map, StoreHandler0Map) \ + V(string_string, string_string, StringString) \ + V(string_to_string, string_to_string, StringToString) \ + V(StringMap, string_map, StringMap) \ + V(TheHoleValue, the_hole_value, TheHole) \ + V(then_string, then_string, ThenString) \ + V(toString_string, toString_string, ToStringString) \ + V(to_primitive_symbol, to_primitive_symbol, ToPrimitiveSymbol) \ + V(to_string_tag_symbol, to_string_tag_symbol, ToStringTagSymbol) \ + V(TrueValue, true_value, True) \ + V(undefined_to_string, undefined_to_string, UndefinedToString) \ + V(UndefinedValue, undefined_value, Undefined) \ + V(uninitialized_symbol, uninitialized_symbol, UninitializedSymbol) \ + V(valueOf_string, valueOf_string, ValueOfString) \ + V(wasm_wrapped_object_symbol, wasm_wrapped_object_symbol, \ + WasmWrappedObjectSymbol) \ + V(zero_string, zero_string, ZeroString) \ + UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR( \ + UNIQUE_INSTANCE_TYPE_IMMUTABLE_IMMOVABLE_MAP_ADAPTER, V) #define HEAP_IMMOVABLE_OBJECT_LIST(V) \ HEAP_MUTABLE_IMMOVABLE_OBJECT_LIST(V) \ @@ -289,10 +264,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #define CSA_DEBUG_INFO(name) \ { #name, __FILE__, __LINE__ } #define BIND(label) Bind(label, CSA_DEBUG_INFO(label)) -#define VARIABLE(name, ...) \ - Variable name(this, CSA_DEBUG_INFO(name), __VA_ARGS__) -#define VARIABLE_CONSTRUCTOR(name, ...) \ - name(this, CSA_DEBUG_INFO(name), __VA_ARGS__) #define TYPED_VARIABLE_DEF(type, name, ...) \ TVariable name(CSA_DEBUG_INFO(name), __VA_ARGS__) #define TYPED_VARIABLE_CONSTRUCTOR(name, ...) \ @@ -302,8 +273,6 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol }; #define CSA_ASSERT_BRANCH(csa, ...) ((void)0) #define CSA_ASSERT_JS_ARGC_EQ(csa, expected) ((void)0) #define BIND(label) Bind(label) -#define VARIABLE(name, ...) 
Variable name(this, __VA_ARGS__) -#define VARIABLE_CONSTRUCTOR(name, ...) name(this, __VA_ARGS__) #define TYPED_VARIABLE_DEF(type, name, ...) TVariable name(__VA_ARGS__) #define TYPED_VARIABLE_CONSTRUCTOR(name, ...) name(__VA_ARGS__) #endif // DEBUG @@ -367,15 +336,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler #endif } - MachineRepresentation ParameterRepresentation(ParameterMode mode) const { - return mode == INTPTR_PARAMETERS ? MachineType::PointerRepresentation() - : MachineRepresentation::kTaggedSigned; - } - - MachineRepresentation OptimalParameterRepresentation() const { - return ParameterRepresentation(OptimalParameterMode()); - } - TNode ParameterToIntPtr(TNode value) { return SmiUntag(value); } TNode ParameterToIntPtr(TNode value) { return value; } // TODO(v8:9708): remove once all uses are ported. @@ -384,27 +344,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return UncheckedCast(value); } - template - TNode IntPtrToParameter(TNode value); - - Node* IntPtrToParameter(SloppyTNode value, ParameterMode mode) { - if (mode == SMI_PARAMETERS) return SmiTag(value); - return value; - } - - Node* Int32ToParameter(SloppyTNode value, ParameterMode mode) { - return IntPtrToParameter(ChangeInt32ToIntPtr(value), mode); - } + TNode ParameterToTagged(TNode value) { return value; } - TNode ParameterToTagged(Node* value, ParameterMode mode) { - if (mode != SMI_PARAMETERS) return SmiTag(value); - return UncheckedCast(value); - } + TNode ParameterToTagged(TNode value) { return SmiTag(value); } - Node* TaggedToParameter(SloppyTNode value, ParameterMode mode) { - if (mode != SMI_PARAMETERS) return SmiUntag(value); - return value; - } + template + TNode TaggedToParameter(TNode value); bool ToParameterConstant(Node* node, intptr_t* out, ParameterMode mode) { if (mode == ParameterMode::SMI_PARAMETERS) { @@ -449,11 +394,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TaggedIndexToSmi(TNode value); TNode SmiToTaggedIndex(TNode value); - // Pointer compression specific. 
Returns true if the upper 32 bits of a Smi - // contain the sign of a lower 32 bits (i.e. not corrupted) so that the Smi - // can be directly used as an index in element offset computation. - TNode IsValidSmiIndex(TNode smi); - // Pointer compression specific. Ensures that the upper 32 bits of a Smi // contain the sign of a lower 32 bits so that the Smi can be directly used // as an index in element offset computation. @@ -471,37 +411,13 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TaggedToDirectString(TNode value, Label* fail); - TNode TaggedToNumber(TNode value, Label* fail) { - GotoIfNot(IsNumber(value), fail); - return UncheckedCast(value); - } - TNode TaggedToHeapObject(TNode value, Label* fail) { GotoIf(TaggedIsSmi(value), fail); return UncheckedCast(value); } - TNode HeapObjectToJSAggregateError( - TNode heap_object, Label* fail); - - TNode HeapObjectToJSArray(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSArray(heap_object), fail); - return UncheckedCast(heap_object); - } - - TNode HeapObjectToJSArrayBuffer(TNode heap_object, - Label* fail) { - GotoIfNot(IsJSArrayBuffer(heap_object), fail); - return UncheckedCast(heap_object); - } - - TNode TaggedToFastJSArray(TNode context, - TNode value, Label* fail) { - GotoIf(TaggedIsSmi(value), fail); - TNode heap_object = CAST(value); - GotoIfNot(IsFastJSArray(heap_object, context), fail); - return UncheckedCast(heap_object); + TNode Uint16Constant(uint16_t t) { + return UncheckedCast(Int32Constant(t)); } TNode HeapObjectToJSDataView(TNode heap_object, @@ -656,13 +572,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler template TNode IntPtrOrSmiConstant(int value); - // TODO(v8:9708): remove once all uses are ported. - Node* IntPtrOrSmiConstant(int value, ParameterMode mode); - - bool IsIntPtrOrSmiConstantZero(TNode test); - bool IsIntPtrOrSmiConstantZero(TNode test); - // TODO(v8:9708): remove once all uses are ported. 
- bool IsIntPtrOrSmiConstantZero(Node* test, ParameterMode mode); bool TryGetIntPtrOrSmiConstantValue(Node* maybe_constant, int* value, ParameterMode mode); @@ -713,10 +622,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler BitcastTaggedToWordForTagAndSmiBits(b))); \ } else { \ DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ return BitcastWordToTaggedSigned(ChangeInt32ToIntPtr(Int32OpName( \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))))); \ @@ -777,22 +682,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } } - Node* WordOrSmiShl(Node* a, int shift, ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiShl(CAST(a), shift); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordShl(a, shift); - } - } + TNode WordOrSmiShr(TNode a, int shift) { return SmiShr(a, shift); } - Node* WordOrSmiShr(Node* a, int shift, ParameterMode mode) { - if (mode == SMI_PARAMETERS) { - return SmiShr(CAST(a), shift); - } else { - DCHECK_EQ(INTPTR_PARAMETERS, mode); - return WordShr(a, shift); - } + TNode WordOrSmiShr(TNode a, int shift) { + return WordShr(a, shift); } #define SMI_COMPARISON_OP(SmiOpName, IntPtrOpName, Int32OpName) \ @@ -803,10 +696,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } else { \ DCHECK_EQ(kTaggedSize, kInt32Size); \ DCHECK(SmiValuesAre31Bits()); \ - if (kSystemPointerSize == kInt64Size) { \ - CSA_ASSERT(this, IsValidSmi(a)); \ - CSA_ASSERT(this, IsValidSmi(b)); \ - } \ return Int32OpName( \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(a)), \ TruncateIntPtrToInt32(BitcastTaggedToWordForTagAndSmiBits(b))); \ @@ -885,9 +774,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode AllocateInNewSpace(int size, AllocationFlags flags = kNone); TNode Allocate(TNode size, AllocationFlags flags = kNone); - TNode AllocateAllowLOS(TNode size) { - return 
Allocate(size, AllocationFlag::kAllowLargeObjectAllocation); - } TNode Allocate(int size, AllocationFlags flags = kNone); TNode InnerAllocate(TNode previous, int offset); @@ -919,7 +805,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler void Check(SloppyTNode condition_node, const char* message, const char* file, int line, std::initializer_list extra_nodes = {}); - void FailAssert(const char* message, const char* file, int line, + void FailAssert(const char* message, + const std::vector& files_and_lines, std::initializer_list extra_nodes = {}); void FastCheck(TNode condition); @@ -1176,12 +1063,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return UncheckedCast( LoadObjectField(object, offset, MachineTypeOf::value)); } - TNode LoadObjectField(SloppyTNode object, int offset) { + TNode LoadObjectField(TNode object, int offset) { return UncheckedCast( LoadObjectField(object, offset, MachineType::AnyTagged())); } - TNode LoadObjectField(SloppyTNode object, - SloppyTNode offset) { + TNode LoadObjectField(TNode object, + TNode offset) { return UncheckedCast( LoadObjectField(object, offset, MachineType::AnyTagged())); } @@ -1348,7 +1235,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // This is only used on a newly allocated PropertyArray which // doesn't have an existing hash. void InitializePropertyArrayLength(TNode property_array, - Node* length, ParameterMode mode); + TNode length); // Check if the map is set for slow properties. 
TNode IsDictionaryMap(SloppyTNode map); @@ -1428,52 +1315,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler ParameterMode parameter_mode = INTPTR_PARAMETERS, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + template TNode LoadFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, + TNode object, TNode index, int additional_offset = 0, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe, CheckBounds check_bounds = CheckBounds::kAlways); // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. TNode UnsafeLoadFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadFixedArrayElement(object, index, additional_offset, - parameter_mode, needs_poisoning, - CheckBounds::kDebugOnly); - } - - TNode LoadFixedArrayElement( - TNode object, TNode index, - LoadSensitivity needs_poisoning, - CheckBounds check_bounds = CheckBounds::kAlways) { - return LoadFixedArrayElement(object, index, 0, INTPTR_PARAMETERS, - needs_poisoning, check_bounds); - } - // This doesn't emit a bounds-check. As part of the security-performance - // tradeoff, only use it if it is performance critical. 
- TNode UnsafeLoadFixedArrayElement(TNode object, - TNode index, - LoadSensitivity needs_poisoning) { - return LoadFixedArrayElement(object, index, needs_poisoning, - CheckBounds::kDebugOnly); - } - - TNode LoadFixedArrayElement( TNode object, TNode index, int additional_offset = 0, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { return LoadFixedArrayElement(object, index, additional_offset, - INTPTR_PARAMETERS, needs_poisoning); + needs_poisoning, CheckBounds::kDebugOnly); } TNode LoadFixedArrayElement( TNode object, int index, int additional_offset = 0, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning); + additional_offset, needs_poisoning); } // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. @@ -1481,12 +1342,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode object, int index, int additional_offset = 0, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning, CheckBounds::kDebugOnly); - } - TNode LoadFixedArrayElement(TNode object, - TNode index) { - return LoadFixedArrayElement(object, index, 0, SMI_PARAMETERS); + additional_offset, needs_poisoning, + CheckBounds::kDebugOnly); } TNode LoadPropertyArrayElement(TNode object, @@ -1497,56 +1354,26 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Array is any array-like type that has a fixed header followed by // tagged elements. 
template - TNode LoadAndUntagToWord32ArrayElement( - TNode array, int array_header_size, Node* index, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode LoadAndUntagToWord32ArrayElement(TNode array, + int array_header_size, + TNode index, + int additional_offset = 0); // Load an array element from a FixedArray, untag it and return it as Word32. TNode LoadAndUntagToWord32FixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS); - - TNode LoadAndUntagToWord32FixedArrayElement( - TNode object, int index, int additional_offset = 0) { - return LoadAndUntagToWord32FixedArrayElement( - object, IntPtrConstant(index), additional_offset, INTPTR_PARAMETERS); - } + TNode object, TNode index, + int additional_offset = 0); // Load an array element from a WeakFixedArray. - TNode LoadWeakFixedArrayElement( - TNode object, Node* index, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); - - TNode LoadWeakFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return LoadWeakFixedArrayElement(object, IntPtrConstant(index), - additional_offset, INTPTR_PARAMETERS, - needs_poisoning); - } + TNode LoadWeakFixedArrayElement(TNode object, + TNode index, + int additional_offset = 0); // Load an array element from a FixedDoubleArray. 
TNode LoadFixedDoubleArrayElement( - SloppyTNode object, Node* index, - MachineType machine_type, int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - Label* if_hole = nullptr); - - TNode LoadFixedDoubleArrayElement(TNode object, - TNode index, - Label* if_hole = nullptr) { - return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, - SMI_PARAMETERS, if_hole); - } - - TNode LoadFixedDoubleArrayElement(TNode object, - TNode index, - Label* if_hole = nullptr) { - return LoadFixedDoubleArrayElement(object, index, MachineType::Float64(), 0, - INTPTR_PARAMETERS, if_hole); - } + TNode object, TNode index, + Label* if_hole = nullptr, + MachineType machine_type = MachineType::Float64()); // Load an array element from a FixedArray, FixedDoubleArray or a // NumberDictionary (depending on the |elements_kind|) and return @@ -1565,24 +1392,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler int additional_offset = 0); TNode LoadFeedbackVectorLength(TNode); - TNode LoadDoubleWithHoleCheck(TNode array, - TNode index, - Label* if_hole = nullptr); TNode LoadDoubleWithHoleCheck(TNode array, TNode index, Label* if_hole = nullptr); - TNode LoadDoubleWithHoleCheck(TNode array, - TNode index, - Label* if_hole = nullptr) { - return LoadDoubleWithHoleCheck(array, Signed(index), if_hole); - } TNode IsDoubleHole(TNode base, TNode offset); // Load Float64 value by |base| + |offset| address. If the value is a double // hole then jump to |if_hole|. If |machine_type| is None then only the hole // check is generated. 
TNode LoadDoubleWithHoleCheck( - SloppyTNode base, SloppyTNode offset, Label* if_hole, + TNode base, TNode offset, Label* if_hole, MachineType machine_type = MachineType::Float64()); TNode LoadFixedTypedArrayElementAsTagged(TNode data_pointer, TNode index, @@ -1607,17 +1426,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode LoadScopeInfoHasExtensionField(TNode scope_info); // Context manipulation: - TNode LoadContextElement(SloppyTNode context, - int slot_index); - TNode LoadContextElement(SloppyTNode context, - SloppyTNode slot_index); - TNode LoadContextElement(TNode context, - TNode slot_index); - void StoreContextElement(SloppyTNode context, int slot_index, - SloppyTNode value); - void StoreContextElement(SloppyTNode context, - SloppyTNode slot_index, - SloppyTNode value); void StoreContextElementNoWriteBarrier(SloppyTNode context, int slot_index, SloppyTNode value); @@ -1771,13 +1579,10 @@ class V8_EXPORT_PRIVATE CodeStubAssembler parameter_mode, CheckBounds::kDebugOnly); } - void StorePropertyArrayElement( - TNode array, Node* index, SloppyTNode value, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - int additional_offset = 0, - ParameterMode parameter_mode = INTPTR_PARAMETERS) { - StoreFixedArrayOrPropertyArrayElement(array, index, value, barrier_mode, - additional_offset, parameter_mode); + void StorePropertyArrayElement(TNode array, + TNode index, TNode value) { + StoreFixedArrayOrPropertyArrayElement( + array, index, value, UPDATE_WRITE_BARRIER, 0, INTPTR_PARAMETERS); } void StoreFixedArrayElement( @@ -1824,13 +1629,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler } void StoreDoubleHole(TNode object, TNode offset); - void StoreFixedDoubleArrayHole(TNode array, Node* index, - ParameterMode mode = INTPTR_PARAMETERS); - void StoreFixedDoubleArrayHoleSmi(TNode array, - TNode index) { - StoreFixedDoubleArrayHole(array, index, SMI_PARAMETERS); - } - + void StoreFixedDoubleArrayHole(TNode array, + TNode index); void StoreFeedbackVectorSlot( TNode 
feedback_vector, TNode slot, TNode value, @@ -1845,9 +1645,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode EnsureArrayPushable(TNode context, TNode map, Label* bailout); - void TryStoreArrayElement(ElementsKind kind, ParameterMode mode, - Label* bailout, TNode elements, - Node* index, TNode value); + void TryStoreArrayElement(ElementsKind kind, Label* bailout, + TNode elements, TNode index, + TNode value); // Consumes args into the array, and returns tagged new length. TNode BuildAppendJSArray(ElementsKind kind, TNode array, CodeStubArguments* args, @@ -1961,45 +1761,43 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // // Allocate and return a JSArray with initialized header fields and its // uninitialized elements. - // The ParameterMode argument is only used for the capacity parameter. std::pair, TNode> AllocateUninitializedJSArrayWithElements( ElementsKind kind, TNode array_map, TNode length, - TNode allocation_site, TNode capacity, - AllocationFlags allocation_flags = kNone, + base::Optional> allocation_site, + TNode capacity, AllocationFlags allocation_flags = kNone, int array_header_size = JSArray::kHeaderSize); // Allocate a JSArray and fill elements with the hole. 
- TNode AllocateJSArray(ElementsKind kind, TNode array_map, - TNode capacity, TNode length, - TNode allocation_site, - AllocationFlags allocation_flags = kNone); - TNode AllocateJSArray(ElementsKind kind, TNode array_map, - TNode capacity, TNode length, - TNode allocation_site, - AllocationFlags allocation_flags = kNone) { + TNode AllocateJSArray( + ElementsKind kind, TNode array_map, TNode capacity, + TNode length, base::Optional> allocation_site, + AllocationFlags allocation_flags = kNone); + TNode AllocateJSArray( + ElementsKind kind, TNode array_map, TNode capacity, + TNode length, base::Optional> allocation_site, + AllocationFlags allocation_flags = kNone) { return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, allocation_site, allocation_flags); } TNode AllocateJSArray(ElementsKind kind, TNode array_map, TNode capacity, TNode length, AllocationFlags allocation_flags = kNone) { - return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, {}, - allocation_flags); + return AllocateJSArray(kind, array_map, SmiUntag(capacity), length, + base::nullopt, allocation_flags); } TNode AllocateJSArray(ElementsKind kind, TNode array_map, TNode capacity, TNode length, AllocationFlags allocation_flags = kNone) { - return AllocateJSArray(kind, array_map, capacity, length, {}, + return AllocateJSArray(kind, array_map, capacity, length, base::nullopt, allocation_flags); } // Allocate a JSArray and initialize the header fields. - TNode AllocateJSArray(TNode array_map, - TNode elements, - TNode length, - TNode allocation_site = {}, - int array_header_size = JSArray::kHeaderSize); + TNode AllocateJSArray( + TNode array_map, TNode elements, TNode length, + base::Optional> allocation_site = base::nullopt, + int array_header_size = JSArray::kHeaderSize); enum class HoleConversionMode { kDontConvert, kConvertToUndefined }; // Clone a fast JSArray |array| into a new fast JSArray. 
@@ -2014,34 +1812,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // function generates significantly less code in this case. TNode CloneFastJSArray( TNode context, TNode array, - TNode allocation_site = {}, + base::Optional> allocation_site = base::nullopt, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert); TNode ExtractFastJSArray(TNode context, - TNode array, Node* begin, - Node* count, - ParameterMode mode = INTPTR_PARAMETERS, - Node* capacity = nullptr, - TNode allocation_site = {}); - - TNode AllocateFixedArray( - ElementsKind kind, Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, - AllocationFlags flags = kNone, - SloppyTNode fixed_array_map = nullptr); - - TNode AllocateFixedArray( - ElementsKind kind, TNode capacity, AllocationFlags flags, - SloppyTNode fixed_array_map = nullptr) { - return AllocateFixedArray(kind, capacity, INTPTR_PARAMETERS, flags, - fixed_array_map); - } + TNode array, TNode begin, + TNode count); + template TNode AllocateFixedArray( - ElementsKind kind, TNode capacity, AllocationFlags flags, - SloppyTNode fixed_array_map = nullptr) { - return AllocateFixedArray(kind, capacity, SMI_PARAMETERS, flags, - fixed_array_map); - } + ElementsKind kind, TNode capacity, AllocationFlags flags = kNone, + base::Optional> fixed_array_map = base::nullopt); TNode GetCreationContext(TNode receiver, Label* if_bailout); @@ -2089,14 +1870,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return result; } - TNode AllocatePropertyArray( - Node* capacity, ParameterMode mode = INTPTR_PARAMETERS, - AllocationFlags flags = kNone); - - // Perform CreateArrayIterator (ES #sec-createarrayiterator). 
- TNode CreateArrayIterator(TNode context, - TNode object, - IterationKind mode); + TNode AllocatePropertyArray(TNode capacity); // TODO(v8:9722): Return type should be JSIteratorResult TNode AllocateJSIteratorResult(SloppyTNode context, @@ -2124,8 +1898,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode length); void FillPropertyArrayWithUndefined(TNode array, - Node* from_index, Node* to_index, - ParameterMode mode = INTPTR_PARAMETERS); + TNode from_index, + TNode to_index); enum class DestroySource { kNo, kYes }; @@ -2142,35 +1916,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // being cloned, to ensure that mutable HeapNumbers are unique between the // source and cloned object. void CopyPropertyArrayValues(TNode from_array, - TNode to_array, Node* length, + TNode to_array, + TNode length, WriteBarrierMode barrier_mode, - ParameterMode mode, DestroySource destroy_source); // Copies all elements from |from_array| of |length| size to // |to_array| of the same size respecting the elements kind. + template void CopyFixedArrayElements( ElementsKind kind, TNode from_array, - TNode to_array, Node* length, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS) { + TNode to_array, TNode length, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { CopyFixedArrayElements(kind, from_array, kind, to_array, - IntPtrOrSmiConstant(0, mode), length, length, - barrier_mode, mode); + IntPtrOrSmiConstant(0), length, length, + barrier_mode); } // Copies |element_count| elements from |from_array| starting from element // zero to |to_array| of |capacity| size respecting both array's elements // kinds. 
+ template void CopyFixedArrayElements( ElementsKind from_kind, TNode from_array, ElementsKind to_kind, TNode to_array, - TNode element_count, TNode capacity, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS) { + TNode element_count, TNode capacity, + WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, - IntPtrOrSmiConstant(0, mode), element_count, - capacity, barrier_mode, mode); + IntPtrOrSmiConstant(0), element_count, + capacity, barrier_mode); } // Copies |element_count| elements from |from_array| starting from element @@ -2181,25 +1955,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // (i.e. that there were holes). If |convert_holes_to_undefined| is // HoleConversionMode::kConvertToUndefined, then it must not be the case that // IsDoubleElementsKind(to_kind). + template void CopyFixedArrayElements( ElementsKind from_kind, TNode from_array, - ElementsKind to_kind, TNode to_array, Node* first_element, - Node* element_count, Node* capacity, + ElementsKind to_kind, TNode to_array, + TNode first_element, TNode element_count, + TNode capacity, WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER, - ParameterMode mode = INTPTR_PARAMETERS, HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, TVariable* var_holes_converted = nullptr); - void CopyFixedArrayElements( - ElementsKind from_kind, TNode from_array, - ElementsKind to_kind, TNode to_array, - TNode first_element, TNode element_count, TNode capacity, - WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER) { - CopyFixedArrayElements(from_kind, from_array, to_kind, to_array, - first_element, element_count, capacity, barrier_mode, - SMI_PARAMETERS); - } - void JumpIfPointersFromHereAreInteresting(TNode object, Label* interesting); @@ -2234,17 +1999,21 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return UncheckedCast(base); } - TNode HeapObjectToSloppyArgumentsElements( - TNode base, 
Label* cast_fail) { - GotoIf(TaggedNotEqual(LoadMap(base), SloppyArgumentsElementsMapConstant()), - cast_fail); - return UncheckedCast(base); - } - TNode ConvertElementsKindToInt(TNode elements_kind) { return UncheckedCast(elements_kind); } + template + bool ClassHasMapConstant() { + return false; + } + + template + TNode GetClassMapConstant() { + UNREACHABLE(); + return TNode(); + } + enum class ExtractFixedArrayFlag { kFixedArrays = 1, kFixedDoubleArrays = 2, @@ -2284,33 +2053,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // * If |source_elements_kind| is given, the function will try to use the // runtime elements kind of source to make copy faster. More specifically, it // can skip write barriers. + template TNode ExtractFixedArray( - TNode source, Node* first, Node* count = nullptr, - Node* capacity = nullptr, + TNode source, base::Optional> first, + base::Optional> count = base::nullopt, + base::Optional> capacity = base::nullopt, ExtractFixedArrayFlags extract_flags = ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS, TVariable* var_holes_converted = nullptr, base::Optional> source_elements_kind = base::nullopt); - TNode ExtractFixedArray( - TNode source, TNode first, TNode count, - TNode capacity, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays) { - return ExtractFixedArray(source, first, count, capacity, extract_flags, - SMI_PARAMETERS); - } - - TNode ExtractFixedArray( - TNode source, TNode first, TNode count, - TNode capacity, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays) { - return CAST(ExtractFixedArray(source, first, count, capacity, extract_flags, - INTPTR_PARAMETERS)); - } - // Copy a portion of an existing FixedArray or FixedDoubleArray into a new // FixedArray, including special appropriate handling for COW arrays. 
// * |source| is either a FixedArray or FixedDoubleArray from which to copy @@ -2328,8 +2080,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // PACKED/HOLEY_ELEMENTS can be used, but not PACKED_DOUBLE_ELEMENTS. // * |allocation_flags| and |extract_flags| influence how the target // FixedArray is allocated. - // * |parameter_mode| determines the parameter mode of |first|, |count| and - // |capacity|. // * |convert_holes| is used to signify that the target array should use // undefined in places of holes. // * If |convert_holes| is true and |var_holes_converted| not nullptr, then @@ -2338,15 +2088,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // compatible with the result array. For example, if the input was of // HOLEY_SMI_ELEMENTS kind, and a conversion took place, the result will be // compatible only with HOLEY_ELEMENTS and PACKED_ELEMENTS. + template TNode ExtractToFixedArray( - SloppyTNode source, Node* first, Node* count, - Node* capacity, SloppyTNode source_map, - ElementsKind from_kind = PACKED_ELEMENTS, - AllocationFlags allocation_flags = AllocationFlag::kNone, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS, - HoleConversionMode convert_holes = HoleConversionMode::kDontConvert, + SloppyTNode source, TNode first, + TNode count, TNode capacity, TNode source_map, + ElementsKind from_kind, AllocationFlags allocation_flags, + ExtractFixedArrayFlags extract_flags, HoleConversionMode convert_holes, TVariable* var_holes_converted = nullptr, base::Optional> source_runtime_kind = base::nullopt); @@ -2366,15 +2113,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // is produced or not. // * |allocation_flags| and |extract_flags| influence how the target array is // allocated. - // * |parameter_mode| determines the parameter mode of |first|, |count| and - // |capacity|. 
+ template TNode ExtractFixedDoubleArrayFillingHoles( - TNode source, Node* first, Node* count, Node* capacity, - TNode source_map, TVariable* var_holes_converted, - AllocationFlags allocation_flags, - ExtractFixedArrayFlags extract_flags = - ExtractFixedArrayFlag::kAllFixedArrays, - ParameterMode parameter_mode = INTPTR_PARAMETERS); + TNode source, TNode first, TNode count, + TNode capacity, TNode source_map, + TVariable* var_holes_converted, AllocationFlags allocation_flags, + ExtractFixedArrayFlags extract_flags); // Copy the entire contents of a FixedArray or FixedDoubleArray to a new // array, including special appropriate handling for empty arrays and COW @@ -2390,30 +2134,19 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode CloneFixedArray( TNode source, ExtractFixedArrayFlags flags = - ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW) { - ParameterMode mode = OptimalParameterMode(); - return ExtractFixedArray(source, IntPtrOrSmiConstant(0, mode), nullptr, - nullptr, flags, mode); - } + ExtractFixedArrayFlag::kAllFixedArraysDontCopyCOW); // Loads an element from |array| of |from_kind| elements by given |offset| // (NOTE: not index!), does a hole check if |if_hole| is provided and // converts the value so that it becomes ready for storing to array of // |to_kind| elements. 
- Node* LoadElementAndPrepareForStore(Node* array, Node* offset, + Node* LoadElementAndPrepareForStore(TNode array, + TNode offset, ElementsKind from_kind, ElementsKind to_kind, Label* if_hole); - Node* CalculateNewElementsCapacity(Node* old_capacity, - ParameterMode mode = INTPTR_PARAMETERS); - - TNode CalculateNewElementsCapacity(TNode old_capacity) { - return CAST(CalculateNewElementsCapacity(old_capacity, SMI_PARAMETERS)); - } - TNode CalculateNewElementsCapacity(TNode old_capacity) { - return UncheckedCast( - CalculateNewElementsCapacity(old_capacity, INTPTR_PARAMETERS)); - } + template + TNode CalculateNewElementsCapacity(TNode old_capacity); // Tries to grow the |elements| array of given |object| to store the |key| // or bails out if the growing gap is too big. Returns new elements. @@ -2425,26 +2158,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Tries to grow the |capacity|-length |elements| array of given |object| // to store the |key| or bails out if the growing gap is too big. Returns // new elements. + template TNode TryGrowElementsCapacity(TNode object, TNode elements, - ElementsKind kind, Node* key, - Node* capacity, - ParameterMode mode, + ElementsKind kind, + TNode key, + TNode capacity, Label* bailout); // Grows elements capacity of given object. Returns new elements. + template TNode GrowElementsCapacity( TNode object, TNode elements, - ElementsKind from_kind, ElementsKind to_kind, Node* capacity, - Node* new_capacity, ParameterMode mode, Label* bailout); + ElementsKind from_kind, ElementsKind to_kind, TNode capacity, + TNode new_capacity, Label* bailout); // Given a need to grow by |growth|, allocate an appropriate new capacity // if necessary, and return a new elements FixedArray object. Label |bailout| // is followed for allocation failure. 
- void PossiblyGrowElementsCapacity(ParameterMode mode, ElementsKind kind, - TNode array, Node* length, + void PossiblyGrowElementsCapacity(ElementsKind kind, TNode array, + TNode length, TVariable* var_elements, - Node* growth, Label* bailout); + TNode growth, Label* bailout); // Allocation site manipulation void InitializeAllocationMemento(TNode base, @@ -2566,9 +2301,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Check whether the map is for an object with special properties, such as a // JSProxy or an object with interceptors. TNode InstanceTypeEqual(SloppyTNode instance_type, int type); - TNode IsAccessorInfo(SloppyTNode object); - TNode IsAccessorPair(SloppyTNode object); - TNode IsAllocationSite(SloppyTNode object); TNode IsNoElementsProtectorCellInvalid(); TNode IsArrayIteratorProtectorCellInvalid(); TNode IsBigIntInstanceType(SloppyTNode instance_type); @@ -2577,38 +2309,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsCallableMap(SloppyTNode map); TNode IsCallable(SloppyTNode object); TNode TaggedIsCallable(TNode object); - TNode IsCell(SloppyTNode object); - TNode IsCode(SloppyTNode object); TNode IsConsStringInstanceType(SloppyTNode instance_type); TNode IsConstructorMap(SloppyTNode map); TNode IsConstructor(SloppyTNode object); - TNode IsCoverageInfo(TNode object); - TNode IsDebugInfo(TNode object); TNode IsDeprecatedMap(SloppyTNode map); TNode IsNameDictionary(SloppyTNode object); TNode IsGlobalDictionary(SloppyTNode object); TNode IsExtensibleMap(SloppyTNode map); TNode IsExtensibleNonPrototypeMap(TNode map); TNode IsExternalStringInstanceType(SloppyTNode instance_type); - TNode IsFeedbackCell(SloppyTNode object); - TNode IsFeedbackVector(SloppyTNode object); - TNode IsContext(SloppyTNode object); TNode IsFixedArray(SloppyTNode object); TNode IsFixedArraySubclass(SloppyTNode object); TNode IsFixedArrayWithKind(SloppyTNode object, ElementsKind kind); TNode IsFixedArrayWithKindOrEmpty(SloppyTNode object, ElementsKind kind); - TNode 
IsFixedDoubleArray(SloppyTNode object); TNode IsFunctionWithPrototypeSlotMap(SloppyTNode map); TNode IsHashTable(SloppyTNode object); TNode IsEphemeronHashTable(SloppyTNode object); - TNode IsHeapNumber(SloppyTNode object); TNode IsHeapNumberInstanceType(SloppyTNode instance_type); TNode IsOddball(SloppyTNode object); TNode IsOddballInstanceType(SloppyTNode instance_type); TNode IsIndirectStringInstanceType(SloppyTNode instance_type); - TNode IsJSAggregateError(TNode object); TNode IsJSArrayBuffer(SloppyTNode object); TNode IsJSDataView(TNode object); TNode IsJSArrayInstanceType(SloppyTNode instance_type); @@ -2617,7 +2339,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsJSArrayIterator(SloppyTNode object); TNode IsJSAsyncGeneratorObject(SloppyTNode object); TNode IsJSFunctionInstanceType(SloppyTNode instance_type); - TNode IsAllocationSiteInstanceType(SloppyTNode instance_type); TNode IsJSFunctionMap(SloppyTNode map); TNode IsJSFunction(SloppyTNode object); TNode IsJSBoundFunction(SloppyTNode object); @@ -2650,21 +2371,16 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsMap(SloppyTNode object); TNode IsName(SloppyTNode object); TNode IsNameInstanceType(SloppyTNode instance_type); - TNode IsNativeContext(SloppyTNode object); TNode IsNullOrJSReceiver(SloppyTNode object); TNode IsNullOrUndefined(SloppyTNode object); TNode IsNumberDictionary(SloppyTNode object); TNode IsOneByteStringInstanceType(TNode instance_type); + TNode IsSeqOneByteStringInstanceType(TNode instance_type); TNode IsPrimitiveInstanceType(SloppyTNode instance_type); - TNode IsPrivateSymbol(SloppyTNode object); TNode IsPrivateName(SloppyTNode symbol); - TNode IsPromiseCapability(SloppyTNode object); TNode IsPropertyArray(SloppyTNode object); TNode IsPropertyCell(SloppyTNode object); - TNode IsPromiseReaction(SloppyTNode object); TNode IsPromiseReactionJobTask(TNode object); - TNode IsPromiseRejectReactionJobTask(SloppyTNode object); - TNode IsPromiseFulfillReactionJobTask(SloppyTNode 
object); TNode IsPrototypeInitialArrayPrototype(SloppyTNode context, SloppyTNode map); TNode IsPrototypeTypedArrayPrototype(SloppyTNode context, @@ -2685,13 +2401,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsCustomElementsReceiverInstanceType( TNode instance_type); TNode IsSpecialReceiverMap(SloppyTNode map); - // Returns true if the map corresponds to non-special fast or dictionary - // object. - TNode IsSimpleObjectMap(TNode map); TNode IsStringInstanceType(SloppyTNode instance_type); TNode IsString(SloppyTNode object); + TNode IsSeqOneByteString(TNode object); + TNode IsSymbolInstanceType(SloppyTNode instance_type); - TNode IsSymbol(SloppyTNode object); TNode IsInternalizedStringInstanceType(TNode instance_type); TNode IsUniqueName(TNode object); TNode IsUniqueNameNoIndex(TNode object); @@ -2700,10 +2414,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode IsNotWeakFixedArraySubclass(SloppyTNode object); TNode IsZeroOrContext(SloppyTNode object); - inline TNode IsSharedFunctionInfo(TNode object) { - return IsSharedFunctionInfoMap(LoadMap(object)); - } - TNode IsPromiseResolveProtectorCellInvalid(); TNode IsPromiseThenProtectorCellInvalid(); TNode IsArraySpeciesProtectorCellInvalid(); @@ -2720,8 +2430,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler Int32Constant(0)); } - // True iff |object| is a Smi or a HeapNumber. - TNode IsNumber(SloppyTNode object); // True iff |object| is a Smi or a HeapNumber or a BigInt. TNode IsNumeric(SloppyTNode object); @@ -2749,9 +2457,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // [0, 2^32-1). 
TNode IsNumberArrayIndex(TNode number); - TNode FixedArraySizeDoesntFitInNewSpace( - Node* element_count, int base_size = FixedArray::kHeaderSize, - ParameterMode mode = INTPTR_PARAMETERS); + template + TNode FixedArraySizeDoesntFitInNewSpace(TNode element_count, + int base_size); + + TNode IsMetaMap(TNode o) { return IsMapMap(o); } // ElementsKind helpers: TNode ElementsKindEqual(TNode a, TNode b) { @@ -2844,6 +2554,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode ToLength_Inline(SloppyTNode context, SloppyTNode input); + TNode OrdinaryToPrimitive(TNode context, TNode input, + OrdinaryToPrimitiveHint hint); + // Returns a node that contains a decoded (unsigned!) value of a bit // field |BitField| in |word32|. Returns result as an uint32 node. template @@ -3433,6 +3146,11 @@ class V8_EXPORT_PRIVATE CodeStubAssembler ElementsKind elements_kind, TNode context); + template + TNode PrepareValueForWriteToTypedArray(TNode input, + ElementsKind elements_kind, + TNode context); + // Store value to an elements array with given elements kind. // TODO(turbofan): For BIGINT64_ELEMENTS and BIGUINT64_ELEMENTS // we pass {value} as BigInt object instead of int64_t. 
We should @@ -3457,15 +3175,15 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode context, TVariable* maybe_converted_value = nullptr); - Node* CheckForCapacityGrow(TNode object, - TNode elements, ElementsKind kind, - TNode length, TNode key, - Label* bailout); + TNode CheckForCapacityGrow( + TNode object, TNode elements, ElementsKind kind, + TNode length, TNode key, Label* bailout); TNode CopyElementsOnWrite(TNode object, TNode elements, - ElementsKind kind, Node* length, - ParameterMode mode, Label* bailout); + ElementsKind kind, + TNode length, + Label* bailout); void TransitionElementsKind(TNode object, TNode map, ElementsKind from_kind, ElementsKind to_kind, @@ -3512,53 +3230,47 @@ class V8_EXPORT_PRIVATE CodeStubAssembler enum class ForEachDirection { kForward, kReverse }; - using FastFixedArrayForEachBody = - std::function; + using FastArrayForEachBody = + std::function array, TNode offset)>; - void BuildFastFixedArrayForEach( - const CodeStubAssembler::VariableList& vars, Node* fixed_array, + void BuildFastArrayForEach( + const CodeStubAssembler::VariableList& vars, Node* array, ElementsKind kind, Node* first_element_inclusive, - Node* last_element_exclusive, const FastFixedArrayForEachBody& body, + Node* last_element_exclusive, const FastArrayForEachBody& body, ParameterMode mode = INTPTR_PARAMETERS, ForEachDirection direction = ForEachDirection::kReverse); - void BuildFastFixedArrayForEach( - Node* fixed_array, ElementsKind kind, Node* first_element_inclusive, - Node* last_element_exclusive, const FastFixedArrayForEachBody& body, + void BuildFastArrayForEach( + Node* array, ElementsKind kind, Node* first_element_inclusive, + Node* last_element_exclusive, const FastArrayForEachBody& body, ParameterMode mode = INTPTR_PARAMETERS, ForEachDirection direction = ForEachDirection::kReverse) { CodeStubAssembler::VariableList list(0, zone()); - BuildFastFixedArrayForEach(list, fixed_array, kind, first_element_inclusive, - last_element_exclusive, body, mode, 
direction); + BuildFastArrayForEach(list, array, kind, first_element_inclusive, + last_element_exclusive, body, mode, direction); } - TNode GetArrayAllocationSize(TNode element_count, + template + TNode GetArrayAllocationSize(TNode element_count, ElementsKind kind, int header_size) { return ElementOffsetFromIndex(element_count, kind, header_size); } - // TODO(v8:9708): remove once all uses are ported. - TNode GetArrayAllocationSize(Node* element_count, ElementsKind kind, - ParameterMode mode, int header_size) { - return ElementOffsetFromIndex(element_count, kind, mode, header_size); - } - - TNode GetFixedArrayAllocationSize(Node* element_count, - ElementsKind kind, - ParameterMode mode) { - return GetArrayAllocationSize(element_count, kind, mode, - FixedArray::kHeaderSize); + template + TNode GetFixedArrayAllocationSize(TNode element_count, + ElementsKind kind) { + return GetArrayAllocationSize(element_count, kind, FixedArray::kHeaderSize); } - TNode GetPropertyArrayAllocationSize(Node* element_count, - ParameterMode mode) { - return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, mode, + TNode GetPropertyArrayAllocationSize(TNode element_count) { + return GetArrayAllocationSize(element_count, PACKED_ELEMENTS, PropertyArray::kHeaderSize); } - void GotoIfFixedArraySizeDoesntFitInNewSpace(Node* element_count, - Label* doesnt_fit, int base_size, - ParameterMode mode); + template + void GotoIfFixedArraySizeDoesntFitInNewSpace(TNode element_count, + Label* doesnt_fit, + int base_size); void InitializeFieldsWithRoot(TNode object, TNode start_offset, @@ -3689,6 +3401,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // JSTypedArray helpers TNode LoadJSTypedArrayLength(TNode typed_array); TNode LoadJSTypedArrayDataPtr(TNode typed_array); + TNode GetTypedArrayBuffer(TNode context, + TNode array); template TNode ElementOffsetFromIndex(TNode index, ElementsKind kind, @@ -3764,6 +3478,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler bool ConstexprInt32NotEqual(int32_t a, 
int32_t b) { return a != b; } bool ConstexprInt32GreaterThanEqual(int32_t a, int32_t b) { return a >= b; } uint32_t ConstexprUint32Add(uint32_t a, uint32_t b) { return a + b; } + int32_t ConstexprUint32Sub(uint32_t a, uint32_t b) { return a - b; } int31_t ConstexprInt31Add(int31_t a, int31_t b) { int32_t val; CHECK(!base::bits::SignedAddOverflow32(a, b, &val)); @@ -3775,6 +3490,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler return val; } + int32_t ConstexprWord32Or(int32_t a, int32_t b) { return a | b; } + bool ConstexprUintPtrLessThan(uintptr_t a, uintptr_t b) { return a < b; } // CSA does not support 64-bit types on 32-bit platforms so as a workaround @@ -3903,12 +3620,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TNode TryToIntptr(SloppyTNode key, Label* if_not_intptr, TVariable* var_instance_type = nullptr); - TNode AllocateSyntheticFunctionContext( - TNode native_context, int slots); - void InitializeSyntheticFunctionContext(TNode native_context, - TNode context_heap_object, - int slots); - TNode ArrayCreate(TNode context, TNode length); // Allocate a clone of a mutable primitive, if {object} is a mutable @@ -3943,9 +3654,8 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // fields initialized. TNode AllocateUninitializedJSArray( TNode array_map, TNode length, - TNode allocation_site, TNode size_in_bytes); - - TNode IsValidSmi(TNode smi); + base::Optional> allocation_site, + TNode size_in_bytes); TNode SmiShiftBitsConstant() { return IntPtrConstant(kSmiShiftSize + kSmiTagSize); @@ -3991,10 +3701,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler TVariable* var_maybe_bigint = nullptr, TVariable* var_feedback = nullptr); - Node* LoadObjectField(SloppyTNode object, int offset, + Node* LoadObjectField(TNode object, int offset, MachineType type); + Node* LoadObjectField(TNode object, TNode offset, MachineType type); - Node* LoadObjectField(SloppyTNode object, - SloppyTNode offset, MachineType type); // Low-level accessors for Descriptor arrays. 
template @@ -4017,49 +3726,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler ParameterMode parameter_mode = INTPTR_PARAMETERS); }; -// template class V8_EXPORT_PRIVATE CodeStubArguments { public: using Node = compiler::Node; - enum ReceiverMode { kHasReceiver, kNoReceiver }; // |argc| specifies the number of arguments passed to the builtin excluding - // the receiver. The arguments will include a receiver iff |receiver_mode| - // is kHasReceiver. + // the receiver. The arguments include the receiver. + CodeStubArguments(CodeStubAssembler* assembler, TNode argc) + : CodeStubArguments(assembler, argc, TNode()) {} + CodeStubArguments(CodeStubAssembler* assembler, TNode argc) + : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc)) {} CodeStubArguments(CodeStubAssembler* assembler, TNode argc, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, argc, TNode(), receiver_mode) {} - - CodeStubArguments(CodeStubAssembler* assembler, TNode argc, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, assembler->ChangeInt32ToIntPtr(argc), - TNode(), receiver_mode) {} - - // TODO(v8:9708): Consider removing this variant - CodeStubArguments(CodeStubAssembler* assembler, TNode argc, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), - TNode(), receiver_mode) {} - - // |argc| specifies the number of arguments passed to the builtin excluding - // the receiver. The arguments will include a receiver iff |receiver_mode| - // is kHasReceiver. 
- CodeStubArguments(CodeStubAssembler* assembler, TNode argc, - TNode fp, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver); - - CodeStubArguments(CodeStubAssembler* assembler, TNode argc, - TNode fp, - ReceiverMode receiver_mode = ReceiverMode::kHasReceiver) - : CodeStubArguments(assembler, assembler->ParameterToIntPtr(argc), fp, - receiver_mode) {} + TNode fp); // Used by Torque to construct arguments based on a Torque-defined // struct of values. CodeStubArguments(CodeStubAssembler* assembler, TorqueStructArguments torque_arguments) : assembler_(assembler), - receiver_mode_(ReceiverMode::kHasReceiver), argc_(torque_arguments.length), base_(torque_arguments.base), fp_(torque_arguments.frame) {} @@ -4072,68 +3756,41 @@ class V8_EXPORT_PRIVATE CodeStubArguments { // Computes address of the index'th argument. TNode AtIndexPtr(TNode index) const; - TNode AtIndexPtr(TNode index) const { - return AtIndexPtr(assembler_->ParameterToIntPtr(index)); - } // |index| is zero-based and does not include the receiver TNode AtIndex(TNode index) const; - // TODO(v8:9708): Consider removing this variant - TNode AtIndex(TNode index) const { - return AtIndex(assembler_->ParameterToIntPtr(index)); - } - TNode AtIndex(int index) const; - TNode GetOptionalArgumentValue(int index) { - return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); - } - TNode GetOptionalArgumentValue(int index, - TNode default_value); - TNode GetLength() const { return argc_; } TorqueStructArguments GetTorqueArguments() const { return TorqueStructArguments{fp_, base_, argc_}; } + TNode GetOptionalArgumentValue(TNode index, + TNode default_value); TNode GetOptionalArgumentValue(TNode index) { return GetOptionalArgumentValue(index, assembler_->UndefinedConstant()); } - TNode GetOptionalArgumentValue(TNode index, - TNode default_value); - - using ForEachBodyFunction = std::function arg)>; + TNode GetOptionalArgumentValue(int index) { + return 
GetOptionalArgumentValue(assembler_->IntPtrConstant(index)); + } // Iteration doesn't include the receiver. |first| and |last| are zero-based. - template - void ForEach(const ForEachBodyFunction& body, TNode first = {}, - TNode last = {}) const { + using ForEachBodyFunction = std::function arg)>; + void ForEach(const ForEachBodyFunction& body, TNode first = {}, + TNode last = {}) const { CodeStubAssembler::VariableList list(0, assembler_->zone()); ForEach(list, body, first, last); } - - // Iteration doesn't include the receiver. |first| and |last| are zero-based. void ForEach(const CodeStubAssembler::VariableList& vars, const ForEachBodyFunction& body, TNode first = {}, TNode last = {}) const; - void ForEach(const CodeStubAssembler::VariableList& vars, - const ForEachBodyFunction& body, TNode first, - TNode last = {}) const { - TNode first_intptr = assembler_->ParameterToIntPtr(first); - TNode last_intptr; - if (last != nullptr) { - last_intptr = assembler_->ParameterToIntPtr(last); - } - return ForEach(vars, body, first_intptr, last_intptr); - } - void PopAndReturn(TNode value); private: CodeStubAssembler* assembler_; - ReceiverMode receiver_mode_; TNode argc_; TNode base_; TNode fp_; @@ -4226,6 +3883,19 @@ class PrototypeCheckAssembler : public CodeStubAssembler { DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags) +#define CLASS_MAP_CONSTANT_ADAPTER(V, rootIndexName, rootAccessorName, \ + class_name) \ + template <> \ + inline bool CodeStubAssembler::ClassHasMapConstant() { \ + return true; \ + } \ + template <> \ + inline TNode CodeStubAssembler::GetClassMapConstant() { \ + return class_name##MapConstant(); \ + } + +UNIQUE_INSTANCE_TYPE_MAP_LIST_GENERATOR(CLASS_MAP_CONSTANT_ADAPTER, _) + } // namespace internal } // namespace v8 #endif // V8_CODEGEN_CODE_STUB_ASSEMBLER_H_ diff --git a/deps/v8/src/codegen/compilation-cache.cc b/deps/v8/src/codegen/compilation-cache.cc index ef3d83a06eb88a..9c5cb42edd6612 100644 --- 
a/deps/v8/src/codegen/compilation-cache.cc +++ b/deps/v8/src/codegen/compilation-cache.cc @@ -12,6 +12,7 @@ #include "src/objects/objects-inl.h" #include "src/objects/slots.h" #include "src/objects/visitors.h" +#include "src/utils/ostreams.h" namespace v8 { namespace internal { @@ -28,16 +29,17 @@ CompilationCache::CompilationCache(Isolate* isolate) eval_global_(isolate), eval_contextual_(isolate), reg_exp_(isolate, kRegExpGenerations), + code_(isolate), enabled_script_and_eval_(true) { CompilationSubCache* subcaches[kSubCacheCount] = { - &script_, &eval_global_, &eval_contextual_, ®_exp_}; + &script_, &eval_global_, &eval_contextual_, ®_exp_, &code_}; for (int i = 0; i < kSubCacheCount; ++i) { subcaches_[i] = subcaches[i]; } } Handle CompilationSubCache::GetTable(int generation) { - DCHECK(generation < generations_); + DCHECK_LT(generation, generations()); Handle result; if (tables_[generation].IsUndefined(isolate())) { result = CompilationCacheTable::New(isolate(), kInitialCacheSize); @@ -50,33 +52,44 @@ Handle CompilationSubCache::GetTable(int generation) { return result; } -void CompilationSubCache::Age() { - // Don't directly age single-generation caches. - if (generations_ == 1) { - if (!tables_[0].IsUndefined(isolate())) { - CompilationCacheTable::cast(tables_[0]).Age(); - } - return; - } +// static +void CompilationSubCache::AgeByGeneration(CompilationSubCache* c) { + DCHECK_GT(c->generations(), 1); // Age the generations implicitly killing off the oldest. - for (int i = generations_ - 1; i > 0; i--) { - tables_[i] = tables_[i - 1]; + for (int i = c->generations() - 1; i > 0; i--) { + c->tables_[i] = c->tables_[i - 1]; } // Set the first generation as unborn. 
- tables_[0] = ReadOnlyRoots(isolate()).undefined_value(); + c->tables_[0] = ReadOnlyRoots(c->isolate()).undefined_value(); +} + +// static +void CompilationSubCache::AgeCustom(CompilationSubCache* c) { + DCHECK_EQ(c->generations(), 1); + if (c->tables_[0].IsUndefined(c->isolate())) return; + CompilationCacheTable::cast(c->tables_[0]).Age(); +} + +void CompilationCacheScript::Age() { AgeCustom(this); } +void CompilationCacheEval::Age() { AgeCustom(this); } +void CompilationCacheRegExp::Age() { AgeByGeneration(this); } +void CompilationCacheCode::Age() { + if (FLAG_trace_turbo_nci) CompilationCacheCode::TraceAgeing(); + AgeByGeneration(this); } void CompilationSubCache::Iterate(RootVisitor* v) { v->VisitRootPointers(Root::kCompilationCache, nullptr, FullObjectSlot(&tables_[0]), - FullObjectSlot(&tables_[generations_])); + FullObjectSlot(&tables_[generations()])); } void CompilationSubCache::Clear() { MemsetPointer(reinterpret_cast(tables_), - ReadOnlyRoots(isolate()).undefined_value().ptr(), generations_); + ReadOnlyRoots(isolate()).undefined_value().ptr(), + generations()); } void CompilationSubCache::Remove(Handle function_info) { @@ -253,6 +266,58 @@ void CompilationCacheRegExp::Put(Handle source, JSRegExp::Flags flags, CompilationCacheTable::PutRegExp(isolate(), table, source, flags, data)); } +MaybeHandle CompilationCacheCode::Lookup(Handle key) { + // Make sure not to leak the table into the surrounding handle + // scope. Otherwise, we risk keeping old tables around even after + // having cleared the cache. 
+ HandleScope scope(isolate()); + MaybeHandle maybe_value; + int generation = 0; + for (; generation < generations(); generation++) { + Handle table = GetTable(generation); + maybe_value = table->LookupCode(key); + if (!maybe_value.is_null()) break; + } + + if (maybe_value.is_null()) { + isolate()->counters()->compilation_cache_misses()->Increment(); + return MaybeHandle(); + } + + Handle value = maybe_value.ToHandleChecked(); + if (generation != 0) Put(key, value); // Add to the first generation. + isolate()->counters()->compilation_cache_hits()->Increment(); + return scope.CloseAndEscape(value); +} + +void CompilationCacheCode::Put(Handle key, + Handle value) { + HandleScope scope(isolate()); + Handle table = GetFirstTable(); + SetFirstTable(CompilationCacheTable::PutCode(isolate(), table, key, value)); +} + +void CompilationCacheCode::TraceAgeing() { + DCHECK(FLAG_trace_turbo_nci); + StdoutStream os; + os << "NCI cache ageing: Removing oldest generation" << std::endl; +} + +void CompilationCacheCode::TraceInsertion(Handle key, + Handle value) { + DCHECK(FLAG_trace_turbo_nci); + StdoutStream os; + os << "NCI cache insertion: " << Brief(*key) << ", " << Brief(*value) + << std::endl; +} + +void CompilationCacheCode::TraceHit(Handle key, + Handle value) { + DCHECK(FLAG_trace_turbo_nci); + StdoutStream os; + os << "NCI cache hit: " << Brief(*key) << ", " << Brief(*value) << std::endl; +} + void CompilationCache::Remove(Handle function_info) { if (!IsEnabledScriptAndEval()) return; @@ -306,6 +371,10 @@ MaybeHandle CompilationCache::LookupRegExp(Handle source, return reg_exp_.Lookup(source, flags); } +MaybeHandle CompilationCache::LookupCode(Handle sfi) { + return code_.Lookup(sfi); +} + void CompilationCache::PutScript(Handle source, Handle native_context, LanguageMode language_mode, @@ -345,6 +414,11 @@ void CompilationCache::PutRegExp(Handle source, JSRegExp::Flags flags, reg_exp_.Put(source, flags, data); } +void CompilationCache::PutCode(Handle shared, + Handle 
code) { + code_.Put(shared, code); +} + void CompilationCache::Clear() { for (int i = 0; i < kSubCacheCount; i++) { subcaches_[i]->Clear(); diff --git a/deps/v8/src/codegen/compilation-cache.h b/deps/v8/src/codegen/compilation-cache.h index 04bea44a82b36e..8aac29fc2982b9 100644 --- a/deps/v8/src/codegen/compilation-cache.h +++ b/deps/v8/src/codegen/compilation-cache.h @@ -5,6 +5,7 @@ #ifndef V8_CODEGEN_COMPILATION_CACHE_H_ #define V8_CODEGEN_COMPILATION_CACHE_H_ +#include "src/base/hashmap.h" #include "src/objects/compilation-cache.h" #include "src/utils/allocation.h" @@ -25,13 +26,11 @@ class CompilationSubCache { public: CompilationSubCache(Isolate* isolate, int generations) : isolate_(isolate), generations_(generations) { - tables_ = NewArray(generations); + DCHECK_LE(generations, kMaxGenerations); } - ~CompilationSubCache() { DeleteArray(tables_); } - - // Index for the first generation in the cache. - static const int kFirstGeneration = 0; + static constexpr int kFirstGeneration = 0; + static constexpr int kMaxGenerations = 2; // Get the compilation cache tables for a specific generation. Handle GetTable(int generation); @@ -47,7 +46,7 @@ class CompilationSubCache { // Age the sub-cache by evicting the oldest generation and creating a new // young generation. - void Age(); + virtual void Age() = 0; // GC support. void Iterate(RootVisitor* v); @@ -59,15 +58,20 @@ class CompilationSubCache { void Remove(Handle function_info); // Number of generations in this sub-cache. - inline int generations() { return generations_; } + int generations() const { return generations_; } protected: - Isolate* isolate() { return isolate_; } + Isolate* isolate() const { return isolate_; } + + // Ageing occurs either by removing the oldest generation, or with + // custom logic implemented in CompilationCacheTable::Age. 
+ static void AgeByGeneration(CompilationSubCache* c); + static void AgeCustom(CompilationSubCache* c); private: - Isolate* isolate_; - int generations_; // Number of generations. - Object* tables_; // Compilation cache tables - one for each generation. + Isolate* const isolate_; + const int generations_; + Object tables_[kMaxGenerations]; // One for each generation. DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache); }; @@ -88,6 +92,8 @@ class CompilationCacheScript : public CompilationSubCache { LanguageMode language_mode, Handle function_info); + void Age() override; + private: bool HasOrigin(Handle function_info, MaybeHandle name, int line_offset, int column_offset, @@ -123,6 +129,8 @@ class CompilationCacheEval : public CompilationSubCache { Handle native_context, Handle feedback_cell, int position); + void Age() override; + private: DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval); }; @@ -138,10 +146,38 @@ class CompilationCacheRegExp : public CompilationSubCache { void Put(Handle source, JSRegExp::Flags flags, Handle data); + void Age() override; + private: DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp); }; +// Sub-cache for Code objects. All code inserted into this cache must +// be usable across different native contexts. +class CompilationCacheCode : public CompilationSubCache { + public: + explicit CompilationCacheCode(Isolate* isolate) + : CompilationSubCache(isolate, kGenerations) {} + + MaybeHandle Lookup(Handle key); + void Put(Handle key, Handle value); + + void Age() override; + + // TODO(jgruber,v8:8888): For simplicity we use the generational + // approach here, but could consider something else (or more + // generations) in the future. 
+ static constexpr int kGenerations = 2; + + static void TraceAgeing(); + static void TraceInsertion(Handle key, + Handle value); + static void TraceHit(Handle key, Handle value); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheCode); +}; + // The compilation cache keeps shared function infos for compiled // scripts and evals. The shared function infos are looked up using // the source string as the key. For regular expressions the @@ -169,6 +205,8 @@ class V8_EXPORT_PRIVATE CompilationCache { MaybeHandle LookupRegExp(Handle source, JSRegExp::Flags flags); + MaybeHandle LookupCode(Handle sfi); + // Associate the (source, kind) pair to the shared function // info. This may overwrite an existing mapping. void PutScript(Handle source, Handle native_context, @@ -187,6 +225,8 @@ class V8_EXPORT_PRIVATE CompilationCache { void PutRegExp(Handle source, JSRegExp::Flags flags, Handle data); + void PutCode(Handle shared, Handle code); + // Clear the cache - also used to initialize the cache at startup. void Clear(); @@ -217,9 +257,6 @@ class V8_EXPORT_PRIVATE CompilationCache { base::HashMap* EagerOptimizingSet(); - // The number of sub caches covering the different types to cache. - static const int kSubCacheCount = 4; - bool IsEnabledScriptAndEval() const { return FLAG_compilation_cache && enabled_script_and_eval_; } @@ -232,6 +269,9 @@ class V8_EXPORT_PRIVATE CompilationCache { CompilationCacheEval eval_global_; CompilationCacheEval eval_contextual_; CompilationCacheRegExp reg_exp_; + CompilationCacheCode code_; + + static constexpr int kSubCacheCount = 5; CompilationSubCache* subcaches_[kSubCacheCount]; // Current enable state of the compilation cache for scripts and eval. 
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc index c436c57407cdca..bcc97e32f729fd 100644 --- a/deps/v8/src/codegen/compiler.cc +++ b/deps/v8/src/codegen/compiler.cc @@ -30,12 +30,12 @@ #include "src/execution/frames-inl.h" #include "src/execution/isolate-inl.h" #include "src/execution/isolate.h" -#include "src/execution/off-thread-isolate.h" #include "src/execution/runtime-profiler.h" #include "src/execution/vm-state-inl.h" #include "src/handles/maybe-handles.h" #include "src/heap/heap-inl.h" -#include "src/heap/off-thread-factory-inl.h" +#include "src/heap/local-factory-inl.h" +#include "src/heap/local-heap-inl.h" #include "src/init/bootstrapper.h" #include "src/interpreter/interpreter.h" #include "src/logging/log-inl.h" @@ -56,21 +56,102 @@ namespace v8 { namespace internal { -// A wrapper around a OptimizedCompilationInfo that detaches the Handles from -// the underlying DeferredHandleScope and stores them in info_ on -// destruction. -class CompilationHandleScope final { +namespace { + +bool IsForNativeContextIndependentCachingOnly(CodeKind kind) { + return CodeKindIsNativeContextIndependentJSFunction(kind) && + !FLAG_turbo_nci_as_highest_tier; +} + +bool IsForNativeContextIndependentCachingOnly(OptimizedCompilationInfo* info) { + return IsForNativeContextIndependentCachingOnly(info->code_kind()); +} + +class CompilerTracer : public AllStatic { public: - explicit CompilationHandleScope(Isolate* isolate, - OptimizedCompilationInfo* info) - : deferred_(isolate), info_(info) {} - ~CompilationHandleScope() { info_->set_deferred_handles(deferred_.Detach()); } + static void PrintTracePrefix(const CodeTracer::Scope& scope, + const char* header, + OptimizedCompilationInfo* info) { + PrintF(scope.file(), "[%s ", header); + info->closure()->ShortPrint(scope.file()); + PrintF(scope.file(), " (target %s)", CodeKindToString(info->code_kind())); + } + + static void PrintTracePrefix(const CodeTracer::Scope& scope, + const char* header, + 
Handle function) { + PrintF(scope.file(), "[%s ", header); + function->ShortPrint(scope.file()); + } - private: - DeferredHandleScope deferred_; - OptimizedCompilationInfo* info_; + static void PrintTraceSuffix(const CodeTracer::Scope& scope) { + PrintF(scope.file(), "]\n"); + } + + static void TracePrepareJob(Isolate* isolate, OptimizedCompilationInfo* info, + const char* compiler_name) { + if (!FLAG_trace_opt || !info->IsOptimizing()) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "compiling method", info); + PrintF(scope.file(), " using %s%s", compiler_name, + info->is_osr() ? " OSR" : ""); + PrintTraceSuffix(scope); + } + + static void TraceCompilationStats(Isolate* isolate, + OptimizedCompilationInfo* info, + double ms_creategraph, double ms_optimize, + double ms_codegen) { + if (!FLAG_trace_opt || !info->IsOptimizing()) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "optimizing", info); + PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms", ms_creategraph, + ms_optimize, ms_codegen); + PrintTraceSuffix(scope); + } + + static void TraceCompletedJob(Isolate* isolate, + OptimizedCompilationInfo* info) { + if (!FLAG_trace_opt) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "completed optimizing", info); + PrintTraceSuffix(scope); + } + + static void TraceAbortedJob(Isolate* isolate, + OptimizedCompilationInfo* info) { + if (!FLAG_trace_opt) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "aborted optimizing", info); + PrintF(scope.file(), " because: %s", + GetBailoutReason(info->bailout_reason())); + PrintTraceSuffix(scope); + } + + static void TraceOptimizedCodeCacheHit(Isolate* isolate, + Handle function, + BailoutId osr_offset) { + if (!FLAG_trace_opt) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "found optimized code for", function); + if 
(!osr_offset.IsNone()) { + PrintF(scope.file(), " at OSR AST id %d", osr_offset.ToInt()); + } + PrintTraceSuffix(scope); + } + + static void TraceOptimizeForAlwaysOpt(Isolate* isolate, + Handle function) { + if (!FLAG_trace_opt) return; + CodeTracer::Scope scope(isolate->GetCodeTracer()); + PrintTracePrefix(scope, "optimizing", function); + PrintF(scope.file(), " because --always-opt"); + PrintTraceSuffix(scope); + } }; +} // namespace + // Helper that times a scoped region and records the elapsed time. struct ScopedTimer { explicit ScopedTimer(base::TimeDelta* location) : location_(location) { @@ -172,9 +253,7 @@ CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob( } CompilationJob::Status UnoptimizedCompilationJob::FinalizeJob( - Handle shared_info, OffThreadIsolate* isolate) { - DisallowHeapAccess no_heap_access; - + Handle shared_info, LocalIsolate* isolate) { // Delegate to the underlying implementation. DCHECK_EQ(state(), State::kReadyToFinalize); ScopedTimer t(&time_taken_to_finalize_); @@ -231,15 +310,7 @@ void RecordUnoptimizedFunctionCompilation( CompilationJob::Status OptimizedCompilationJob::PrepareJob(Isolate* isolate) { DCHECK_EQ(ThreadId::Current(), isolate->thread_id()); DisallowJavascriptExecution no_js(isolate); - - if (FLAG_trace_opt && compilation_info()->IsOptimizing()) { - CodeTracer::Scope scope(isolate->GetCodeTracer()); - OFStream os(scope.file()); - os << "[compiling method " << Brief(*compilation_info()->closure()) - << " using " << compiler_name_; - if (compilation_info()->is_osr()) os << " OSR"; - os << "]" << std::endl; - } + CompilerTracer::TracePrepareJob(isolate, compilation_info(), compiler_name_); // Delegate to the underlying implementation. 
DCHECK_EQ(state(), State::kReadyToPrepare); @@ -287,13 +358,8 @@ void OptimizedCompilationJob::RecordCompilationStats(CompilationMode mode, double ms_creategraph = time_taken_to_prepare_.InMillisecondsF(); double ms_optimize = time_taken_to_execute_.InMillisecondsF(); double ms_codegen = time_taken_to_finalize_.InMillisecondsF(); - if (FLAG_trace_opt) { - CodeTracer::Scope scope(isolate->GetCodeTracer()); - PrintF(scope.file(), "[optimizing "); - function->ShortPrint(scope.file()); - PrintF(scope.file(), " - took %0.3f, %0.3f, %0.3f ms]\n", ms_creategraph, - ms_optimize, ms_codegen); - } + CompilerTracer::TraceCompilationStats( + isolate, compilation_info(), ms_creategraph, ms_optimize, ms_codegen); if (FLAG_trace_opt_stats) { static double compilation_time = 0.0; static int compiled_functions = 0; @@ -434,7 +500,7 @@ void InstallCoverageInfo(Isolate* isolate, Handle shared, isolate->debug()->InstallCoverageInfo(shared, coverage_info); } -void InstallCoverageInfo(OffThreadIsolate* isolate, +void InstallCoverageInfo(LocalIsolate* isolate, Handle shared, Handle coverage_info) { // We should only have coverage info when finalizing on the main thread. @@ -445,13 +511,6 @@ template void InstallUnoptimizedCode(UnoptimizedCompilationInfo* compilation_info, Handle shared_info, LocalIsolate* isolate) { - DCHECK_EQ(shared_info->language_mode(), - compilation_info->literal()->language_mode()); - - // Update the shared function info with the scope info. - Handle scope_info = compilation_info->scope()->scope_info(); - shared_info->set_scope_info(*scope_info); - if (compilation_info->has_bytecode_array()) { DCHECK(!shared_info->HasBytecodeArray()); // Only compiled once. 
DCHECK(!compilation_info->has_asm_wasm_data()); @@ -519,8 +578,10 @@ void EnsureSharedFunctionInfosArrayOnScript(Handle @@ -503,8 +507,23 @@ return histogram; }); + // ========================================================================= // EventHandlers +function handleSearchBar(){ + let searchBar = $('searchBarInput'); + let searchBarInput = searchBar.value; + let selectedMap = V8Map.get(searchBarInput); + //removeAllChildren($('mapIdList')); + if(selectedMap){ + let map = selectedMap; + document.state.map = map; + searchBar.className = "green"; + } else { + searchBar.className = "red"; + } +} + function handleBodyLoad() { let upload = $('fileReader'); upload.onclick = (e) => $("file").click(); @@ -1253,13 +1272,23 @@

Transitions


+ +

Search Map by Address

+ + + +
    +
+ +

Selected Map

Instructions

-

Visualize Map trees that have been gathered using --trace-maps.

+

Visualize Map trees that have been gathered using path/to/d8 $FILE --trace-maps.

+

You can inspect the transition tree in DevTools by looking at document.state.timeline.values.

Keyboard Shortcuts

SHIFT + Arrow Up
diff --git a/deps/v8/tools/map-processor.js b/deps/v8/tools/map-processor.js index d743cba383a5cd..9b261c7d1b8a79 100644 --- a/deps/v8/tools/map-processor.js +++ b/deps/v8/tools/map-processor.js @@ -43,17 +43,17 @@ class MapProcessor extends LogReader { processor: this.processFunctionMove }, 'map-create': { - parsers: [parseInt, parseInt, parseString], + parsers: [parseInt, parseString], processor: this.processMapCreate }, 'map': { - parsers: [parseString, parseInt, parseInt, parseInt, parseInt, parseInt, + parsers: [parseString, parseInt, parseString, parseString, parseInt, parseInt, parseString, parseString, parseString ], processor: this.processMap }, 'map-details': { - parsers: [parseInt, parseInt, parseString], + parsers: [parseInt, parseString, parseString], processor: this.processMapDetails } }; @@ -183,19 +183,16 @@ class MapProcessor extends LogReader { this.getExistingMap(id, time).deprecate(); } - processMapCreate(time, id, string) { + processMapCreate(time, id) { // map-create events might override existing maps if the addresses get - // rcycled. Hence we do not check for existing maps. + // recycled. Hence we do not check for existing maps. let map = this.createMap(id, time); - map.description = string; } processMapDetails(time, id, string) { //TODO(cbruni): fix initial map logging. let map = this.getExistingMap(id, time); - if (!map.description) { - //map.description = string; - } + map.description = string; } createMap(id, time) { @@ -205,8 +202,8 @@ class MapProcessor extends LogReader { } getExistingMap(id, time) { - if (id === 0) return undefined; - let map = V8Map.get(id); + if (id === "0x000000000000") return undefined; + let map = V8Map.get(id, time); if (map === undefined) { console.error("No map details provided: id=" + id); // Manually patch in a map to continue running. 
@@ -334,18 +331,34 @@ class V8Map { return parents; } - static get(id) { - return this.cache.get(id); + + static get(id, time = undefined) { + let maps = this.cache.get(id); + if(maps){ + for (let i = 0; i < maps.length; i++) { + //TODO: Implement time based map search + if(maps[i].time === time){ + return maps[i]; + } + } + // default return the latest + return maps[maps.length-1]; + } } static set(id, map) { - this.cache.set(id, map); + if(this.cache.has(id)){ + this.cache.get(id).push(map); + } else { + this.cache.set(id, [map]); + } } } V8Map.cache = new Map(); + // =========================================================================== class Edge { constructor(type, name, reason, time, from, to) { diff --git a/deps/v8/tools/predictable_wrapper.py b/deps/v8/tools/predictable_wrapper.py index bbb707f9019ab2..ad5adf7d297f79 100644 --- a/deps/v8/tools/predictable_wrapper.py +++ b/deps/v8/tools/predictable_wrapper.py @@ -16,6 +16,7 @@ # for py2/py3 compatibility +from __future__ import absolute_import from __future__ import print_function import sys diff --git a/deps/v8/tools/release/PRESUBMIT.py b/deps/v8/tools/release/PRESUBMIT.py index dd831f6721eeb3..3bcb26d29fa672 100644 --- a/deps/v8/tools/release/PRESUBMIT.py +++ b/deps/v8/tools/release/PRESUBMIT.py @@ -4,5 +4,5 @@ def CheckChangeOnCommit(input_api, output_api): tests = input_api.canned_checks.GetUnitTestsInDirectory( - input_api, output_api, '.', whitelist=['test_scripts.py$']) + input_api, output_api, '.', files_to_check=['test_scripts.py$']) return input_api.RunTests(tests) diff --git a/deps/v8/tools/release/auto_roll.py b/deps/v8/tools/release/auto_roll.py index 829aefb4cef4b5..27ba3e421684cf 100755 --- a/deps/v8/tools/release/auto_roll.py +++ b/deps/v8/tools/release/auto_roll.py @@ -46,14 +46,10 @@ def RunStep(self): self['json_output']['monitoring_state'] = 'detect_last_roll' self["last_roll"] = self._options.last_roll if not self["last_roll"]: - # Interpret the DEPS file to retrieve the v8 
revision. - # TODO(machenbach): This should be part or the setdep api of - # depot_tools. - Var = lambda var: '%s' - exec(FileToText(os.path.join(self._options.chromium, "DEPS"))) - - # The revision rolled last. - self["last_roll"] = vars['v8_revision'] + # Get last-rolled v8 revision from Chromium's DEPS file. + self["last_roll"] = self.Command( + "gclient", "getdep -r src/v8", cwd=self._options.chromium).strip() + self["last_version"] = self.GetVersionTag(self["last_roll"]) assert self["last_version"], "The last rolled v8 revision is not tagged." diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py index dbb3ba5f24b6eb..fd69075872f9a3 100644 --- a/deps/v8/tools/release/common_includes.py +++ b/deps/v8/tools/release/common_includes.py @@ -511,7 +511,7 @@ def ReadAndPersist(var_name, def_name): def WaitForLGTM(self): print ("Please wait for an LGTM, then type \"LGTM\" to commit " "your change. (If you need to iterate on the patch or double check " - "that it's sane, do so in another shell, but remember to not " + "that it's sensible, do so in another shell, but remember to not " "change the headline of the uploaded CL.") answer = "" while answer != "LGTM": diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py index 44376b1dc3f88b..cf86efb3cad5be 100755 --- a/deps/v8/tools/release/test_scripts.py +++ b/deps/v8/tools/release/test_scripts.py @@ -558,9 +558,11 @@ def testChromiumRollUpToDate(self): TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory() json_output_file = os.path.join(TEST_CONFIG["CHROMIUM"], "out.json") TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS")) + chrome_dir = TEST_CONFIG["CHROMIUM"] self.Expect([ Cmd("git fetch origin", ""), Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""), + Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir), Cmd("git describe --tags last_roll_hsh", "3.22.4"), Cmd("git fetch origin 
+refs/tags/*:refs/tags/*", ""), Cmd("git rev-list --max-age=395200 --tags", @@ -597,6 +599,7 @@ def WriteDeps(): expectations = [ Cmd("git fetch origin", ""), Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""), + Cmd("gclient getdep -r src/v8", "last_roll_hsh", cwd=chrome_dir), Cmd("git describe --tags last_roll_hsh", "3.22.3.1"), Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""), Cmd("git rev-list --max-age=395200 --tags", diff --git a/deps/v8/tools/run-num-fuzzer.py b/deps/v8/tools/run-num-fuzzer.py index 9b5a065158cf04..49e432998dc3cf 100755 --- a/deps/v8/tools/run-num-fuzzer.py +++ b/deps/v8/tools/run-num-fuzzer.py @@ -5,6 +5,7 @@ # found in the LICENSE file. +from __future__ import absolute_import import sys from testrunner import num_fuzzer diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py index 2ca93855485a4e..d22c7303af2f4b 100755 --- a/deps/v8/tools/run-tests.py +++ b/deps/v8/tools/run-tests.py @@ -5,6 +5,7 @@ # found in the LICENSE file. +from __future__ import absolute_import import sys from testrunner import standard_runner diff --git a/deps/v8/tools/run.py b/deps/v8/tools/run.py index 5a656e19b59727..59b3c15e682b58 100755 --- a/deps/v8/tools/run.py +++ b/deps/v8/tools/run.py @@ -6,7 +6,18 @@ """This program wraps an arbitrary command since gn currently can only execute scripts.""" +from __future__ import print_function + import subprocess import sys -sys.exit(subprocess.call(sys.argv[1:])) +result = subprocess.call(sys.argv[1:]) +if result != 0: + # Windows error codes such as 0xC0000005 and 0xC0000409 are much easier + # to recognize and differentiate in hex. + if result < -100: + # Print negative hex numbers as positive by adding 2^32. 
+ print('Return code is %08X' % (result + 2**32)) + else: + print('Return code is %d' % result) +sys.exit(result) diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py index d7255a94d35b94..80ea1f956592bd 100644 --- a/deps/v8/tools/run_perf.py +++ b/deps/v8/tools/run_perf.py @@ -575,6 +575,32 @@ def FlattenRunnables(node, node_cb): raise Exception('Invalid suite configuration.') +def find_build_directory(base_path, arch): + """Returns the location of d8 or node in the build output directory. + + This supports a seamless transition between legacy build location + (out/Release) and new build location (out/build). + """ + def is_build(path): + # We support d8 or node as executables. We don't support testing on + # Windows. + return (os.path.isfile(os.path.join(path, 'd8')) or + os.path.isfile(os.path.join(path, 'node'))) + possible_paths = [ + # Location developer wrapper scripts is using. + '%s.release' % arch, + # Current build location on bots. + 'build', + # Legacy build location on bots. + 'Release', + ] + possible_paths = [os.path.join(base_path, p) for p in possible_paths] + actual_paths = filter(is_build, possible_paths) + assert actual_paths, 'No build directory found.' + assert len(actual_paths) == 1, 'Found ambiguous build directories.' + return actual_paths[0] + + class Platform(object): def __init__(self, args): self.shell_dir = args.shell_dir @@ -881,8 +907,7 @@ def Main(argv): 'to auto-detect.', default='x64', choices=SUPPORTED_ARCHS + ['auto']) parser.add_argument('--buildbot', - help='Adapt to path structure used on buildbots and adds ' - 'timestamps/level to all logged status messages', + help='Deprecated', default=False, action='store_true') parser.add_argument('-d', '--device', help='The device ID to run Android tests on. 
If not ' @@ -978,13 +1003,9 @@ def Main(argv): workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) - if args.buildbot: - build_config = 'Release' - else: - build_config = '%s.release' % args.arch - if args.binary_override_path == None: - args.shell_dir = os.path.join(workspace, args.outdir, build_config) + args.shell_dir = find_build_directory( + os.path.join(workspace, args.outdir), args.arch) default_binary_name = 'd8' else: if not os.path.isfile(args.binary_override_path): @@ -998,8 +1019,8 @@ def Main(argv): default_binary_name = os.path.basename(args.binary_override_path) if args.outdir_secondary: - args.shell_dir_secondary = os.path.join( - workspace, args.outdir_secondary, build_config) + args.shell_dir_secondary = find_build_directory( + os.path.join(workspace, args.outdir_secondary), args.arch) else: args.shell_dir_secondary = None diff --git a/deps/v8/tools/sanitizers/sancov_formatter.py b/deps/v8/tools/sanitizers/sancov_formatter.py index b66bfed815d160..c95ff821c3157f 100755 --- a/deps/v8/tools/sanitizers/sancov_formatter.py +++ b/deps/v8/tools/sanitizers/sancov_formatter.py @@ -69,7 +69,7 @@ # Executables found in the build output for which no coverage is generated. # Exclude them from the coverage data file. 
-EXE_BLACKLIST = [ +EXE_EXCLUSIONS = [ 'generate-bytecode-expectations', 'hello-world', 'mksnapshot', @@ -109,7 +109,7 @@ def executables(build_dir): file_path = os.path.join(build_dir, f) if (os.path.isfile(file_path) and os.access(file_path, os.X_OK) and - f not in EXE_BLACKLIST): + f not in EXE_EXCLUSIONS): yield file_path diff --git a/deps/v8/tools/snapshot/asm_to_inline_asm.py b/deps/v8/tools/snapshot/asm_to_inline_asm.py index ad8fdcb0fe2688..e49c961bb70b89 100644 --- a/deps/v8/tools/snapshot/asm_to_inline_asm.py +++ b/deps/v8/tools/snapshot/asm_to_inline_asm.py @@ -15,12 +15,12 @@ def asm_to_inl_asm(in_filename, out_filename): with open(in_filename, 'r') as infile, open(out_filename, 'wb') as outfile: - outfile.write('__asm__(\n') + outfile.write(b'__asm__(\n') for line in infile: # Escape " in .S file before outputing it to inline asm file. line = line.replace('"', '\\"') - outfile.write(' "%s\\n"\n' % line.rstrip()) - outfile.write(');\n') + outfile.write(b' "%s\\n"\n' % line.rstrip().encode('utf8')) + outfile.write(b');\n') return 0 if __name__ == '__main__': diff --git a/deps/v8/tools/sodium/index.html b/deps/v8/tools/sodium/index.html deleted file mode 100644 index cbfe49902d0c8d..00000000000000 --- a/deps/v8/tools/sodium/index.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - Sodium - - - - - - - - - - -
- -
- -
- - - - -
-
-
-
-
- - - - - diff --git a/deps/v8/tools/sodium/sodium.js b/deps/v8/tools/sodium/sodium.js deleted file mode 100644 index 44475a177f7715..00000000000000 --- a/deps/v8/tools/sodium/sodium.js +++ /dev/null @@ -1,409 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -var Sodium = (function() { - "use strict"; - - var kinds = ["FUNCTION", "OPTIMIZED_FUNCTION", "STUB", "BUILTIN", - "LOAD_IC", "KEYED_LOAD_IC", "CALL_IC", "KEYED_CALL_IC", - "STORE_IC", "KEYED_STORE_IC", "BINARY_OP_IC", "COMPARE_IC", - "COMPARE_NIL_IC", "TO_BOOLEAN_IC"]; - var kindsWithSource = { - 'FUNCTION': true, - 'OPTIMIZED_FUNCTION': true - }; - - var addressRegEx = "0x[0-9a-f]{8,16}"; - var nameFinder = new RegExp("^name = (.+)$"); - var kindFinder = new RegExp("^kind = (.+)$"); - var firstPositionFinder = new RegExp("^source_position = (\\d+)$"); - var separatorFilter = new RegExp("^--- (.)+ ---$"); - var rawSourceFilter = new RegExp("^--- Raw source ---$"); - var codeEndFinder = new RegExp("^--- End code ---$"); - var whiteSpaceLineFinder = new RegExp("^\\W*$"); - var instructionBeginFinder = - new RegExp("^Instructions\\W+\\(size = \\d+\\)"); - var instructionFinder = - new RegExp("^\(" + addressRegEx + "\)\(\\W+\\d+\\W+.+\)"); - var positionFinder = - new RegExp("^(" + addressRegEx + ")\\W+position\\W+\\((\\d+)\\)"); - var addressFinder = new RegExp("\(" + addressRegEx + "\)"); - var addressReplacer = new RegExp("\(" + addressRegEx + "\)", "gi"); - - var fileContent = ""; - var selectedFunctionKind = ""; - var currentFunctionKind = ""; - - var currentFunctionName = ""; - var firstSourcePosition = 0; - var startAddress = ""; - var readingSource = false; - var readingAsm = false; - var sourceBegin = -1; - var sourceEnd = -1; - var asmBegin = -1; - var asmEnd = -1; - var codeObjects = []; - var selectedAsm = null; - var selectedSource = null; - var selectedSourceClass = ""; - - function Code(name, kind, sourceBegin, sourceEnd, asmBegin, asmEnd, - firstSourcePosition, startAddress) { - this.name = name; - this.kind = kind; - this.sourceBegin = sourceBegin; - this.sourceEnd = sourceEnd; - this.asmBegin = asmBegin; - this.asmEnd = asmEnd; - this.firstSourcePosition = firstSourcePosition; - this.startAddress = startAddress; - } - - function 
getCurrentCodeObject() { - var functionSelect = document.getElementById('function-selector-id'); - return functionSelect.options[functionSelect.selectedIndex].codeObject; - } - - function getCurrentSourceText() { - var code = getCurrentCodeObject(); - if (code.sourceBegin == -1 || code.sourceEnd == -1) return ""; - return fileContent.substring(code.sourceBegin, code.sourceEnd); - } - - function getCurrentAsmText() { - var code = getCurrentCodeObject(); - if (code.asmBegin == -1 || code.asmEnd == -1) return ""; - return fileContent.substring(code.asmBegin, code.asmEnd); - } - - function setKindByIndex(index) { - selectedFunctionKind = kinds[index]; - } - - function processLine(text, begin, end) { - var line = text.substring(begin, end); - if (readingSource) { - if (separatorFilter.exec(line) != null) { - readingSource = false; - } else { - if (sourceBegin == -1) { - sourceBegin = begin; - } - sourceEnd = end; - } - } else { - if (readingAsm) { - if (codeEndFinder.exec(line) != null) { - readingAsm = false; - asmEnd = begin; - var newCode = - new Code(currentFunctionName, currentFunctionKind, - sourceBegin, sourceEnd, asmBegin, asmEnd, - firstSourcePosition, startAddress); - codeObjects.push(newCode); - currentFunctionKind = null; - } else { - if (asmBegin == -1) { - matches = instructionBeginFinder.exec(line); - if (matches != null) { - asmBegin = begin; - } - } - if (startAddress == "") { - matches = instructionFinder.exec(line); - if (matches != null) { - startAddress = matches[1]; - } - } - } - } else { - var matches = kindFinder.exec(line); - if (matches != null) { - currentFunctionKind = matches[1]; - if (!kindsWithSource[currentFunctionKind]) { - sourceBegin = -1; - sourceEnd = -1; - } - } else if (currentFunctionKind != null) { - matches = nameFinder.exec(line); - if (matches != null) { - readingAsm = true; - asmBegin = -1; - currentFunctionName = matches[1]; - } - } else if (rawSourceFilter.exec(line) != null) { - readingSource = true; - sourceBegin = -1; - 
} else { - var matches = firstPositionFinder.exec(line); - if (matches != null) { - firstSourcePosition = parseInt(matches[1]); - } - } - } - } - } - - function processLines(source, size, processLine) { - var firstChar = 0; - for (var x = 0; x < size; x++) { - var curChar = source[x]; - if (curChar == '\n' || curChar == '\r') { - processLine(source, firstChar, x); - firstChar = x + 1; - } - } - if (firstChar != size - 1) { - processLine(source, firstChar, size - 1); - } - } - - function processFileContent() { - document.getElementById('source-text-pre').innerHTML = ''; - sourceBegin = -1; - codeObjects = []; - processLines(fileContent, fileContent.length, processLine); - var functionSelectElement = document.getElementById('function-selector-id'); - functionSelectElement.innerHTML = ''; - var length = codeObjects.length; - for (var i = 0; i < codeObjects.length; ++i) { - var code = codeObjects[i]; - if (code.kind == selectedFunctionKind) { - var optionElement = document.createElement("option"); - optionElement.codeObject = code; - optionElement.text = code.name; - functionSelectElement.add(optionElement, null); - } - } - } - - function asmClick(element) { - if (element == selectedAsm) return; - if (selectedAsm != null) { - selectedAsm.classList.remove('highlight-yellow'); - } - selectedAsm = element; - selectedAsm.classList.add('highlight-yellow'); - - var pc = element.firstChild.innerText; - var sourceLine = null; - if (addressFinder.exec(pc) != null) { - var position = findSourcePosition(pc); - var line = findSourceLine(position); - sourceLine = document.getElementById('source-line-' + line); - var sourceLineTop = sourceLine.offsetTop; - makeSourcePosVisible(sourceLineTop); - } - if (selectedSource == sourceLine) return; - if (selectedSource != null) { - selectedSource.classList.remove('highlight-yellow'); - selectedSource.classList.add(selectedSourceClass); - } - if (sourceLine != null) { - selectedSourceClass = sourceLine.classList[0]; - 
sourceLine.classList.remove(selectedSourceClass); - sourceLine.classList.add('highlight-yellow'); - } - selectedSource = sourceLine; - } - - function makeContainerPosVisible(container, newTop) { - var height = container.offsetHeight; - var margin = Math.floor(height / 4); - if (newTop < container.scrollTop + margin) { - newTop -= margin; - if (newTop < 0) newTop = 0; - container.scrollTop = newTop; - return; - } - if (newTop > (container.scrollTop + 3 * margin)) { - newTop = newTop - 3 * margin; - container.scrollTop = newTop; - } - } - - function makeAsmPosVisible(newTop) { - var asmContainer = document.getElementById('asm-container'); - makeContainerPosVisible(asmContainer, newTop); - } - - function makeSourcePosVisible(newTop) { - var sourceContainer = document.getElementById('source-container'); - makeContainerPosVisible(sourceContainer, newTop); - } - - function addressClick(element, event) { - event.stopPropagation(); - var asmLineId = 'address-' + element.innerText; - var asmLineElement = document.getElementById(asmLineId); - if (asmLineElement != null) { - var asmLineTop = asmLineElement.parentNode.offsetTop; - makeAsmPosVisible(asmLineTop); - asmLineElement.classList.add('highlight-flash-blue'); - window.setTimeout(function() { - asmLineElement.classList.remove('highlight-flash-blue'); - }, 1500); - } - } - - function prepareAsm(originalSource) { - var newSource = ""; - var lineNumber = 1; - var functionProcessLine = function(text, begin, end) { - var currentLine = text.substring(begin, end); - var matches = instructionFinder.exec(currentLine); - var clickHandler = ""; - if (matches != null) { - var restOfLine = matches[2]; - restOfLine = restOfLine.replace( - addressReplacer, - '\$1'); - currentLine = '' + - matches[1] + '' + restOfLine; - clickHandler = 'onclick=\'Sodium.asmClick(this)\' '; - } else if (whiteSpaceLineFinder.exec(currentLine)) { - currentLine = "
"; - } - newSource += '
' +
-        currentLine + '
'; - lineNumber++; - } - processLines(originalSource, originalSource.length, functionProcessLine); - return newSource; - } - - function findSourcePosition(pcToSearch) { - var position = 0; - var distance = 0x7FFFFFFF; - var pcToSearchOffset = parseInt(pcToSearch); - var processOneLine = function(text, begin, end) { - var currentLine = text.substring(begin, end); - var matches = positionFinder.exec(currentLine); - if (matches != null) { - var pcOffset = parseInt(matches[1]); - if (pcOffset <= pcToSearchOffset) { - var dist = pcToSearchOffset - pcOffset; - var pos = parseInt(matches[2]); - if ((dist < distance) || (dist == distance && pos > position)) { - position = pos; - distance = dist; - } - } - } - } - var asmText = getCurrentAsmText(); - processLines(asmText, asmText.length, processOneLine); - var code = getCurrentCodeObject(); - if (position == 0) return 0; - return position - code.firstSourcePosition; - } - - function findSourceLine(position) { - if (position == 0) return 1; - var line = 0; - var processOneLine = function(text, begin, end) { - if (begin < position) { - line++; - } - } - var sourceText = getCurrentSourceText(); - processLines(sourceText, sourceText.length, processOneLine); - return line; - } - - function functionChangedHandler() { - var functionSelect = document.getElementById('function-selector-id'); - var source = getCurrentSourceText(); - var sourceDivElement = document.getElementById('source-text'); - var code = getCurrentCodeObject(); - var newHtml = "
"
-      + 'function ' + code.name + source + "
"; - sourceDivElement.innerHTML = newHtml; - try { - // Wrap in try to work when offline. - PR.prettyPrint(); - } catch (e) { - } - var sourceLineContainer = sourceDivElement.firstChild.firstChild; - var lineCount = sourceLineContainer.childElementCount; - var current = sourceLineContainer.firstChild; - for (var i = 1; i < lineCount; ++i) { - current.id = "source-line-" + i; - current = current.nextElementSibling; - } - - var asm = getCurrentAsmText(); - document.getElementById('asm-text').innerHTML = prepareAsm(asm); - } - - function kindChangedHandler(element) { - setKindByIndex(element.selectedIndex); - processFileContent(); - functionChangedHandler(); - } - - function readLog(evt) { - //Retrieve the first (and only!) File from the FileList object - var f = evt.target.files[0]; - if (f) { - var r = new FileReader(); - r.onload = function(e) { - var file = evt.target.files[0]; - currentFunctionKind = ""; - fileContent = e.target.result; - processFileContent(); - functionChangedHandler(); - } - r.readAsText(f); - } else { - alert("Failed to load file"); - } - } - - function buildFunctionKindSelector(kindSelectElement) { - for (var x = 0; x < kinds.length; ++x) { - var optionElement = document.createElement("option"); - optionElement.value = x; - optionElement.text = kinds[x]; - kindSelectElement.add(optionElement, null); - } - kindSelectElement.selectedIndex = 1; - setKindByIndex(1); - } - - return { - buildFunctionKindSelector: buildFunctionKindSelector, - kindChangedHandler: kindChangedHandler, - functionChangedHandler: functionChangedHandler, - asmClick: asmClick, - addressClick: addressClick, - readLog: readLog - }; - -})(); diff --git a/deps/v8/tools/sodium/styles.css b/deps/v8/tools/sodium/styles.css deleted file mode 100755 index 4f7d89ee7863a8..00000000000000 --- a/deps/v8/tools/sodium/styles.css +++ /dev/null @@ -1,70 +0,0 @@ -#table-header { - background-color: rgba(150, 150, 255, 0.4); -} - -#asm-container { - background-color: rgba(200, 200, 255, 0.4); 
- position:absolute; - overflow:auto; - cursor:default; - width:50%; - height:92%; -} - -#source-container { - position:absolute; - overflow:auto; - width:48%; - left:51%; - height:92%; -} - -table { - border-collapse: collapse; -} - -.hover-underline:hover { - text-decoration: underline; -} - -.highlight-flash-blue { - -webkit-transition: all 1s ease; - background-color: rgba(50, 50, 245, 0.4); - border-radius: 10px; - -o-border-radius: 10px; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; -} - - -.highlight-green { - background-color: rgba(0, 255, 0, 0.4); - border-radius: 10px; - -o-border-radius: 10px; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; -} - -.highlight-yellow { - background-color: rgba(255, 255, 0, 0.4); - border-radius: 10px; - -o-border-radius: 10px; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; -} - -.highlight-gray { - background-color: rgba(128, 128, 128, 0.4); - border-radius: 10px; - -o-border-radius: 10px; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; -} - -.highlight-red { - background-color: rgba(255, 0, 0, 0.4); - border-radius: 10px; - -o-border-radius: 10px; - -moz-border-radius: 10px; - -webkit-border-radius: 10px; -} diff --git a/deps/v8/tools/system-analyzer/app-model.mjs b/deps/v8/tools/system-analyzer/app-model.mjs new file mode 100644 index 00000000000000..821d3a2c1739b7 --- /dev/null +++ b/deps/v8/tools/system-analyzer/app-model.mjs @@ -0,0 +1,102 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +class State { + #timeSelection = { start: 0, end: Infinity }; + #map; + #ic; + #selectedMapLogEvents; + #selectedIcLogEvents; + #nofChunks; + #chunks; + #icTimeline; + #mapTimeline; + #minStartTime = Number.POSITIVE_INFINITY; + #maxEndTime = Number.NEGATIVE_INFINITY; + get minStartTime() { + return this.#minStartTime; + } + get maxEndTime() { + return this.#maxEndTime; + } + #updateTimeRange(timeline) { + this.#minStartTime = Math.min(this.#minStartTime, timeline.startTime); + this.#maxEndTime = Math.max(this.#maxEndTime, timeline.endTime); + } + get mapTimeline() { + return this.#mapTimeline; + } + set mapTimeline(timeline) { + this.#updateTimeRange(timeline); + timeline.startTime = this.#minStartTime; + timeline.endTime = this.#maxEndTime; + this.#mapTimeline = timeline; + } + set icTimeline(timeline) { + this.#updateTimeRange(timeline); + timeline.startTime = this.#minStartTime; + timeline.endTime = this.#maxEndTime; + this.#icTimeline = timeline; + } + get icTimeline() { + return this.#icTimeline; + } + set chunks(value) { + //TODO(zcankara) split up between maps and ics, and every timeline track + this.#chunks = value; + } + get chunks() { + //TODO(zcankara) split up between maps and ics, and every timeline track + return this.#chunks; + } + get nofChunks() { + return this.#nofChunks; + } + set nofChunks(count) { + this.#nofChunks = count; + } + get map() { + //TODO(zcankara) rename as selectedMapEvents, array of selected events + return this.#map; + } + set map(value) { + //TODO(zcankara) rename as selectedMapEvents, array of selected events + if (!value) return; + this.#map = value; + } + get ic() { + //TODO(zcankara) rename selectedICEvents, array of selected events + return this.#ic; + } + set ic(value) { + //TODO(zcankara) rename selectedIcEvents, array of selected events + if (!value) return; + this.#ic = value; + } + get selectedMapLogEvents() { + return this.#selectedMapLogEvents; + } + set selectedMapLogEvents(value) { + if (!value) return; + 
this.#selectedMapLogEvents = value; + } + get selectedIcLogEvents() { + return this.#selectedIcLogEvents; + } + set selectedIcLogEvents(value) { + if (!value) return; + this.#selectedIcLogEvents = value; + } + get timeSelection() { + return this.#timeSelection; + } + get entries() { + if (!this.map) return {}; + return { + map: this.map.id, time: this.map.time + } + } +} + +export { State }; diff --git a/deps/v8/tools/system-analyzer/event.mjs b/deps/v8/tools/system-analyzer/event.mjs new file mode 100644 index 00000000000000..4ab6f34b416d34 --- /dev/null +++ b/deps/v8/tools/system-analyzer/event.mjs @@ -0,0 +1,21 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +class Event { + #time; + #type; + constructor(type, time) { + //TODO(zcankara) remove type and add empty getters to override + this.#time = time; + this.#type = type; + } + get time(){ + return this.#time; + } + get type(){ + return this.#type; + } +} + +export {Event}; diff --git a/deps/v8/tools/system-analyzer/events.mjs b/deps/v8/tools/system-analyzer/events.mjs new file mode 100644 index 00000000000000..d868b57a848e99 --- /dev/null +++ b/deps/v8/tools/system-analyzer/events.mjs @@ -0,0 +1,33 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +class SelectionEvent extends CustomEvent { + static name = "showentries"; + constructor(entries) { + super(SelectionEvent.name, { bubbles: true, composed: true }); + if (!Array.isArray(entries) || entries.length == 0) { + throw new Error("No valid entries selected!"); + } + this.entries = entries; + } +} + +class FocusEvent extends CustomEvent { + static name = "showentrydetail"; + constructor(entry) { + super(FocusEvent.name, { bubbles: true, composed: true }); + this.entry = entry; + } +} + +class SelectTimeEvent extends CustomEvent { + static name = 'timerangeselect'; + constructor(start, end) { + super(SelectTimeEvent.name, { bubbles: true, composed: true }); + this.start = start; + this.end = end; + } +} + +export { SelectionEvent, FocusEvent, SelectTimeEvent }; diff --git a/deps/v8/tools/system-analyzer/helper.mjs b/deps/v8/tools/system-analyzer/helper.mjs new file mode 100644 index 00000000000000..4570074c6df1e6 --- /dev/null +++ b/deps/v8/tools/system-analyzer/helper.mjs @@ -0,0 +1,189 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
const KB = 1024;
const MB = KB * KB;
const GB = MB * KB;
const kMillis2Seconds = 1 / 1000;

// Formats a byte count as a human-readable string, e.g. 1536 -> "1.50KiB".
function formatBytes(bytes) {
  const units = ['B', 'KiB', 'MiB', 'GiB'];
  const divisor = 1024;
  let index = 0;
  // Bound by units.length - 1: the previous `index < units.length` condition
  // could step one past the end of the table and return an `undefined` unit
  // for values >= 1024 GiB.
  while (index < units.length - 1 && bytes >= divisor) {
    index++;
    bytes /= divisor;
  }
  return bytes.toFixed(2) + units[index];
}

// Formats a millisecond duration as seconds, e.g. 1500 -> "1.50s".
function formatSeconds(millis) {
  return (millis * kMillis2Seconds).toFixed(2) + 's';
}

// Fetches `${path}-template.html` and registers a custom element named after
// the last path segment, whose class is produced by generator(templateText).
function defineCustomElement(path, generator) {
  let name = path.substring(path.lastIndexOf("/") + 1, path.length);
  path = path + '-template.html';
  fetch(path)
    .then(stream => stream.text())
    .then(
      templateText => customElements.define(name, generator(templateText)));
}

// DOM Helpers
function removeAllChildren(node) {
  let range = document.createRange();
  range.selectNodeContents(node);
  range.deleteContents();
}

// Shorthand for document.querySelector.
function $(id) {
  return document.querySelector(id)
}

// Accessors for the theme colors declared as CSS custom properties on
// document.body (see index.css).
class CSSColor {
  static getColor(name) {
    const style = getComputedStyle(document.body);
    return style.getPropertyValue(`--${name}`);
  }
  static get backgroundColor() {
    // Fixed: index.css declares '--background-color'; the previous
    // 'backgroud-color' lookup always returned the empty string.
    return CSSColor.getColor('background-color');
  }
  static get surfaceColor() {
    return CSSColor.getColor('surface-color');
  }
  static get primaryColor() {
    return CSSColor.getColor('primary-color');
  }
  static get secondaryColor() {
    return CSSColor.getColor('secondary-color');
  }
  static get onSurfaceColor() {
    return CSSColor.getColor('on-surface-color');
  }
  static get onBackgroundColor() {
    return CSSColor.getColor('on-background-color');
  }
  static get onPrimaryColor() {
    return CSSColor.getColor('on-primary-color');
  }
  static get onSecondaryColor() {
    return CSSColor.getColor('on-secondary-color');
  }
  static get defaultColor() {
    return CSSColor.getColor('default-color');
  }
  static get errorColor() {
    return CSSColor.getColor('error-color');
  }
  static get mapBackgroundColor() {
    return CSSColor.getColor('map-background-color');
  }
  static get timelineBackgroundColor() {
    return CSSColor.getColor('timeline-background-color');
  }
  static get red() {
    return CSSColor.getColor('red');
  }
  static get green() {
    return CSSColor.getColor('green');
  }
  static get yellow() {
    return CSSColor.getColor('yellow');
  }
  static get blue() {
    return CSSColor.getColor('blue');
  }
  static get orange() {
    return CSSColor.getColor('orange');
  }
  static get violet() {
    return CSSColor.getColor('violet');
  }
}

// Maps a map-transition / IC event type to its theme color.
// Unknown types fall back to the primary color.
function transitionTypeToColor(type) {
  switch (type) {
    case 'new':
      return CSSColor.green;
    case 'Normalize':
      return CSSColor.violet;
    case 'SlowToFast':
      return CSSColor.orange;
    case 'InitialMap':
      return CSSColor.yellow;
    case 'Transition':
      return CSSColor.primaryColor;
    case 'ReplaceDescriptors':
      return CSSColor.red;
    case 'LoadGlobalIC':
      return CSSColor.green;
    case 'StoreInArrayLiteralIC':
      return CSSColor.violet;
    case 'StoreIC':
      return CSSColor.orange;
    case 'KeyedLoadIC':
      return CSSColor.red;
    case 'KeyedStoreIC':
      return CSSColor.primaryColor;
  }
  return CSSColor.primaryColor;
}

// Creates a <div>, optionally tagged with one class name (string) or a list
// of class names (iterable).
function div(classes) {
  let node = document.createElement('div');
  if (classes !== void 0) {
    if (typeof classes === 'string') {
      node.classList.add(classes);
    } else {
      classes.forEach(cls => node.classList.add(cls));
    }
  }
  return node;
}

// Base class for the system-analyzer custom elements: instantiates the
// fetched template HTML inside an open shadow root and provides small DOM
// construction helpers.
class V8CustomElement extends HTMLElement {
  constructor(templateText) {
    super();
    const shadowRoot = this.attachShadow({mode: 'open'});
    shadowRoot.innerHTML = templateText;
  }
  // Queries inside this element's shadow root.
  $(id) {
    return this.shadowRoot.querySelector(id);
  }

  querySelectorAll(query) {
    return this.shadowRoot.querySelectorAll(query);
  }

  div(classes) {return div(classes)}

  table(className) {
    let node = document.createElement('table')
    if (className) node.classList.add(className)
    return node;
  }

  td(textOrNode) {
    let node = document.createElement('td');
    if (typeof textOrNode === 'object') {
      node.appendChild(textOrNode);
    } else {
      node.innerText = textOrNode;
    }
    return node;
  }

  tr() {
    return document.createElement('tr');
  }

  removeAllChildren(node) { return removeAllChildren(node); }
}

export {defineCustomElement, V8CustomElement, removeAllChildren,
  $, div, transitionTypeToColor, CSSColor};

// ===== deps/v8/tools/system-analyzer/ic-model.mjs =====
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import CustomIcProcessor from "./ic-processor.mjs";

// For compatibility with console scripts (which call a global `print`).
// Assign explicitly on globalThis: a bare `print = console.log` assignment
// would throw a ReferenceError in strict module scope on platforms that do
// not already define a global `print`.
globalThis.print = console.log;

// A bucket of IC log entries sharing the same value (`key`) of `property`.
export class Group {
  constructor(property, key, entry) {
    this.property = property;
    this.key = key;
    this.count = 1;
    this.entries = [entry];
    this.percentage = undefined;
    this.groups = undefined;
  }

  add(entry) {
    this.count++;
    this.entries.push(entry)
  }

  // Lazily builds one sub-grouping per remaining IC property; used by the
  // drill-down view in the IC panel.
  createSubGroups() {
    this.groups = {};
    for (let i = 0; i < CustomIcProcessor.kProperties.length; i++) {
      let subProperty = CustomIcProcessor.kProperties[i];
      if (this.property == subProperty) continue;
      this.groups[subProperty] = Group.groupBy(this.entries, subProperty);
    }
  }

  // Partitions `entries` into Groups keyed by entry[property], annotates each
  // group with its share of the total (percent, two decimals), and returns
  // the groups sorted by descending count.
  static groupBy(entries, property) {
    let accumulator = Object.create(null);
    let length = entries.length;
    for (let i = 0; i < length; i++) {
      let entry = entries[i];
      let key = entry[property];
      if (accumulator[key] == undefined) {
        accumulator[key] = new Group(property, key, entry);
      } else {
        let group = accumulator[key];
        // Debugging aid kept from the original: a group without entries
        // indicates a corrupted accumulator.
        if (group.entries == undefined) console.log([group, entry]);
        group.add(entry)
      }
    }
    let result = [];
    for (let key in accumulator) {
      let group = accumulator[key];
      group.percentage = Math.round(group.count / length * 100 * 100) / 100;
      result.push(group);
    }
    result.sort((a, b) => {return b.count - a.count});
    return result;
  }
}
+

IC Panel

+

IC Explorer

+
+
+
0
+
uninitialized
+
X
+
no feedback
+
1
+
monomorphic
+
^
+
recompute handler
+
P
+
polymorphic
+
N
+
megamorphic
+
G
+
generic
+
+
+ +

Data

+ +

Trace Count: 0

+ +

Result

+

+ Group-Key: + +

+

+ Filter by Time +

+ : + + +

+ + + +
+

+
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import { Group } from './ic-model.mjs';
import CustomIcProcessor from "./ic-processor.mjs";
import { FocusEvent, SelectTimeEvent } from './events.mjs';
import { defineCustomElement, V8CustomElement } from './helper.mjs';

// Panel listing IC events grouped by a selectable property, with per-row
// drill-down into sub-groupings and a time-range filter.
defineCustomElement('ic-panel', (templateText) =>
  class ICPanel extends V8CustomElement {
    //TODO(zcankara) Entries never set
    #entries;
    #filteredEntries;
    constructor() {
      super(templateText);
      this.groupKey.addEventListener(
        'change', e => this.updateTable(e));
      this.$('#filterICTimeBtn').addEventListener(
        'click', e => this.handleICTimeFilter(e));
    }

    get entries() {
      return this.#entries;
    }

    get groupKey() {
      return this.$('#group-key');
    }

    get table() {
      return this.$('#table');
    }

    get tableBody() {
      return this.$('#table-body');
    }

    get count() {
      return this.$('#count');
    }

    get spanSelectAll() {
      return this.querySelectorAll("span");
    }

    // Setting the filtered entries re-renders the whole table.
    set filteredEntries(value) {
      this.#filteredEntries = value;
      this.updateTable();
    }

    get filteredEntries() {
      return this.#filteredEntries;
    }

    // Re-groups the filtered entries by the currently selected key and
    // rebuilds the table body.
    updateTable(event) {
      let select = this.groupKey;
      let key = select.options[select.selectedIndex].text;
      let tableBody = this.tableBody;
      this.removeAllChildren(tableBody);
      let groups = Group.groupBy(this.filteredEntries, key, true);
      this.render(groups, tableBody);
    }

    // HTML-escapes a value for safe insertion via innerHTML.
    // Reconstructed: the entity replacements had been garbled (e.g. the '<'
    // rule had collapsed into the invalid `.replace(//g, ">")`).
    escapeHtml(unsafe) {
      if (!unsafe) return "";
      return unsafe.toString()
        .replace(/&/g, "&amp;")
        .replace(/</g, "&lt;")
        .replace(/>/g, "&gt;")
        .replace(/"/g, "&quot;")
        .replace(/'/g, "&#039;");
    }

    // Turns http(s) keys into clickable links; everything else is escaped.
    processValue(unsafe) {
      if (!unsafe) return "";
      if (!unsafe.startsWith("http")) return this.escapeHtml(unsafe);
      let a = document.createElement("a");
      a.href = unsafe;
      a.textContent = unsafe;
      return a;
    }

    // Appends a <td> with `content` (node or HTML string) to `tr`.
    td(tr, content, className) {
      let node = document.createElement("td");
      if (typeof content == "object") {
        node.appendChild(content);
      } else {
        node.innerHTML = content;
      }
      node.className = className;
      tr.appendChild(node);
      return node
    }

    handleMapClick(e) {
      this.dispatchEvent(new FocusEvent(e.target.parentNode.entry));
    }

    handleFilePositionClick(e) {
      this.dispatchEvent(new FocusEvent(e.target.parentNode.entry.key));
    }

    // Renders up to 1000 group rows into `parent`, plus an "Omitted N
    // entries." footer row when the list is longer.
    render(entries, parent) {
      let fragment = document.createDocumentFragment();
      let max = Math.min(1000, entries.length)
      for (let i = 0; i < max; i++) {
        let entry = entries[i];
        let tr = document.createElement("tr");
        tr.entry = entry;
        //TODO(zcankara) Create one bound method and use it everywhere
        if (entry.property === "map") {
          tr.addEventListener('click', e => this.handleMapClick(e));
        } else if (entry.property == "filePosition") {
          tr.addEventListener('click',
            e => this.handleFilePositionClick(e));
        }
        let details = this.td(tr, '&#8505;', 'details');
        //TODO(zcankara) don't keep the whole function context alive
        details.onclick = _ => this.toggleDetails(details);
        this.td(tr, entry.percentage + "%", 'percentage');
        this.td(tr, entry.count, 'count');
        this.td(tr, this.processValue(entry.key), 'key');
        fragment.appendChild(tr);
      }
      let omitted = entries.length - max;
      if (omitted > 0) {
        let tr = document.createElement("tr");
        let tdNode = this.td(tr, 'Omitted ' + omitted + " entries.");
        tdNode.colSpan = 4;
        fragment.appendChild(tr);
      }
      parent.appendChild(fragment);
    }

    // Inserts a hidden drill-down row (one table per sub-group) right after
    // `previousSibling`.
    renderDrilldown(entry, previousSibling) {
      let tr = document.createElement('tr');
      tr.className = "entry-details";
      tr.style.display = "none";
      // indent by one td.
      tr.appendChild(document.createElement("td"));
      let td = document.createElement("td");
      td.colSpan = 3;
      for (let key in entry.groups) {
        td.appendChild(this.renderDrilldownGroup(entry, key));
      }
      tr.appendChild(td);
      // Append the new TR after previousSibling.
      previousSibling.parentNode.insertBefore(tr, previousSibling.nextSibling)
    }

    // Renders the top-20 entries of one sub-group as a titled table.
    renderDrilldownGroup(entry, key) {
      let max = 20;
      let group = entry.groups[key];
      let div = document.createElement("div")
      div.className = 'drilldown-group-title'
      div.textContent = key + ' [top ' + max + ' out of ' + group.length + ']';
      let table = document.createElement("table");
      this.render(group.slice(0, max), table, false)
      div.appendChild(table);
      return div;
    }

    // Shows/hides the drill-down row, creating the sub-groups on first use.
    toggleDetails(node) {
      let tr = node.parentNode;
      let entry = tr.entry;
      // Create subgroup in-place if they don't exist yet.
      if (entry.groups === undefined) {
        entry.createSubGroups();
        this.renderDrilldown(entry, tr);
      }
      let details = tr.nextSibling;
      let display = details.style.display;
      if (display != "none") {
        display = "none";
      } else {
        display = "table-row"
      };
      details.style.display = display;
    }

    // Populates the group-key <select> with the known IC properties.
    initGroupKeySelect() {
      let select = this.groupKey;
      select.options.length = 0;
      for (let i in CustomIcProcessor.kProperties) {
        let option = document.createElement("option");
        option.text = CustomIcProcessor.kProperties[i];
        select.add(option);
      }
    }

    handleICTimeFilter(e) {
      this.dispatchEvent(new SelectTimeEvent(
        parseInt(this.$('#filter-time-start').value),
        parseInt(this.$('#filter-time-end').value)));
    }
  });
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import {Event} from './event.mjs';
import {Timeline} from './timeline.mjs';

/**
 * Parser for dynamic code optimization state.
 * `Profile` is provided as a global by the shared tick-processor scripts.
 */
function parseState(s) {
  switch (s) {
    case '':
      return Profile.CodeState.COMPILED;
    case '~':
      return Profile.CodeState.OPTIMIZABLE;
    case '*':
      return Profile.CodeState.OPTIMIZED;
  }
  throw new Error('unknown code state: ' + s);
}

// Log-line dispatcher for IC-related v8.log events. `LogReader` and the
// parse* helpers come from the shared tools scripts loaded alongside.
class IcProcessor extends LogReader {
  #profile;
  // Expected v8.log format version; see processV8Version below.
  MAJOR_VERSION = 8;
  MINOR_VERSION = 5;
  constructor() {
    super();
    let propertyICParser = [
      parseInt, parseInt, parseInt, parseInt, parseString, parseString,
      parseInt, parseString, parseString, parseString
    ];
    // NOTE(review): LogReader.call(...) after super() mirrors the original
    // function-style LogReader initialization; it registers the dispatch
    // table below.
    LogReader.call(this, {
      'code-creation': {
        parsers: [
          parseString, parseInt, parseInt, parseInt, parseInt, parseString,
          parseVarArgs
        ],
        processor: this.processCodeCreation
      },
      'v8-version': {
        parsers: [
          parseInt, parseInt,
        ],
        processor: this.processV8Version
      },
      'code-move':
        {parsers: [parseInt, parseInt], processor: this.processCodeMove},
      'code-delete': {parsers: [parseInt], processor: this.processCodeDelete},
      'sfi-move':
        {parsers: [parseInt, parseInt], processor: this.processFunctionMove},
      'LoadGlobalIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'LoadGlobalIC')
      },
      'StoreGlobalIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'StoreGlobalIC')
      },
      'LoadIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'LoadIC')
      },
      'StoreIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'StoreIC')
      },
      'KeyedLoadIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'KeyedLoadIC')
      },
      'KeyedStoreIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'KeyedStoreIC')
      },
      'StoreInArrayLiteralIC': {
        parsers: propertyICParser,
        processor: this.processPropertyIC.bind(this, 'StoreInArrayLiteralIC')
      },
    });
    this.#profile = new Profile();

    // Per-type IC event counters.
    this.LoadGlobalIC = 0;
    this.StoreGlobalIC = 0;
    this.LoadIC = 0;
    this.StoreIC = 0;
    this.KeyedLoadIC = 0;
    this.KeyedStoreIC = 0;
    this.StoreInArrayLiteralIC = 0;
  }
  get profile() {
    return this.#profile;
  }
  /**
   * @override
   */
  printError(str) {
    print(str);
  }
  // Feeds a whole log file's text, line by line, into processLogLine.
  // Note: a trailing line without a final '\n' is ignored.
  processString(string) {
    let end = string.length;
    let current = 0;
    let next = 0;
    let line;
    let i = 0;
    let entry;
    while (current < end) {
      next = string.indexOf('\n', current);
      if (next === -1) break;
      i++;
      line = string.substring(current, next);
      current = next + 1;
      this.processLogLine(line);
    }
  }
  // Warns when the log was produced by a version this tool does not support.
  // NOTE(review): with `<=`, a log from exactly MAJOR.MINOR (8.5) is also
  // flagged as unsupported — confirm this is the intended cutoff.
  processV8Version(majorVersion, minorVersion) {
    if (
      (majorVersion == this.MAJOR_VERSION && minorVersion <= this.MINOR_VERSION)
      || (majorVersion < this.MAJOR_VERSION)) {
      window.alert(
        `Unsupported version ${majorVersion}.${minorVersion}.\n` +
        `Please use the matching tool for the given V8 version.`);
    }
  }
  // CLI entry point (d8/console scripts): reads lines via the global
  // readline() and prints per-type summary counts.
  processLogFile(fileName) {
    this.collectEntries = true;
    this.lastLogFileName_ = fileName;
    let line;
    while (line = readline()) {
      this.processLogLine(line);
    }
    print();
    print('=====================');
    print('LoadGlobal: ' + this.LoadGlobalIC);
    print('StoreGlobal: ' + this.StoreGlobalIC);
    print('Load: ' + this.LoadIC);
    print('Store: ' + this.StoreIC);
    print('KeyedLoad: ' + this.KeyedLoadIC);
    print('KeyedStore: ' + this.KeyedStoreIC);
    print('StoreInArrayLiteral: ' + this.StoreInArrayLiteralIC);
  }
  addEntry(entry) {
    this.entries.push(entry);
  }
  processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) {
    if (maybe_func.length) {
      let funcAddr = parseInt(maybe_func[0]);
      let state = parseState(maybe_func[1]);
      this.#profile.addFuncCode(
        type, name, timestamp, start, size, funcAddr, state);
    } else {
      this.#profile.addCode(type, name, timestamp, start, size);
    }
  }
  processCodeMove(from, to) {
    this.#profile.moveCode(from, to);
  }
  processCodeDelete(start) {
    this.#profile.deleteCode(start);
  }
  processFunctionMove(from, to) {
    this.#profile.moveFunc(from, to);
  }
  // Returns "<state><script-name>" for a profile entry, or '' if unknown.
  formatName(entry) {
    if (!entry) return '';
    let name = entry.func.getName();
    let re = /(.*):[0-9]+:[0-9]+$/;
    let array = re.exec(name);
    if (!array) return name;
    return entry.getState() + array[1];
  }

  processPropertyIC(
    type, pc, time, line, column, old_state, new_state, map, name, modifier,
    slow_reason) {
    this[type]++;
    let entry = this.#profile.findEntry(pc);
    // Fixed: a separating space before 'time:' — the previous concatenation
    // printed "...<slow_reason>time: <t>".
    print(
      type + ' (' + old_state + '->' + new_state + modifier + ') at ' +
      this.formatName(entry) + ':' + line + ':' + column + ' ' + name +
      ' (map 0x' + map.toString(16) + ')' +
      (slow_reason ? ' ' + slow_reason : '') + ' time: ' + time);
  }
}

// ================

// Properties the UI can group IC events by (inherited statically by
// CustomIcProcessor).
IcProcessor.kProperties = [
  'type',
  'category',
  'functionName',
  'filePosition',
  'state',
  'key',
  'map',
  'reason',
  'file'
];

// Browser-side processor: instead of printing, collects IcLogEvents into a
// Timeline consumed by the panels.
class CustomIcProcessor extends IcProcessor {
  #timeline = new Timeline();

  functionName(pc) {
    let entry = this.profile.findEntry(pc);
    return this.formatName(entry);
  }

  processPropertyIC(
    type, pc, time, line, column, old_state, new_state, map, key, modifier,
    slow_reason) {
    let fnName = this.functionName(pc);
    let entry = new IcLogEvent(
      type, fnName, time, line, column, key, old_state, new_state, map,
      slow_reason);
    this.#timeline.push(entry);
  }

  get timeline() {
    return this.#timeline;
  }

  processString(string) {
    super.processString(string);
    return this.timeline;
  }
};

// One parsed IC event; `fn_file` is "<functionName> <file>" as produced by
// formatName.
class IcLogEvent extends Event {
  constructor(
    type, fn_file, time, line, column, key, oldState, newState, map, reason,
    additional) {
    super(type, time);
    this.category = 'other';
    if (this.type.indexOf('Store') !== -1) {
      this.category = 'Store';
    } else if (this.type.indexOf('Load') !== -1) {
      this.category = 'Load';
    }
    let parts = fn_file.split(' ');
    this.functionName = parts[0];
    this.file = parts[1];
    let position = line + ':' + column;
    this.filePosition = this.file + ':' + position;
    this.oldState = oldState;
    this.newState = newState;
    this.state = this.oldState + ' → ' + this.newState;
    this.key = key;
    this.map = map.toString(16);
    this.reason = reason;
    this.additional = additional;
  }

  // Parses "dict<n> own<n> type<t>"-style map-property tokens starting at
  // `offset`; returns the index of the last consumed token.
  parseMapProperties(parts, offset) {
    let next = parts[++offset];
    if (!next.startsWith('dict')) return offset;
    this.propertiesMode = next.substr(5) == '0' ? 'fast' : 'slow';
    this.numberOfOwnProperties = parts[++offset].substr(4);
    next = parts[++offset];
    this.instanceType = next.substr(5, next.length - 6);
    return offset;
  }

  // Scans for the 'at' separator, records the position text before it and
  // the (possibly 'native'-prefixed) file after it; returns the file's token
  // index or -1 if no 'at' was found.
  parsePositionAndFile(parts, start) {
    // find the position of 'at' in the parts array.
    let offset = start;
    for (let i = start + 1; i < parts.length; i++) {
      offset++;
      if (parts[i] == 'at') break;
    }
    if (parts[offset] !== 'at') return -1;
    this.position = parts.slice(start, offset).join(' ');
    offset += 1;
    this.isNative = parts[offset] == 'native'
    offset += this.isNative ? 1 : 0;
    this.file = parts[offset];
    return offset;
  }
}

export { CustomIcProcessor as default, IcLogEvent};
font-size: 14px; + color: var(--on-background-color); + margin-left: 2.5%; + margin-right: 2.5%; + background-color: var(--background-color); + letter-spacing: 0.5px; +} +h2, +h4 { + box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2); + transition: 0.3s; + color: var(--on-surface-color); + padding: 10px 20px; + text-align: center; + text-decoration: none; + display: inline-block; +} +dl { + display: grid; + grid-template-columns: min-content auto; + grid-gap: 10px; +} +dt { + text-align: right; + white-space: nowrap; +} +dd { + margin: 0; +} +.panel { + box-shadow: 0 4px 8px 0 rgba(0, 0, 0, 0.2); + transition: 0.3s; + background-color: var(--surface-color); + color: var(--on-surface-color); + padding: 10px 10px 10px 10px; + margin: auto; + overflow-x: scroll; +} +button { + cursor: pointer; +} +input, +select, +button { + background-color: var(--surface-color); + color: var(--on-surface-color); +} +.colorbox { + width: 10px; + height: 10px; + border: 1px var(--background-color) solid; +} + +.primary { + background-color: var(--default-color); +} + +.red { + background-color: var(--red); +} + +.green { + background-color: var(--green); +} + +.yellow { + background-color: var(--yellow); + color: var(--map-background-color); +} + +.blue { + background-color: var(--blue); +} + +.orange { + background-color: var(--orange); +} + +.violet { + background-color: var(--violet); + color: var(--map-background-color); +} + +.success { + background-color: var(--secondary-color); +} + +.failure { + background-color: var(--error-color); +} +a:link { + color: var(--secondary-color); + background-color: transparent; + text-decoration: none; +} diff --git a/deps/v8/tools/system-analyzer/index.html b/deps/v8/tools/system-analyzer/index.html new file mode 100644 index 00000000000000..56747406062ae5 --- /dev/null +++ b/deps/v8/tools/system-analyzer/index.html @@ -0,0 +1,186 @@ + + + + + + + + Indicium + + + + + + + + + + + + + + + + + + + + +
+
+

+ +

+
+
+ + + + + + +
+
+
+ +
+
+

Instructions

+

+ Unified web interface for analyzing the trace information of the Maps/ICs +

+
    +
  • Visualize Map trees that have been gathered
  • +
  • /path/to/d8 --trace-maps $FILE
  • +
  • Visualize IC events that have been gathered
  • +
  • /path/to/d8 --trace_ic $FILE (your_script.js)
  • +
+

Keyboard Shortcuts

+
+
SHIFT + Arrow Up
+
Follow Map transition forward (first child)
+ +
SHIFT + Arrow Down
+
Follow Map transition backwards
+ +
Arrow Up
+
Go to previous Map chunk
+ +
Arrow Down
+
Go to next Map in chunk
+ +
Arrow Left
+
Go to previous chunk
+ +
Arrow Right
+
Go to next chunk
+ +
+
+
Timeline zoom in
+ +
-
+
Timeline zoom out
+
+
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import CustomIcProcessor from "./ic-processor.mjs";
import { SelectionEvent, FocusEvent } from "./events.mjs";
import { IcLogEvent } from "./ic-processor.mjs";
import { State } from "./app-model.mjs";
import { MapProcessor, MapLogEvent } from "./map-processor.mjs";
import { SelectTimeEvent } from "./events.mjs";
import { $ } from "./helper.mjs";
import "./ic-panel.mjs";
import "./timeline-panel.mjs";
import "./map-panel.mjs";
import "./log-file-reader.mjs";

// Top-level controller: wires the log-file reader, panels and timeline
// tracks together and routes selection/focus events between them.
class App {
  #state;
  #view;
  #navigation;
  constructor(fileReaderId, mapPanelId, timelinePanelId,
    icPanelId, mapTrackId, icTrackId) {
    this.#view = {
      logFileReader: $(fileReaderId),
      icPanel: $(icPanelId),
      mapPanel: $(mapPanelId),
      timelinePanel: $(timelinePanelId),
      mapTrack: $(mapTrackId),
      icTrack: $(icTrackId),
    };
    this.#state = new State();
    this.#navigation = new Navigation(this.#state, this.#view);
    document.addEventListener('keydown',
      e => this.#navigation.handleKeyDown(e));
    this.toggleSwitch = $('.theme-switch input[type="checkbox"]');
    this.toggleSwitch.addEventListener("change", (e) => this.switchTheme(e));
    this.#view.logFileReader.addEventListener("fileuploadstart", (e) =>
      this.handleFileUpload(e)
    );
    this.#view.logFileReader.addEventListener("fileuploadend", (e) =>
      this.handleDataUpload(e)
    );
    // Every panel may emit selection / focus / time-range events.
    Object.entries(this.#view).forEach(([_, panel]) => {
      panel.addEventListener(SelectionEvent.name,
        e => this.handleShowEntries(e));
      panel.addEventListener(FocusEvent.name,
        e => this.handleShowEntryDetail(e));
      panel.addEventListener(SelectTimeEvent.name,
        e => this.handleTimeRangeSelect(e));
    });
  }
  // Dispatches a multi-entry selection to the matching panel.
  handleShowEntries(e) {
    if (e.entries[0] instanceof MapLogEvent) {
      this.showMapEntries(e.entries);
    } else if (e.entries[0] instanceof IcLogEvent) {
      this.showIcEntries(e.entries);
    } else {
      console.error("Undefined selection!");
    }
  }
  showMapEntries(entries) {
    this.#state.selectedMapLogEvents = entries;
    this.#view.mapPanel.selectedMapLogEvents = this.#state.selectedMapLogEvents;
  }
  showIcEntries(entries) {
    this.#state.selectedIcLogEvents = entries;
    //TODO(zcankara) use selectedLogEvents
    this.#view.icPanel.filteredEntries = this.#state.selectedIcLogEvents;
  }

  handleTimeRangeSelect(e) {
    this.selectTimeRange(e.start, e.end);
  }
  // Dispatches a single-entry focus to the matching detail view.
  handleShowEntryDetail(e) {
    if (e.entry instanceof MapLogEvent) {
      this.selectMapLogEvent(e.entry);
    } else if (e.entry instanceof IcLogEvent) {
      this.selectICLogEvent(e.entry);
    } else if (typeof e.entry === 'string') {
      this.selectSourcePositionEvent(e.entry);
    } else {
      console.log("undefined");
    }
  }
  handleClickSourcePositions(e) {
    //TODO(zcankara) Handle source position
    console.log("Entry containing source position: ", e.entries);
  }
  // Applies a [start, end] time filter to both timelines and their panels.
  selectTimeRange(start, end) {
    this.#state.timeSelection.start = start;
    this.#state.timeSelection.end = end;
    this.#state.icTimeline.selectTimeRange(start, end);
    this.#state.mapTimeline.selectTimeRange(start, end);
    this.#view.mapPanel.selectedMapLogEvents =
      this.#state.mapTimeline.selection;
    this.#view.icPanel.filteredEntries = this.#state.icTimeline.selection;
  }
  selectMapLogEvent(entry) {
    this.#state.map = entry;
    this.#view.mapTrack.selectedEntry = entry;
    this.#view.mapPanel.map = entry;
  }
  selectICLogEvent(entry) {
    this.#state.ic = entry;
    this.#view.icPanel.filteredEntries = [entry];
  }
  selectSourcePositionEvent(sourcePositions) {
    console.log("source positions: ", sourcePositions);
  }
  handleFileUpload(e) {
    $("#container").className = "initial";
  }
  // Map event log processing
  handleLoadTextMapProcessor(text) {
    let mapProcessor = new MapProcessor();
    return mapProcessor.processString(text);
  }
  // IC event file reading and log processing.
  // NOTE(review): the FileReader read only triggers onload; the IC data is
  // taken from the already-available fileData.chunk — confirm intended.
  loadICLogFile(fileData) {
    let reader = new FileReader();
    reader.onload = (evt) => {
      let icProcessor = new CustomIcProcessor();
      //TODO(zcankara) Assign timeline directly to the ic panel
      //TODO(zcankara) Exe: this.#icPanel.timeline = document.state.icTimeline
      //TODO(zcankara) Set the data of the State object first
      this.#state.icTimeline = icProcessor.processString(fileData.chunk);
      this.#view.icTrack.data = this.#state.icTimeline;
      this.#view.icPanel.filteredEntries = this.#view.icTrack.data.all;
      this.#view.icPanel.count.innerHTML = this.#view.icTrack.data.all.length;
    };
    reader.readAsText(fileData.file);
    this.#view.icPanel.initGroupKeySelect();
  }

  // call when a new file uploaded
  handleDataUpload(e) {
    if (!e.detail) return;
    $("#container").className = "loaded";
    // instantiate the app logic
    let fileData = e.detail;
    try {
      const timeline = this.handleLoadTextMapProcessor(fileData.chunk);
      // Transitions must be set before timeline for stats panel.
      this.#state.mapTimeline = timeline;
      this.#view.mapPanel.transitions = this.#state.mapTimeline.transitions;
      this.#view.mapTrack.data = this.#state.mapTimeline;
      this.#state.chunks = this.#view.mapTrack.chunks;
      this.#view.mapPanel.timeline = this.#state.mapTimeline;
    } catch (error) {
      console.log(error);
    }
    this.loadICLogFile(fileData);
    this.fileLoaded = true;
  }

  refreshTimelineTrackView() {
    this.#view.mapTrack.data = this.#state.mapTimeline;
    this.#view.icTrack.data = this.#state.icTimeline;
  }

  switchTheme(event) {
    document.documentElement.dataset.theme = event.target.checked
      ? "light"
      : "dark";
    if (this.fileLoaded) {
      this.refreshTimelineTrackView();
    }
  }
}

// Keyboard navigation through the map-transition chunks/edges.
class Navigation {
  #view;
  constructor(state, view) {
    this.state = state;
    this.#view = view;
  }
  get map() {
    return this.state.map
  }
  set map(value) {
    this.state.map = value
  }
  get chunks() {
    return this.state.chunks
  }
  increaseTimelineResolution() {
    this.#view.timelinePanel.nofChunks *= 1.5;
    this.state.nofChunks *= 1.5;
  }
  decreaseTimelineResolution() {
    this.#view.timelinePanel.nofChunks /= 1.5;
    this.state.nofChunks /= 1.5;
  }
  // Follow the map transition forward (first child only).
  selectNextEdge() {
    if (!this.map) return;
    if (this.map.children.length != 1) return;
    this.map = this.map.children[0].to;
    this.#view.mapTrack.selectedEntry = this.map;
    this.updateUrl();
    this.#view.mapPanel.map = this.map;
  }
  // Follow the map transition backwards.
  selectPrevEdge() {
    if (!this.map) return;
    if (!this.map.parent()) return;
    this.map = this.map.parent();
    this.#view.mapTrack.selectedEntry = this.map;
    this.updateUrl();
    this.#view.mapPanel.map = this.map;
  }
  selectDefaultMap() {
    this.map = this.chunks[0].at(0);
    this.#view.mapTrack.selectedEntry = this.map;
    this.updateUrl();
    this.#view.mapPanel.map = this.map;
  }
  // Jump to the same in-chunk index in the previous/next chunk.
  moveInChunks(next) {
    if (!this.map) return this.selectDefaultMap();
    let chunkIndex = this.map.chunkIndex(this.chunks);
    let chunk = this.chunks[chunkIndex];
    let index = chunk.indexOf(this.map);
    if (next) {
      chunk = chunk.next(this.chunks);
    } else {
      chunk = chunk.prev(this.chunks);
    }
    if (!chunk) return;
    index = Math.min(index, chunk.size() - 1);
    this.map = chunk.at(index);
    this.#view.mapTrack.selectedEntry = this.map;
    this.updateUrl();
    this.#view.mapPanel.map = this.map;
  }
  // Step `delta` maps within the current chunk, spilling into the adjacent
  // chunk at either end.
  moveInChunk(delta) {
    if (!this.map) return this.selectDefaultMap();
    let chunkIndex = this.map.chunkIndex(this.chunks)
    let chunk = this.chunks[chunkIndex];
    let index = chunk.indexOf(this.map) + delta;
    let map;
    if (index < 0) {
      // Fixed: guard against stepping before the first chunk; prev() returns
      // a falsy value there (moveInChunks already guards the same way).
      const prevChunk = chunk.prev(this.chunks);
      if (!prevChunk) return;
      map = prevChunk.last();
    } else if (index >= chunk.size()) {
      // Fixed: symmetric guard at the last chunk.
      const nextChunk = chunk.next(this.chunks);
      if (!nextChunk) return;
      map = nextChunk.first()
    } else {
      map = chunk.at(index);
    }
    this.map = map;
    this.#view.mapTrack.selectedEntry = this.map;
    this.updateUrl();
    this.#view.mapPanel.map = this.map;
  }
  // Mirrors the current selection into the URL (back/forward friendly).
  updateUrl() {
    let entries = this.state.entries;
    let params = new URLSearchParams(entries);
    window.history.pushState(entries, '', '?' + params.toString());
  }
  handleKeyDown(event) {
    switch (event.key) {
      case "ArrowUp":
        event.preventDefault();
        if (event.shiftKey) {
          this.selectPrevEdge();
        } else {
          this.moveInChunk(-1);
        }
        return false;
      case "ArrowDown":
        event.preventDefault();
        if (event.shiftKey) {
          this.selectNextEdge();
        } else {
          this.moveInChunk(1);
        }
        return false;
      case "ArrowLeft":
        this.moveInChunks(false);
        break;
      case "ArrowRight":
        this.moveInChunks(true);
        break;
      case "+":
        this.increaseTimelineResolution();
        break;
      case "-":
        this.decreaseTimelineResolution();
        break;
    }
  }
}

export { App };
+
+ + Drag and drop a v8.log file into this area, or click to choose from disk. + + +
+
+
+
+
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import {defineCustomElement, V8CustomElement} from './helper.mjs';

// File-picker / drag-and-drop widget: reads a v8.log file as text and
// broadcasts 'fileuploadstart' before and 'fileuploadend' (with
// {file, chunk} detail) after the read.
defineCustomElement('log-file-reader', (templateText) =>
  class LogFileReader extends V8CustomElement {
    constructor() {
      super(templateText);
      this.addEventListener('click', e => this.handleClick(e));
      this.addEventListener('dragover', e => this.handleDragOver(e));
      this.addEventListener('drop', e => this.handleChange(e));
      this.$('#file').addEventListener('change', e => this.handleChange(e));
      this.$('#fileReader').addEventListener('keydown',
        e => this.handleKeyEvent(e));
    }

    get section() {
      return this.$('#fileReaderSection');
    }

    updateLabel(text) {
      this.$('#label').innerText = text;
    }

    // Keyboard accessibility: Enter behaves like a click.
    handleKeyEvent(event) {
      if (event.key == "Enter") this.handleClick(event);
    }

    handleClick(event) {
      this.$('#file').click();
    }

    handleChange(event) {
      // Used for drop and file change.
      event.preventDefault();
      this.dispatchEvent(new CustomEvent(
        'fileuploadstart', {bubbles: true, composed: true}));
      // Drop events carry the file list on dataTransfer; <input> change
      // events carry it on the input element itself.
      const host = event.dataTransfer ? event.dataTransfer : event.target;
      this.readFile(host.files[0]);
    }

    handleDragOver(event) {
      event.preventDefault();
    }

    connectedCallback() {
      this.$('#fileReader').focus();
    }

    // Reads `file` as text and dispatches 'fileuploadend'; updates the
    // widget's CSS state ('loading' / 'success' / 'failure') along the way.
    readFile(file) {
      if (!file) {
        this.updateLabel('Failed to load file.');
        return;
      }
      this.$('#fileReader').blur();
      this.section.className = 'loading';
      const reader = new FileReader();
      reader.onload = (e) => {
        try {
          let dataModel = Object.create(null);
          dataModel.file = file;
          dataModel.chunk = e.target.result;
          this.updateLabel('Finished loading \'' + file.name + '\'.');
          this.dispatchEvent(new CustomEvent(
            'fileuploadend', {bubbles: true, composed: true, detail: dataModel}));
          this.section.className = 'success';
          this.$('#fileReader').classList.add('done');
        } catch (err) {
          console.error(err);
          this.section.className = 'failure';
        }
      };
      // Delay the loading a bit to allow for CSS animations to happen.
      setTimeout(() => reader.readAsText(file), 0);
    }
  });
+

Map Panel

+ +

Search Map by Address

+ + + + + +
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
import "./stats-panel.mjs";
import "./map-panel/map-details.mjs";
import "./map-panel/map-transitions.mjs";
import { FocusEvent } from './events.mjs';
import { MapLogEvent } from "./map-processor.mjs";
import { defineCustomElement, V8CustomElement } from './helper.mjs';

// Container panel: hosts the stats panel, the map-transitions view and the
// map-details view, and forwards map selections between them.
defineCustomElement('map-panel', (templateText) =>
  class MapPanel extends V8CustomElement {
    #map;
    constructor() {
      super(templateText);
      this.searchBarBtn.addEventListener(
        'click', e => this.handleSearchBar(e));
      this.addEventListener(
        FocusEvent.name, e => this.handleUpdateMapDetails(e));
    }

    // Only map events update the details view; other focus targets pass by.
    handleUpdateMapDetails(e) {
      if (e.entry instanceof MapLogEvent) {
        this.mapDetailsPanel.mapDetails = e.entry;
      }
    }

    // --- sub-element accessors ---
    get statsPanel() {
      return this.$('#stats-panel');
    }

    get mapTransitionsPanel() {
      return this.$('#map-transitions');
    }

    get mapDetailsPanel() {
      return this.$('#map-details');
    }

    get searchBarBtn() {
      return this.$('#searchBarBtn');
    }

    get searchBar() {
      return this.$('#searchBar');
    }

    get mapDetails() {
      return this.mapDetailsPanel.mapDetails;
    }

    // --- data plumbing to the stats panel ---
    get timeline() {
      return this.statsPanel.timeline;
    }
    set timeline(value) {
      console.assert(value !== undefined, "timeline undefined!");
      this.statsPanel.timeline = value;
      this.statsPanel.update();
    }
    get transitions() {
      return this.statsPanel.transitions;
    }
    set transitions(value) {
      this.statsPanel.transitions = value;
    }

    set map(value) {
      this.#map = value;
      this.mapTransitionsPanel.map = this.#map;
    }

    // Looks the typed address up in the MapLogEvent model cache, colors the
    // input by hit/miss, and broadcasts the result as a FocusEvent.
    handleSearchBar(e) {
      const input = this.$('#searchBarInput');
      const selectedMap = MapLogEvent.get(input.value);
      input.className = selectedMap ? "success" : "failure";
      this.dispatchEvent(new FocusEvent(selectedMap));
    }

    set selectedMapLogEvents(list) {
      this.mapTransitionsPanel.selectedMapLogEvents = list;
    }
    get selectedMapLogEvents() {
      return this.mapTransitionsPanel.selectedMapLogEvents;
    }
  });
+

Map Details

+
+
diff --git a/deps/v8/tools/system-analyzer/map-panel/map-details.mjs b/deps/v8/tools/system-analyzer/map-panel/map-details.mjs new file mode 100644 index 00000000000000..dddccf7d8bc0bb --- /dev/null +++ b/deps/v8/tools/system-analyzer/map-panel/map-details.mjs @@ -0,0 +1,41 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +import { V8CustomElement, defineCustomElement } from "../helper.mjs"; +import { FocusEvent } from "../events.mjs"; + +defineCustomElement( + "./map-panel/map-details", + (templateText) => + class MapDetails extends V8CustomElement { + constructor() { + super(templateText); + this.mapDetails.addEventListener("click", () => + this.handleClickSourcePositions() + ); + this.selectedMap = undefined; + } + get mapDetails() { + return this.$("#mapDetails"); + } + + setSelectedMap(value) { + this.selectedMap = value; + } + + set mapDetails(map) { + let details = ""; + if (map) { + details += "ID: " + map.id; + details += "\nSource location: " + map.filePosition; + details += "\n" + map.description; + this.setSelectedMap(map); + } + this.mapDetails.innerText = details; + } + + handleClickSourcePositions() { + this.dispatchEvent(new FocusEvent(this.selectedMap.filePosition)); + } + } +); diff --git a/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html b/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html new file mode 100644 index 00000000000000..99fb251b19511c --- /dev/null +++ b/deps/v8/tools/system-analyzer/map-panel/map-transitions-template.html @@ -0,0 +1,148 @@ + + + + + + +
+
+

Transitions

+
+
+
+
+
+
diff --git a/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs b/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs new file mode 100644 index 00000000000000..578e2bb358f470 --- /dev/null +++ b/deps/v8/tools/system-analyzer/map-panel/map-transitions.mjs @@ -0,0 +1,192 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +import { V8CustomElement, defineCustomElement } from "../helper.mjs"; +import { FocusEvent } from "../events.mjs"; + +defineCustomElement( + "./map-panel/map-transitions", + (templateText) => + class MapTransitions extends V8CustomElement { + #map; + #selectedMapLogEvents; + constructor() { + super(templateText); + this.transitionView.addEventListener("mousemove", (e) => + this.handleTransitionViewChange(e) + ); + this.currentNode = this.transitionView; + this.currentMap = undefined; + } + + get transitionView() { + return this.$("#transitionView"); + } + + get tooltip() { + return this.$("#tooltip"); + } + + get tooltipContents() { + return this.$("#tooltipContents"); + } + + set map(value) { + this.#map = value; + this.showMap(); + } + + handleTransitionViewChange(e) { + this.tooltip.style.left = e.pageX + "px"; + this.tooltip.style.top = e.pageY + "px"; + let map = e.target.map; + if (map) { + this.tooltipContents.innerText = map.description; + } + } + + selectMap(map) { + this.currentMap = map; + this.dispatchEvent(new FocusEvent(map)); + } + + dblClickSelectMap(map) { + this.dispatchEvent(new FocusEvent(map)); + } + + showMap() { + // Called when a map selected + if (this.currentMap === this.#map) return; + this.currentMap = this.#map; + this.selectedMapLogEvents = [this.#map]; + this.dispatchEvent(new FocusEvent(this.#map)); + } + + showMaps() { + // Timeline dbl click to show map transitions of selected maps + this.transitionView.style.display = "none"; + this.removeAllChildren(this.transitionView); + 
this.selectedMapLogEvents.forEach((map) => this.addMapAndParentTransitions(map)); + this.transitionView.style.display = ""; + } + + set selectedMapLogEvents(list) { + this.#selectedMapLogEvents = list; + this.showMaps(); + } + + get selectedMapLogEvents() { + return this.#selectedMapLogEvents; + } + + addMapAndParentTransitions(map) { + if (map === void 0) return; + this.currentNode = this.transitionView; + let parents = map.getParents(); + if (parents.length > 0) { + this.addTransitionTo(parents.pop()); + parents.reverse().forEach((each) => this.addTransitionTo(each)); + } + let mapNode = this.addSubtransitions(map); + // Mark and show the selected map. + mapNode.classList.add("selected"); + if (this.selectedMap == map) { + setTimeout( + () => + mapNode.scrollIntoView({ + behavior: "smooth", + block: "nearest", + inline: "nearest", + }), + 1 + ); + } + } + + addMapNode(map) { + let node = this.div("map"); + if (map.edge) node.style.backgroundColor = map.edge.getColor(); + node.map = map; + node.addEventListener("click", () => this.selectMap(map)); + node.addEventListener("dblclick", () => this.dblClickSelectMap(map)); + if (map.children.length > 1) { + node.innerText = map.children.length; + let showSubtree = this.div("showSubtransitions"); + showSubtree.addEventListener("click", (e) => + this.toggleSubtree(e, node) + ); + node.appendChild(showSubtree); + } else if (map.children.length == 0) { + node.innerHTML = "●"; + } + this.currentNode.appendChild(node); + return node; + } + + addSubtransitions(map) { + let mapNode = this.addTransitionTo(map); + // Draw outgoing linear transition line. 
+ let current = map; + while (current.children.length == 1) { + current = current.children[0].to; + this.addTransitionTo(current); + } + return mapNode; + } + + addTransitionEdge(map) { + let classes = ["transitionEdge"]; + let edge = this.div(classes); + edge.style.backgroundColor = map.edge.getColor(); + let labelNode = this.div("transitionLabel"); + labelNode.innerText = map.edge.toString(); + edge.appendChild(labelNode); + return edge; + } + + addTransitionTo(map) { + // transition[ transitions[ transition[...], transition[...], ...]]; + + let transition = this.div("transition"); + if (map.isDeprecated()) transition.classList.add("deprecated"); + if (map.edge) { + transition.appendChild(this.addTransitionEdge(map)); + } + let mapNode = this.addMapNode(map); + transition.appendChild(mapNode); + + let subtree = this.div("transitions"); + transition.appendChild(subtree); + + this.currentNode.appendChild(transition); + this.currentNode = subtree; + + return mapNode; + } + + toggleSubtree(event, node) { + let map = node.map; + event.target.classList.toggle("opened"); + let transitionsNode = node.parentElement.querySelector(".transitions"); + let subtransitionNodes = transitionsNode.children; + if (subtransitionNodes.length <= 1) { + // Add subtransitions excepth the one that's already shown. + let visibleTransitionMap = + subtransitionNodes.length == 1 + ? 
transitionsNode.querySelector(".map").map + : void 0; + map.children.forEach((edge) => { + if (edge.to != visibleTransitionMap) { + this.currentNode = transitionsNode; + this.addSubtransitions(edge.to); + } + }); + } else { + // remove all but the first (currently selected) subtransition + for (let i = subtransitionNodes.length - 1; i > 0; i--) { + transitionsNode.removeChild(subtransitionNodes[i]); + } + } + } + } +); diff --git a/deps/v8/tools/system-analyzer/map-processor.mjs b/deps/v8/tools/system-analyzer/map-processor.mjs new file mode 100644 index 00000000000000..a1f056b4747e0c --- /dev/null +++ b/deps/v8/tools/system-analyzer/map-processor.mjs @@ -0,0 +1,538 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import {transitionTypeToColor} from './helper.mjs'; +import {Timeline} from './timeline.mjs'; + +// =========================================================================== +import {Event} from './event.mjs'; +const kChunkHeight = 250; +const kChunkWidth = 10; + +function define(prototype, name, fn) { + Object.defineProperty(prototype, name, {value: fn, enumerable: false}); +} + +define(Array.prototype, 'max', function(fn) { + if (this.length === 0) return undefined; + if (fn === undefined) fn = (each) => each; + let max = fn(this[0]); + for (let i = 1; i < this.length; i++) { + max = Math.max(max, fn(this[i])); + } + return max; +}) +define(Array.prototype, 'first', function() { + return this[0] +}); +define(Array.prototype, 'last', function() { + return this[this.length - 1] +}); +// =========================================================================== + +class MapProcessor extends LogReader { + #profile = new Profile(); + #timeline = new Timeline(); + #formatPCRegexp = /(.*):[0-9]+:[0-9]+$/; + MAJOR_VERSION = 7; + MINOR_VERSION = 6; + constructor() { + super(); + this.dispatchTable_ = { + __proto__: null, + 
'code-creation': { + parsers: [ + parseString, parseInt, parseInt, parseInt, parseInt, parseString, + parseVarArgs + ], + processor: this.processCodeCreation + }, + 'v8-version': { + parsers: [ + parseInt, parseInt, + ], + processor: this.processV8Version + }, + 'code-move': { + parsers: [parseInt, parseInt], + 'sfi-move': + {parsers: [parseInt, parseInt], processor: this.processCodeMove}, + 'code-delete': {parsers: [parseInt], processor: this.processCodeDelete}, + processor: this.processFunctionMove + }, + 'map-create': + {parsers: [parseInt, parseString], processor: this.processMapCreate}, + 'map': { + parsers: [ + parseString, parseInt, parseString, parseString, parseInt, parseInt, + parseString, parseString, parseString + ], + processor: this.processMap + }, + 'map-details': { + parsers: [parseInt, parseString, parseString], + processor: this.processMapDetails + } + }; + } + + printError(str) { + console.error(str); + throw str + } + + processString(string) { + let end = string.length; + let current = 0; + let next = 0; + let line; + let i = 0; + let entry; + try { + while (current < end) { + next = string.indexOf('\n', current); + if (next === -1) break; + i++; + line = string.substring(current, next); + current = next + 1; + this.processLogLine(line); + } + } catch (e) { + console.error('Error occurred during parsing, trying to continue: ' + e); + } + return this.finalize(); + } + + processLogFile(fileName) { + this.collectEntries = true; + this.lastLogFileName_ = fileName; + let i = 1; + let line; + try { + while (line = readline()) { + this.processLogLine(line); + i++; + } + } catch (e) { + console.error( + 'Error occurred during parsing line ' + i + + ', trying to continue: ' + e); + } + return this.finalize(); + } + + finalize() { + // TODO(cbruni): print stats; + this.#timeline.transitions = new Map(); + let id = 0; + this.#timeline.forEach(map => { + if (map.isRoot()) id = map.finalizeRootMap(id + 1); + if (map.edge && map.edge.name) { + let edge = 
map.edge; + let list = this.#timeline.transitions.get(edge.name); + if (list === undefined) { + this.#timeline.transitions.set(edge.name, [edge]); + } else { + list.push(edge); + } + } + }); + return this.#timeline; + } + + addEntry(entry) { + this.entries.push(entry); + } + + /** + * Parser for dynamic code optimization state. + */ + parseState(s) { + switch (s) { + case '': + return Profile.CodeState.COMPILED; + case '~': + return Profile.CodeState.OPTIMIZABLE; + case '*': + return Profile.CodeState.OPTIMIZED; + } + throw new Error('unknown code state: ' + s); + } + + processCodeCreation(type, kind, timestamp, start, size, name, maybe_func) { + if (maybe_func.length) { + let funcAddr = parseInt(maybe_func[0]); + let state = this.parseState(maybe_func[1]); + this.#profile.addFuncCode( + type, name, timestamp, start, size, funcAddr, state); + } else { + this.#profile.addCode(type, name, timestamp, start, size); + } + } + + processV8Version(majorVersion, minorVersion){ + if( + (majorVersion == this.MAJOR_VERSION && minorVersion <= this.MINOR_VERSION) + || (majorVersion < this.MAJOR_VERSION)){ + window.alert( + `Unsupported version ${majorVersion}.${minorVersion}. 
\n` + + `Please use the matching tool for given the V8 version.`); + } + } + + processCodeMove(from, to) { + this.#profile.moveCode(from, to); + } + + processCodeDelete(start) { + this.#profile.deleteCode(start); + } + + processFunctionMove(from, to) { + this.#profile.moveFunc(from, to); + } + + formatPC(pc, line, column) { + let entry = this.#profile.findEntry(pc); + if (!entry) return '' + if (entry.type === 'Builtin') { + return entry.name; + } + let name = entry.func.getName(); + let array = this.#formatPCRegexp.exec(name); + if (array === null) { + entry = name; + } else { + entry = entry.getState() + array[1]; + } + return entry + ':' + line + ':' + column; + } + + processMap(type, time, from, to, pc, line, column, reason, name) { + let time_ = parseInt(time); + if (type === 'Deprecate') return this.deprecateMap(type, time_, from); + let from_ = this.getExistingMap(from, time_); + let to_ = this.getExistingMap(to, time_); + let edge = new Edge(type, name, reason, time, from_, to_); + to_.filePosition = this.formatPC(pc, line, column); + edge.finishSetup(); + } + + deprecateMap(type, time, id) { + this.getExistingMap(id, time).deprecate(); + } + + processMapCreate(time, id) { + // map-create events might override existing maps if the addresses get + // recycled. Hence we do not check for existing maps. + let map = this.createMap(id, time); + } + + processMapDetails(time, id, string) { + // TODO(cbruni): fix initial map logging. + let map = this.getExistingMap(id, time); + map.description = string; + } + + createMap(id, time) { + let map = new MapLogEvent(id, time); + this.#timeline.push(map); + return map; + } + + getExistingMap(id, time) { + if (id === '0x000000000000') return undefined; + let map = MapLogEvent.get(id, time); + if (map === undefined) { + console.error('No map details provided: id=' + id); + // Manually patch in a map to continue running. 
+ return this.createMap(id, time); + }; + return map; + } +} + +// =========================================================================== + +class MapLogEvent extends Event { + edge = void 0; + children = []; + depth = 0; + // TODO(zcankara): Change this to private class field. + #isDeprecated = false; + deprecatedTargets = null; + leftId= 0; + rightId = 0; + filePosition = ''; + id = -1; + constructor(id, time) { + if (!time) throw new Error('Invalid time'); + super(id, time); + MapLogEvent.set(id, this); + this.id = id; + } + + finalizeRootMap(id) { + let stack = [this]; + while (stack.length > 0) { + let current = stack.pop(); + if (current.leftId !== 0) { + console.error('Skipping potential parent loop between maps:', current) + continue; + } + current.finalize(id) + id += 1; + current.children.forEach(edge => stack.push(edge.to)) + // TODO implement rightId + } + return id; + } + + finalize(id) { + // Initialize preorder tree traversal Ids for fast subtree inclusion checks + if (id <= 0) throw 'invalid id'; + let currentId = id; + this.leftId = currentId + } + + parent() { + if (this.edge === void 0) return void 0; + return this.edge.from; + } + + isDeprecated() { + return this.#isDeprecated; + } + + deprecate() { + this.#isDeprecated = true; + } + + isRoot() { + return this.edge === void 0 || this.edge.from === void 0; + } + + contains(map) { + return this.leftId < map.leftId && map.rightId < this.rightId; + } + + addEdge(edge) { + this.children.push(edge); + } + + chunkIndex(chunks) { + // Did anybody say O(n)? 
+ for (let i = 0; i < chunks.length; i++) { + let chunk = chunks[i]; + if (chunk.isEmpty()) continue; + if (chunk.last().time < this.time) continue; + return i; + } + return -1; + } + + position(chunks) { + let index = this.chunkIndex(chunks); + let xFrom = (index + 0.5) * kChunkWidth; + let yFrom = kChunkHeight - chunks[index].yOffset(this); + return [xFrom, yFrom]; + } + + transitions() { + let transitions = Object.create(null); + let current = this; + while (current) { + let edge = current.edge; + if (edge && edge.isTransition()) { + transitions[edge.name] = edge; + } + current = current.parent() + } + return transitions; + } + + get type() { + return this.edge === void 0 ? 'new' : this.edge.type; + } + + isBootstrapped() { + return this.edge === void 0; + } + + getParents() { + let parents = []; + let current = this.parent(); + while (current) { + parents.push(current); + current = current.parent(); + } + return parents; + } + + static get(id, time = undefined) { + let maps = this.cache.get(id); + if (maps) { + for (let i = 0; i < maps.length; i++) { + // TODO: Implement time based map search + if (maps[i].time === time) { + return maps[i]; + } + } + // default return the latest + return maps[maps.length - 1]; + } + } + + static set(id, map) { + if (this.cache.has(id)) { + this.cache.get(id).push(map); + } else { + this.cache.set(id, [map]); + } + } +} + +MapLogEvent.cache = new Map(); + +// =========================================================================== +class Edge { + constructor(type, name, reason, time, from, to) { + this.type = type; + this.name = name; + this.reason = reason; + this.time = time; + this.from = from; + this.to = to; + } + + getColor() { + return transitionTypeToColor(this.type); + } + + finishSetup() { + let from = this.from; + if (from) from.addEdge(this); + let to = this.to; + if (to === undefined) return; + to.edge = this; + if (from === undefined) return; + if (to === from) throw 'From and to must be distinct.'; + if 
(to.time < from.time) { + console.error('invalid time order'); + } + let newDepth = from.depth + 1; + if (to.depth > 0 && to.depth != newDepth) { + console.error('Depth has already been initialized'); + } + to.depth = newDepth; + } + + chunkIndex(chunks) { + // Did anybody say O(n)? + for (let i = 0; i < chunks.length; i++) { + let chunk = chunks[i]; + if (chunk.isEmpty()) continue; + if (chunk.last().time < this.time) continue; + return i; + } + return -1; + } + + parentEdge() { + if (!this.from) return undefined; + return this.from.edge; + } + + chainLength() { + let length = 0; + let prev = this; + while (prev) { + prev = this.parent; + length++; + } + return length; + } + + isTransition() { + return this.type === 'Transition' + } + + isFastToSlow() { + return this.type === 'Normalize' + } + + isSlowToFast() { + return this.type === 'SlowToFast' + } + + isInitial() { + return this.type === 'InitialMap' + } + + isBootstrapped() { + return this.type === 'new' + } + + isReplaceDescriptors() { + return this.type === 'ReplaceDescriptors' + } + + isCopyAsPrototype() { + return this.reason === 'CopyAsPrototype' + } + + isOptimizeAsPrototype() { + return this.reason === 'OptimizeAsPrototype' + } + + symbol() { + if (this.isTransition()) return '+'; + if (this.isFastToSlow()) return '⊡'; + if (this.isSlowToFast()) return '⊛'; + if (this.isReplaceDescriptors()) { + if (this.name) return '+'; + return '∥'; + } + return ''; + } + + toString() { + let s = this.symbol(); + if (this.isTransition()) return s + this.name; + if (this.isFastToSlow()) return s + this.reason; + if (this.isCopyAsPrototype()) return s + 'Copy as Prototype'; + if (this.isOptimizeAsPrototype()) { + return s + 'Optimize as Prototype'; + } + if (this.isReplaceDescriptors() && this.name) { + return this.type + ' ' + this.symbol() + this.name; + } + return this.type + ' ' + (this.reason ? this.reason : '') + ' ' + + (this.name ? 
this.name : '') + } +} + + +// =========================================================================== +class ArgumentsProcessor extends BaseArgumentsProcessor { + getArgsDispatch() { + return { + '--range': + ['range', 'auto,auto', 'Specify the range limit as [start],[end]'], + '--source-map': [ + 'sourceMap', null, + 'Specify the source map that should be used for output' + ] + }; + } + + getDefaultResults() { + return { + logFileName: 'v8.log', + range: 'auto,auto', + }; + } +} + +export { MapProcessor, MapLogEvent, kChunkWidth, kChunkHeight}; diff --git a/deps/v8/tools/system-analyzer/stats-panel-template.html b/deps/v8/tools/system-analyzer/stats-panel-template.html new file mode 100644 index 00000000000000..04094b5185aa63 --- /dev/null +++ b/deps/v8/tools/system-analyzer/stats-panel-template.html @@ -0,0 +1,54 @@ + + +
+

Stats Panel

+

Stats

+
+
+
diff --git a/deps/v8/tools/system-analyzer/stats-panel.mjs b/deps/v8/tools/system-analyzer/stats-panel.mjs new file mode 100644 index 00000000000000..54eac77262690d --- /dev/null +++ b/deps/v8/tools/system-analyzer/stats-panel.mjs @@ -0,0 +1,136 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +import { V8CustomElement, defineCustomElement } from "./helper.mjs"; +import { SelectionEvent } from "./events.mjs"; + +defineCustomElement( + "stats-panel", + (templateText) => + class StatsPanel extends V8CustomElement { + #timeline; + #transitions; + constructor() { + super(templateText); + } + + get stats() { + return this.$("#stats"); + } + + set timeline(value) { + //TODO(zcankara) Trigger update + this.#timeline = value; + } + + get timeline() { + return this.#timeline; + } + + set transitions(value) { + this.#transitions = value; + } + + get transitions() { + return this.#transitions; + } + + filterUniqueTransitions(filter) { + // Returns a list of Maps whose parent is not in the list. 
+ return this.timeline.filter((map) => { + if (filter(map) === false) return false; + let parent = map.parent(); + if (parent === undefined) return true; + return filter(parent) === false; + }); + } + + update() { + this.removeAllChildren(this.stats); + this.updateGeneralStats(); + this.updateNamedTransitionsStats(); + } + + updateGeneralStats() { + console.assert(this.#timeline !== undefined, "Timeline not set yet!"); + let pairs = [ + ["Total", null, (e) => true], + ["Transitions", "primary", (e) => e.edge && e.edge.isTransition()], + ["Fast to Slow", "violet", (e) => e.edge && e.edge.isFastToSlow()], + ["Slow to Fast", "orange", (e) => e.edge && e.edge.isSlowToFast()], + ["Initial Map", "yellow", (e) => e.edge && e.edge.isInitial()], + [ + "Replace Descriptors", + "red", + (e) => e.edge && e.edge.isReplaceDescriptors(), + ], + [ + "Copy as Prototype", + "red", + (e) => e.edge && e.edge.isCopyAsPrototype(), + ], + [ + "Optimize as Prototype", + null, + (e) => e.edge && e.edge.isOptimizeAsPrototype(), + ], + ["Deprecated", null, (e) => e.isDeprecated()], + ["Bootstrapped", "green", (e) => e.isBootstrapped()], + ]; + + let text = ""; + let tableNode = this.table("transitionType"); + tableNode.innerHTML = + "ColorTypeCountPercent"; + let name, filter; + let total = this.timeline.size(); + pairs.forEach(([name, color, filter]) => { + let row = this.tr(); + if (color !== null) { + row.appendChild(this.td(this.div(["colorbox", color]))); + } else { + row.appendChild(this.td("")); + } + row.onclick = (e) => { + // lazily compute the stats + let node = e.target.parentNode; + if (node.maps == undefined) { + node.maps = this.filterUniqueTransitions(filter); + } + this.dispatchEvent(new SelectionEvent(node.maps)); + }; + row.appendChild(this.td(name)); + let count = this.timeline.count(filter); + row.appendChild(this.td(count)); + let percent = Math.round((count / total) * 1000) / 10; + row.appendChild(this.td(percent.toFixed(1) + "%")); + tableNode.appendChild(row); + }); 
+ this.stats.appendChild(tableNode); + } + + updateNamedTransitionsStats() { + let tableNode = this.table("transitionTable"); + let nameMapPairs = Array.from(this.transitions.entries()); + tableNode.innerHTML = + "Propery Name#"; + nameMapPairs + .sort((a, b) => b[1].length - a[1].length) + .forEach(([name, maps]) => { + let row = this.tr(); + row.maps = maps; + row.addEventListener("click", (e) => + this.dispatchEvent( + new SelectionEvent( + e.target.parentNode.maps.map((map) => map.to) + ) + ) + ); + row.appendChild(this.td(name)); + row.appendChild(this.td(maps.length)); + tableNode.appendChild(row); + }); + this.stats.appendChild(tableNode); + } + } +); diff --git a/deps/v8/tools/system-analyzer/timeline-panel-template.html b/deps/v8/tools/system-analyzer/timeline-panel-template.html new file mode 100644 index 00000000000000..be083c4c9473f0 --- /dev/null +++ b/deps/v8/tools/system-analyzer/timeline-panel-template.html @@ -0,0 +1,52 @@ + + + +
+

Timeline Panel

+

Timeline

+
+ +
+
+
+
+
+
+
+
diff --git a/deps/v8/tools/system-analyzer/timeline-panel.mjs b/deps/v8/tools/system-analyzer/timeline-panel.mjs new file mode 100644 index 00000000000000..084dccca52c4b8 --- /dev/null +++ b/deps/v8/tools/system-analyzer/timeline-panel.mjs @@ -0,0 +1,82 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import {defineCustomElement, V8CustomElement} from './helper.mjs'; +import './timeline/timeline-track.mjs'; + +defineCustomElement('timeline-panel', (templateText) => + class TimelinePanel extends V8CustomElement { + constructor() { + super(templateText); + this.timelineOverview.addEventListener( + 'mousemove', e => this.handleTimelineIndicatorMove(e)); + this.addEventListener( + 'overviewupdate', e => this.handleOverviewBackgroundUpdate(e)); + this.addEventListener( + 'scrolltrack', e => this.handleTrackScroll(e)); + this.backgroundCanvas = document.createElement('canvas'); + this.isLocked = false; + } + + get timelineOverview() { + return this.$('#timelineOverview'); + } + + get timelineOverviewIndicator() { + return this.$('#timelineOverviewIndicator'); + } + + get timelineCanvas() { + return this.timelineTracks[0].timelineCanvas; + } + + get timeline() { + return this.timelineTracks[0].timeline; + } + set nofChunks(count){ + for (const track of this.timelineTracks) { + track.nofChunks = count; + } + } + get nofChunks(){ + return this.timelineTracks[0].nofChunks; + } + get timelineTracks(){ + return this.$("slot").assignedNodes().filter( + track => track.nodeType === Node.ELEMENT_NODE); + } + handleTrackScroll(event){ + //TODO(zcankara) add forEachTrack helper method + for (const track of this.timelineTracks) { + track.scrollLeft = event.detail; + } + } + handleTimelineIndicatorMove(event) { + if (event.buttons == 0) return; + let timelineTotalWidth = this.timelineCanvas.offsetWidth; + let factor = this.timelineOverview.offsetWidth / 
timelineTotalWidth; + for (const track of this.timelineTracks) { + track.timelineIndicatorMove(event.movementX / factor); + } + } + + updateOverviewWindow() { + let indicator = this.timelineOverviewIndicator; + let totalIndicatorWidth = + this.timelineOverview.offsetWidth; + let div = this.timeline; + let timelineTotalWidth = this.timelineCanvas.offsetWidth; + let factor = totalIndicatorWidth / timelineTotalWidth; + let width = div.offsetWidth * factor; + let left = div.scrollLeft * factor; + indicator.style.width = width + 'px'; + indicator.style.left = left + 'px'; + } + + handleOverviewBackgroundUpdate(e){ + this.timelineOverview.style.backgroundImage = + 'url(' + e.detail + ')'; + } + +}); diff --git a/deps/v8/tools/system-analyzer/timeline.mjs b/deps/v8/tools/system-analyzer/timeline.mjs new file mode 100644 index 00000000000000..e1f3432f81d1bc --- /dev/null +++ b/deps/v8/tools/system-analyzer/timeline.mjs @@ -0,0 +1,238 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +class Timeline { + #values; + #selection; + constructor() { + this.#values = []; + this.startTime = 0; + this.endTime = 0; + } + get all(){ + return this.#values; + } + get selection(){ + return this.#selection; + } + set selection(value){ + this.#selection = value; + } + selectTimeRange(start, end){ + this.#selection = this.filter( + e => e.time >= start && e.time <= end); + } + getChunks(windowSizeMs){ + //TODO(zcankara) Fill this one + return this.chunkSizes(windowSizeMs); + } + get values(){ + //TODO(zcankara) Not to break something delete later + return this.#values; + } + + count(filter) { + return this.all.reduce((sum, each) => { + return sum + (filter(each) === true ? 
1 : 0); + }, 0); + } + + filter(predicate) { + return this.all.filter(predicate); + } + + push(event) { + let time = event.time; + if (!this.isEmpty() && this.last().time > time) { + // Invalid insertion order, might happen without --single-process, + // finding insertion point. + let insertionPoint = this.find(time); + this.#values.splice(insertionPoint, event); + } else { + this.#values.push(event); + } + if (time > 0) { + this.endTime = Math.max(this.endTime, time); + if (this.startTime === 0) { + this.startTime = time; + } else { + this.startTime = Math.min(this.startTime, time); + } + } + } + + at(index) { + return this.#values[index]; + } + + isEmpty() { + return this.size() === 0; + } + + size() { + return this.#values.length; + } + + first() { + return this.#values.first(); + } + + last() { + return this.#values.last(); + } + + duration() { + return this.last().time - this.first().time; + } + + forEachChunkSize(count, fn) { + const increment = this.duration() / count; + let currentTime = this.first().time + increment; + let index = 0; + for (let i = 0; i < count; i++) { + let nextIndex = this.find(currentTime, index); + let nextTime = currentTime + increment; + fn(index, nextIndex, currentTime, nextTime); + index = nextIndex; + currentTime = nextTime; + } + } + + chunkSizes(count) { + let chunks = []; + this.forEachChunkSize(count, (start, end) => chunks.push(end - start)); + return chunks; + } + + chunks(count) { + let chunks = []; + this.forEachChunkSize(count, (start, end, startTime, endTime) => { + let items = this.#values.slice(start, end); + chunks.push(new Chunk(chunks.length, startTime, endTime, items)); + }); + return chunks; + } + + range(start, end) { + const first = this.find(start); + if (first < 0) return []; + const last = this.find(end, first); + return this.#values.slice(first, last); + } + + find(time, offset = 0) { + return this.#find(this.#values, each => each.time - time, offset); + } + + #find(array, cmp, offset = 0) { + let min = 
// ===========================================================================
// A contiguous slice of a Timeline: `items` are the events whose time falls
// into [start, end], ordered oldest (index 0) to youngest (last index).
class Chunk {
  constructor(index, start, end, items) {
    this.index = index;
    this.start = start;
    this.end = end;
    this.items = items;
    // Display height in pixels; assigned later by the renderer.
    this.height = 0;
  }

  isEmpty() {
    return this.size() === 0;
  }

  size() {
    return this.items.length;
  }

  at(index) {
    return this.items[index];
  }

  first() {
    return this.at(0);
  }

  last() {
    return this.at(this.size() - 1);
  }

  indexOf(event) {
    return this.items.indexOf(event);
  }

  // Vertical pixel position of an event within this chunk.
  // items[0] (oldest) is drawn at the top, items[n-1] (youngest) at the
  // bottom; each event is centered in its 1/size slot.
  yOffset(event) {
    const slot = this.indexOf(event) + 0.5;
    return (1 - slot / this.size()) * this.height;
  }

  // True if the event's time lies within this chunk's covered time span.
  has(event) {
    if (this.isEmpty()) return false;
    const time = event.time;
    return this.first().time <= time && time <= this.last().time;
  }

  next(chunks) {
    return this.findChunk(chunks, 1);
  }

  prev(chunks) {
    return this.findChunk(chunks, -1);
  }

  // Walks `chunks` in direction `delta` (+1/-1) from this chunk and returns
  // the nearest non-empty neighbor, or undefined if none exists.
  findChunk(chunks, delta) {
    let cursor = this.index + delta;
    while (chunks[cursor] && chunks[cursor].size() === 0) {
      cursor += delta;
    }
    return chunks[cursor];
  }

  // Counts items per key produced by event_fn (identity by default) and
  // returns [key, count] pairs sorted by ascending count.
  getBreakdown(event_fn) {
    if (event_fn === undefined) {
      event_fn = each => each;
    }
    // Null-prototype object so keys like "constructor" can't collide.
    const counts = {__proto__: null};
    for (const item of this.items) {
      const key = event_fn(item);
      counts[key] = (counts[key] | 0) + 1;
    }
    return Object.entries(counts).sort((a, b) => a[1] - b[1]);
  }

  // Returns the items whose parent is absent or lies outside this chunk,
  // i.e. the roots of the transition trees rooted in this chunk.
  filter() {
    return this.items.filter(
        map => !map.parent() || !this.has(map.parent()));
  }
}
+
+

Category

+
+
+
+
+
Frequency
+
+ +
+
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

import {
  defineCustomElement, V8CustomElement,
  transitionTypeToColor, CSSColor
} from '../helper.mjs';
import { kChunkWidth, kChunkHeight } from '../map-processor.mjs';
import { SelectionEvent, FocusEvent, SelectTimeEvent } from '../events.mjs';

// Custom element that renders a Timeline as a horizontal track of chunk
// bars with a type legend, a drag-selectable time range, an overview strip
// and an overlay canvas showing transition edges of the selected entry.
defineCustomElement('./timeline/timeline-track', (templateText) =>
  class TimelineTrack extends V8CustomElement {
    // Backing Timeline instance.
    #timeline;
    // Number of chunks the timeline is partitioned into for display.
    #nofChunks = 400;
    #chunks;
    #selectedEntry;
    // Horizontal scale in pixels per time unit; set by updateTimeline().
    #timeToPixel;
    // Time range picked via mousedown/mouseup on the track.
    #timeSelection = { start: 0, end: Infinity };

    constructor() {
      super(templateText);
      this.timeline.addEventListener(
          'mousedown', e => this.handleTimeRangeSelectionStart(e));
      this.timeline.addEventListener(
          'mouseup', e => this.handleTimeRangeSelectionEnd(e));
      this.timeline.addEventListener(
          'scroll', e => this.handleTimelineScroll(e));
      // Off-screen scratch canvas used to rasterize chunk backgrounds and
      // the overview image.
      this.backgroundCanvas = document.createElement('canvas');
      // While locked (toggled by clicking a chunk), mousemove no longer
      // changes the focused entry.
      this.isLocked = false;
    }

    get timelineCanvas() {
      return this.$('#timelineCanvas');
    }

    get timelineChunks() {
      return this.$('#timelineChunks');
    }

    get timeline() {
      return this.$('#timeline');
    }

    get timelineLegendContent() {
      return this.$('#timelineLegendContent');
    }

    // Sets the Timeline to display and triggers a full re-render.
    set data(value) {
      this.#timeline = value;
      this.updateChunks();
      this.updateTimeline();
      this.updateStats();
    }

    get data() {
      return this.#timeline;
    }

    set nofChunks(count) {
      this.#nofChunks = count;
      this.updateChunks();
      this.updateTimeline();
    }

    get nofChunks() {
      return this.#nofChunks;
    }

    updateChunks() {
      this.#chunks = this.data.chunks(this.nofChunks);
    }

    get chunks() {
      return this.#chunks;
    }

    set selectedEntry(value) {
      this.#selectedEntry = value;
      // Optional chain: the mousemove handler may focus `undefined` for an
      // empty position; previously this threw a TypeError.
      if (value?.edge) this.redraw();
    }

    get selectedEntry() {
      return this.#selectedEntry;
    }

    set scrollLeft(offset) {
      this.timeline.scrollLeft = offset;
    }

    // Groups all entries by type and rebuilds the legend.
    updateStats() {
      const byType = new Map();
      for (const entry of this.data.all) {
        if (!byType.has(entry.type)) {
          byType.set(entry.type, [entry]);
        } else {
          byType.get(entry.type).push(entry);
        }
      }
      this.renderStatsWindow(byType);
    }

    // Renders one <dt>(count) / <dd>(type) pair per entry type into the
    // legend. Double-clicking a type selects all of its entries.
    renderStatsWindow(unique) {
      const legend = this.timelineLegendContent;
      this.removeAllChildren(legend);
      const fragment = document.createDocumentFragment();
      unique.forEach((entries, type) => {
        const dt = document.createElement('dt');
        // textContent instead of innerHTML: these values are data, not
        // markup.
        dt.textContent = entries.length;
        dt.style.backgroundColor = transitionTypeToColor(type);
        dt.style.color = CSSColor.surfaceColor;
        fragment.appendChild(dt);
        const dd = document.createElement('dd');
        dd.textContent = type;
        dd.entries = entries;
        dd.addEventListener('dblclick', e => this.handleEntryTypeDblClick(e));
        fragment.appendChild(dd);
      });
      legend.appendChild(fragment);
    }

    handleEntryTypeDblClick(e) {
      this.dispatchEvent(new SelectionEvent(e.target.entries));
    }

    timelineIndicatorMove(offset) {
      this.timeline.scrollLeft += offset;
    }

    handleTimeRangeSelectionStart(e) {
      this.#timeSelection.start = this.positionToTime(e.clientX);
    }

    handleTimeRangeSelectionEnd(e) {
      this.#timeSelection.end = this.positionToTime(e.clientX);
      this.dispatchEvent(new SelectTimeEvent(
          Math.min(this.#timeSelection.start, this.#timeSelection.end),
          Math.max(this.#timeSelection.start, this.#timeSelection.end)));
    }

    // Converts a client x-coordinate to a time value on the track.
    positionToTime(posX) {
      const rect = this.timeline.getBoundingClientRect();
      const posClickedX = posX - rect.left + this.timeline.scrollLeft;
      return posClickedX / this.#timeToPixel;
    }

    handleTimelineScroll(e) {
      const horizontal = e.currentTarget.scrollLeft;
      this.dispatchEvent(new CustomEvent('scrolltrack', {
        bubbles: true,
        composed: true,
        detail: horizontal,
      }));
    }

    // Rasterizes chunk backgrounds in batches of 100, staggered via
    // setTimeout, to keep the UI responsive.
    asyncSetTimelineChunkBackground(backgroundTodo) {
      const kIncrement = 100;
      let start = 0;
      let delay = 1;
      while (start < backgroundTodo.length) {
        const end = Math.min(start + kIncrement, backgroundTodo.length);
        setTimeout((from, to) => {
          for (let i = from; i < to; i++) {
            const [chunk, node] = backgroundTodo[i];
            this.setTimelineChunkBackground(chunk, node);
          }
        }, delay++, start, end);
        start = end;
      }
    }

    // Renders the chunk's type distribution as a 1px-wide stacked bar chart
    // and installs it as the node's background image.
    setTimelineChunkBackground(chunk, node) {
      const kHeight = chunk.height;
      const kWidth = 1;
      this.backgroundCanvas.width = kWidth;
      this.backgroundCanvas.height = kHeight;
      const ctx = this.backgroundCanvas.getContext('2d');
      ctx.clearRect(0, 0, kWidth, kHeight);
      let y = 0;
      const total = chunk.size();
      chunk.getBreakdown(map => map.type).forEach(([type, count]) => {
        ctx.fillStyle = transitionTypeToColor(type);
        const height = count / total * kHeight;
        // Fix: fillRect takes (x, y, width, height); the previous version
        // passed the bottom coordinate (y + height) as the height.
        ctx.fillRect(0, y, kWidth, height);
        y += height;
      });
      const imageData = this.backgroundCanvas.toDataURL('image/webp', 0.2);
      node.style.backgroundImage = 'url(' + imageData + ')';
    }

    // Rebuilds the chunk bars and timestamp markers, then redraws the
    // overview strip and the edge canvas.
    updateTimeline() {
      const chunksNode = this.timelineChunks;
      this.removeAllChildren(chunksNode);
      const chunks = this.chunks;
      const max = chunks.max(each => each.size());
      const start = this.data.startTime;
      const end = this.data.endTime;
      const duration = end - start;
      this.#timeToPixel = chunks.length * kChunkWidth / duration;
      const addTimestamp = (time, name) => {
        const timeNode = this.div('timestamp');
        timeNode.innerText = name;
        timeNode.style.left = ((time - start) * this.#timeToPixel) + 'px';
        chunksNode.appendChild(timeNode);
      };
      const backgroundTodo = [];
      for (let i = 0; i < chunks.length; i++) {
        const chunk = chunks[i];
        const height = chunk.size() / max * kChunkHeight;
        chunk.height = height;
        if (chunk.isEmpty()) continue;
        const node = this.div();
        node.className = 'chunk';
        // NOTE(review): positions use the absolute chunk.start while
        // timestamps use (time - start); presumably chunk times are
        // zero-based — confirm against Timeline.chunks().
        node.style.left = (chunk.start * this.#timeToPixel) + 'px';
        node.style.height = height + 'px';
        node.chunk = chunk;
        node.addEventListener('mousemove', e => this.handleChunkMouseMove(e));
        node.addEventListener('click', e => this.handleChunkClick(e));
        node.addEventListener('dblclick', e => this.handleChunkDoubleClick(e));
        backgroundTodo.push([chunk, node]);
        chunksNode.appendChild(node);
      }
      this.asyncSetTimelineChunkBackground(backgroundTodo);

      // Put a time marker roughly every 20 chunks, rounded down to a
      // 1 / 2.5 / 5 multiple of a power of ten.
      const expected = duration / chunks.length * 20;
      let interval = 10 ** Math.floor(Math.log10(expected));
      const correction = Math.log10(expected / interval);
      interval *= (correction < 0.33) ? 1 : (correction < 0.75) ? 2.5 : 5;

      let time = start;
      while (time < end) {
        addTimestamp(time, ((time - start) / 1000) + ' ms');
        time += interval;
      }
      this.drawOverview();
      this.redraw();
    }

    // Focuses the entry under the cursor unless the track is locked.
    handleChunkMouseMove(event) {
      if (this.isLocked) return false;
      const chunk = event.target.chunk;
      if (!chunk) return;
      // topmost map (at chunk.height) == map #0.
      let relativeIndex = Math.round(
          event.layerY / event.target.offsetHeight * chunk.size());
      // Fix: layerY == offsetHeight would index one past the last item.
      if (relativeIndex >= chunk.size()) relativeIndex = chunk.size() - 1;
      const map = chunk.at(relativeIndex);
      this.dispatchEvent(new FocusEvent(map));
    }

    handleChunkClick(event) {
      this.isLocked = !this.isLocked;
    }

    handleChunkDoubleClick(event) {
      this.isLocked = true;
      const chunk = event.target.chunk;
      if (!chunk) return;
      this.dispatchEvent(new SelectionEvent(chunk.items));
    }

    // Draws a condensed filled curve of chunk sizes and broadcasts it as a
    // data URL via an 'overviewupdate' event.
    drawOverview() {
      const height = 50;
      const kFactor = 2;
      const canvas = this.backgroundCanvas;
      canvas.height = height;
      canvas.width = window.innerWidth;
      const ctx = canvas.getContext('2d');
      const chunks = this.data.chunkSizes(canvas.width * kFactor);
      const max = chunks.max();
      ctx.clearRect(0, 0, canvas.width, height);
      ctx.fillStyle = CSSColor.onBackgroundColor;
      ctx.beginPath();
      ctx.moveTo(0, height);
      for (let i = 0; i < chunks.length; i++) {
        ctx.lineTo(i / kFactor, height - chunks[i] / max * height);
      }
      ctx.lineTo(chunks.length, height);
      ctx.strokeStyle = CSSColor.onBackgroundColor;
      ctx.stroke();
      ctx.closePath();
      ctx.fill();
      const imageData = canvas.toDataURL('image/webp', 0.2);
      this.dispatchEvent(new CustomEvent('overviewupdate', {
        bubbles: true,
        composed: true,
        detail: imageData,
      }));
    }

    // Clears the edge canvas and, if an entry with an incoming edge is
    // selected, draws its transition trace.
    redraw() {
      const canvas = this.timelineCanvas;
      canvas.width = (this.chunks.length + 1) * kChunkWidth;
      canvas.height = kChunkHeight;
      const ctx = canvas.getContext('2d');
      ctx.clearRect(0, 0, canvas.width, kChunkHeight);
      if (!this.selectedEntry || !this.selectedEntry.edge) return;
      this.drawEdges(ctx);
    }

    setMapStyle(map, ctx) {
      ctx.fillStyle = map.edge && map.edge.from ?
          CSSColor.onBackgroundColor : CSSColor.onPrimaryColor;
    }

    setEdgeStyle(edge, ctx) {
      const color = transitionTypeToColor(edge.type);
      ctx.strokeStyle = color;
      ctx.fillStyle = color;
    }

    // Draws a small two-tone dot at the map's position.
    markMap(ctx, map) {
      const [x, y] = map.position(this.chunks);
      ctx.beginPath();
      this.setMapStyle(map, ctx);
      ctx.arc(x, y, 3, 0, 2 * Math.PI);
      ctx.fill();
      ctx.beginPath();
      ctx.fillStyle = CSSColor.onBackgroundColor;
      ctx.arc(x, y, 2, 0, 2 * Math.PI);
      ctx.fill();
    }

    // Draws a ring highlighting the currently selected map.
    markSelectedMap(ctx, map) {
      const [x, y] = map.position(this.chunks);
      ctx.beginPath();
      this.setMapStyle(map, ctx);
      ctx.arc(x, y, 6, 0, 2 * Math.PI);
      ctx.strokeStyle = CSSColor.onBackgroundColor;
      ctx.stroke();
    }

    // Draws the trace of maps leading to the selected entry, oldest first,
    // so that outgoing transitions of earlier maps aren't drawn over.
    drawEdges(ctx) {
      const kMaxOutgoingEdges = 100;
      let nofEdges = 0;
      const stack = [];
      let current = this.selectedEntry;
      // Walk up the parent chain, bounded by the number of outgoing edges.
      while (current && nofEdges < kMaxOutgoingEdges) {
        nofEdges += current.children.length;
        stack.push(current);
        current = current.parent();
      }
      ctx.save();
      this.drawOutgoingEdges(ctx, this.selectedEntry, 3);
      ctx.restore();

      let labelOffset = 15;
      let xPrev = 0;
      // Pop oldest-first. (The previous version also reassigned `current`
      // inside this loop; that assignment was dead and has been removed.)
      for (let node = stack.pop(); node !== undefined; node = stack.pop()) {
        if (node.edge) {
          this.setEdgeStyle(node.edge, ctx);
          const [xTo] = this.drawEdge(ctx, node.edge, true, labelOffset);
          // Stagger labels that land at the same x position.
          if (xTo == xPrev) {
            labelOffset += 8;
          } else {
            labelOffset = 15;
          }
          xPrev = xTo;
        }
        this.markMap(ctx, node);
      }
      // Mark selected map.
      this.markSelectedMap(ctx, this.selectedEntry);
    }

    // Draws one transition edge (with optional label) and returns the
    // [x, y] position of its target, or [-1, -1] if either end is missing.
    drawEdge(ctx, edge, showLabel = true, labelOffset = 20) {
      if (!edge.from || !edge.to) return [-1, -1];
      const [xFrom, yFrom] = edge.from.position(this.chunks);
      const [xTo, yTo] = edge.to.position(this.chunks);
      const sameChunk = xTo == xFrom;
      if (sameChunk) labelOffset += 8;

      ctx.beginPath();
      ctx.moveTo(xFrom, yFrom);
      const offsetX = 20;
      const midX = xFrom + (xTo - xFrom) / 2;
      const midY = (yFrom + yTo) / 2 - 100;
      if (!sameChunk) {
        // Curve between chunks; straight line within a chunk.
        ctx.quadraticCurveTo(midX, midY, xTo, yTo);
      } else {
        ctx.lineTo(xTo, yTo);
      }
      if (!showLabel) {
        ctx.strokeStyle = CSSColor.onBackgroundColor;
        ctx.stroke();
      } else {
        let centerX, centerY;
        if (!sameChunk) {
          centerX = (xFrom / 2 + midX + xTo / 2) / 2;
          centerY = (yFrom / 2 + midY + yTo / 2) / 2;
        } else {
          centerX = xTo;
          centerY = yTo;
        }
        ctx.strokeStyle = CSSColor.onBackgroundColor;
        ctx.moveTo(centerX, centerY);
        ctx.lineTo(centerX + offsetX, centerY - labelOffset);
        ctx.stroke();
        ctx.textAlign = 'left';
        ctx.fillStyle = CSSColor.onBackgroundColor;
        ctx.fillText(
            edge.toString(), centerX + offsetX + 2, centerY - labelOffset);
      }
      return [xTo, yTo];
    }

    // Recursively draws up to `max` levels of outgoing transitions
    // (at most 100 children per level), fading with depth.
    drawOutgoingEdges(ctx, map, max = 10, depth = 0) {
      if (!map) return;
      if (depth >= max) return;
      ctx.globalAlpha = 0.5 - depth * (0.3 / max);
      ctx.strokeStyle = CSSColor.timelineBackgroundColor;

      const limit = Math.min(map.children.length, 100);
      for (let i = 0; i < limit; i++) {
        const edge = map.children[i];
        this.drawEdge(ctx, edge, true);
        this.drawOutgoingEdges(ctx, edge.to, max, depth + 1);
      }
    }
  }
);
a/deps/v8/tools/testrunner/base_runner.py +++ b/deps/v8/tools/testrunner/base_runner.py @@ -6,7 +6,7 @@ from __future__ import print_function from functools import reduce -from collections import OrderedDict +from collections import OrderedDict, namedtuple import json import multiprocessing import optparse @@ -33,6 +33,7 @@ from testrunner.testproc.shard import ShardProc from testrunner.testproc.sigproc import SignalProc from testrunner.testproc.timeout import TimeoutProc +from testrunner.testproc import util BASE_DIR = ( @@ -115,52 +116,35 @@ ] -class ModeConfig(object): - def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode): - self.flags = flags - self.timeout_scalefactor = timeout_scalefactor - self.status_mode = status_mode - self.execution_mode = execution_mode - +ModeConfig = namedtuple( + 'ModeConfig', 'label flags timeout_scalefactor status_mode') DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"] RELEASE_FLAGS = ["--nohard-abort"] -MODES = { - "debug": ModeConfig( - flags=DEBUG_FLAGS, - timeout_scalefactor=4, - status_mode="debug", - execution_mode="debug", - ), - "optdebug": ModeConfig( + +DEBUG_MODE = ModeConfig( + label='debug', flags=DEBUG_FLAGS, timeout_scalefactor=4, status_mode="debug", - execution_mode="debug", - ), - "release": ModeConfig( +) + +RELEASE_MODE = ModeConfig( + label='release', flags=RELEASE_FLAGS, timeout_scalefactor=1, status_mode="release", - execution_mode="release", - ), - # Normal trybot release configuration. There, dchecks are always on which - # implies debug is set. Hence, the status file needs to assume debug-like - # behavior/timeouts. - "tryrelease": ModeConfig( +) + +# Normal trybot release configuration. There, dchecks are always on which +# implies debug is set. Hence, the status file needs to assume debug-like +# behavior/timeouts. 
+TRY_RELEASE_MODE = ModeConfig( + label='release+dchecks', flags=RELEASE_FLAGS, - timeout_scalefactor=1, - status_mode="debug", - execution_mode="release", - ), - # This mode requires v8 to be compiled with dchecks and slow dchecks. - "slowrelease": ModeConfig( - flags=RELEASE_FLAGS + ["--enable-slow-asserts"], - timeout_scalefactor=2, + timeout_scalefactor=4, status_mode="debug", - execution_mode="release", - ), -} +) PROGRESS_INDICATORS = { 'verbose': progress.VerboseProgressIndicator, @@ -168,6 +152,7 @@ def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode): 'dots': progress.DotsProgressIndicator, 'color': progress.ColorProgressIndicator, 'mono': progress.MonochromeProgressIndicator, + 'stream': progress.StreamProgressIndicator, } class TestRunnerError(Exception): @@ -240,12 +225,29 @@ def __str__(self): return '\n'.join(detected_options) +def _do_load_build_config(outdir, verbose=False): + build_config_path = os.path.join(outdir, "v8_build_config.json") + if not os.path.exists(build_config_path): + if verbose: + print("Didn't find build config: %s" % build_config_path) + raise TestRunnerError() + + with open(build_config_path) as f: + try: + build_config_json = json.load(f) + except Exception: # pragma: no cover + print("%s exists but contains invalid json. Is your build up-to-date?" + % build_config_path) + raise TestRunnerError() + + return BuildConfig(build_config_json) + + class BaseTestRunner(object): def __init__(self, basedir=None): self.basedir = basedir or BASE_DIR self.outdir = None self.build_config = None - self.mode_name = None self.mode_options = None self.target_os = None @@ -265,6 +267,9 @@ def execute(self, sys_args=None): # this less cryptic by printing it ourselves. print(' '.join(sys.argv)) + # Kill stray processes from previous tasks on swarming. 
+ util.kill_processes_linux() + self._load_build_config(options) command.setup(self.target_os, options.device) @@ -279,7 +284,7 @@ def execute(self, sys_args=None): tests = self._load_testsuite_generators(args, options) self._setup_env() print(">>> Running tests for %s.%s" % (self.build_config.arch, - self.mode_name)) + self.mode_options.label)) exit_code = self._do_execute(tests, args, options) if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results: print("Force exit code 0 after failures. Json test results file " @@ -313,9 +318,6 @@ def _add_parser_default_options(self, parser): default="out") parser.add_option("--arch", help="The architecture to run tests for") - parser.add_option("-m", "--mode", - help="The test mode in which to run (uppercase for builds" - " in CI): %s" % MODES.keys()) parser.add_option("--shell-dir", help="DEPRECATED! Executables from build " "directory will be used") parser.add_option("--test-root", help="Root directory of the test suites", @@ -400,9 +402,8 @@ def _add_parser_options(self, parser): def _parse_args(self, parser, sys_args): options, args = parser.parse_args(sys_args) - if any(map(lambda v: v and ',' in v, - [options.arch, options.mode])): # pragma: no cover - print('Multiple arch/mode are deprecated') + if options.arch and ',' in options.arch: # pragma: no cover + print('Multiple architectures are deprecated') raise TestRunnerError() return options, args @@ -410,7 +411,12 @@ def _parse_args(self, parser, sys_args): def _load_build_config(self, options): for outdir in self._possible_outdirs(options): try: - self.build_config = self._do_load_build_config(outdir, options.verbose) + self.build_config = _do_load_build_config(outdir, options.verbose) + + # In auto-detect mode the outdir is always where we found the build config. + # This ensures that we'll also take the build products from there. 
+ self.outdir = outdir + break except TestRunnerError: pass @@ -433,8 +439,7 @@ def _load_build_config(self, options): # Returns possible build paths in order: # gn # outdir - # outdir/arch.mode - # Each path is provided in two versions: and /mode for bots. + # outdir on bots def _possible_outdirs(self, options): def outdirs(): if options.gn: @@ -442,17 +447,13 @@ def outdirs(): return yield options.outdir - if options.arch and options.mode: - yield os.path.join(options.outdir, - '%s.%s' % (options.arch, options.mode)) + + if os.path.basename(options.outdir) != 'build': + yield os.path.join(options.outdir, 'build') for outdir in outdirs(): yield os.path.join(self.basedir, outdir) - # bot option - if options.mode: - yield os.path.join(self.basedir, outdir, options.mode) - def _get_gn_outdir(self): gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN) latest_timestamp = -1 @@ -468,51 +469,13 @@ def _get_gn_outdir(self): print(">>> Latest GN build found: %s" % latest_config) return os.path.join(DEFAULT_OUT_GN, latest_config) - def _do_load_build_config(self, outdir, verbose=False): - build_config_path = os.path.join(outdir, "v8_build_config.json") - if not os.path.exists(build_config_path): - if verbose: - print("Didn't find build config: %s" % build_config_path) - raise TestRunnerError() - - with open(build_config_path) as f: - try: - build_config_json = json.load(f) - except Exception: # pragma: no cover - print("%s exists but contains invalid json. Is your build up-to-date?" - % build_config_path) - raise TestRunnerError() - - # In auto-detect mode the outdir is always where we found the build config. - # This ensures that we'll also take the build products from there. - self.outdir = os.path.dirname(build_config_path) - - return BuildConfig(build_config_json) - def _process_default_options(self, options): - # We don't use the mode for more path-magic. - # Therefore transform the bot mode here to fix build_config value. 
- if options.mode: - options.mode = self._bot_to_v8_mode(options.mode) - - build_config_mode = 'debug' if self.build_config.is_debug else 'release' - if options.mode: - if options.mode not in MODES: # pragma: no cover - print('%s mode is invalid' % options.mode) - raise TestRunnerError() - if MODES[options.mode].execution_mode != build_config_mode: - print ('execution mode (%s) for %s is inconsistent with build config ' - '(%s)' % ( - MODES[options.mode].execution_mode, - options.mode, - build_config_mode)) - raise TestRunnerError() - - self.mode_name = options.mode + if self.build_config.is_debug: + self.mode_options = DEBUG_MODE + elif self.build_config.dcheck_always_on: + self.mode_options = TRY_RELEASE_MODE else: - self.mode_name = build_config_mode - - self.mode_options = MODES[self.mode_name] + self.mode_options = RELEASE_MODE if options.arch and options.arch != self.build_config.arch: print('--arch value (%s) inconsistent with build config (%s).' % ( @@ -533,15 +496,6 @@ def _process_default_options(self, options): options.command_prefix = shlex.split(options.command_prefix) options.extra_flags = sum(map(shlex.split, options.extra_flags), []) - def _bot_to_v8_mode(self, config): - """Convert build configs from bots to configs understood by the v8 runner. - - V8 configs are always lower case and without the additional _x64 suffix - for 64 bit builds on windows with ninja. 
- """ - mode = config[:-4] if config.endswith('_x64') else config - return mode.lower() - def _process_options(self, options): pass @@ -679,6 +633,7 @@ def _get_statusfile_variables(self, options): "arch": self.build_config.arch, "asan": self.build_config.asan, "byteorder": sys.byteorder, + "cfi_vptr": self.build_config.cfi_vptr, "dcheck_always_on": self.build_config.dcheck_always_on, "deopt_fuzzer": False, "endurance_fuzzer": False, @@ -689,9 +644,7 @@ def _get_statusfile_variables(self, options): "is_clang": self.build_config.is_clang, "is_full_debug": self.build_config.is_full_debug, "mips_arch_variant": mips_arch_variant, - "mode": self.mode_options.status_mode - if not self.build_config.dcheck_always_on - else "debug", + "mode": self.mode_options.status_mode, "msan": self.build_config.msan, "no_harness": options.no_harness, "no_i18n": self.build_config.no_i18n, @@ -804,10 +757,7 @@ def _create_progress_indicators(self, test_count, options): procs.append(progress.JUnitTestProgressIndicator(options.junitout, options.junittestsuite)) if options.json_test_results: - procs.append(progress.JsonTestProgressIndicator( - self.framework_name, - self.build_config.arch, - self.mode_options.execution_mode)) + procs.append(progress.JsonTestProgressIndicator(self.framework_name)) for proc in procs: proc.configure(options) diff --git a/deps/v8/tools/testrunner/local/command.py b/deps/v8/tools/testrunner/local/command.py index 50403a0e5e6953..df603d79d2f70b 100644 --- a/deps/v8/tools/testrunner/local/command.py +++ b/deps/v8/tools/testrunner/local/command.py @@ -200,13 +200,17 @@ def wrapped(arg): stderr=subprocess.PIPE, env=self._get_env(), shell=True, + # Make the new shell create its own process group. This allows to kill + # all spawned processes reliably (https://crbug.com/v8/8292). 
+ preexec_fn=os.setsid, ) except Exception as e: sys.stderr.write('Error executing: %s\n' % self) raise e def _kill_process(self, process): - process.kill() + # Kill the whole process group (PID == GPID after setsid). + os.killpg(process.pid, signal.SIGKILL) def taskkill_windows(process, verbose=False, force=True): diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py index f99941eb991542..c8a1b307e41c2f 100644 --- a/deps/v8/tools/testrunner/local/statusfile.py +++ b/deps/v8/tools/testrunner/local/statusfile.py @@ -27,12 +27,13 @@ # for py2/py3 compatibility from __future__ import print_function +from __future__ import absolute_import import os import re -from variants import ALL_VARIANTS -from utils import Freeze +from .variants import ALL_VARIANTS +from .utils import Freeze # Possible outcomes FAIL = "FAIL" diff --git a/deps/v8/tools/testrunner/local/statusfile_unittest.py b/deps/v8/tools/testrunner/local/statusfile_unittest.py index e8d5ff99cd1875..3e2493c0ce6065 100755 --- a/deps/v8/tools/testrunner/local/statusfile_unittest.py +++ b/deps/v8/tools/testrunner/local/statusfile_unittest.py @@ -4,10 +4,17 @@ # found in the LICENSE file. 
+from __future__ import absolute_import +import os +import sys import unittest -import statusfile -from utils import Freeze +TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +sys.path.append(TOOLS_PATH) + +from testrunner.local import statusfile +from testrunner.local.utils import Freeze TEST_VARIABLES = { diff --git a/deps/v8/tools/testrunner/local/testsuite.py b/deps/v8/tools/testrunner/local/testsuite.py index 864d7346fca164..a72ef4be610215 100644 --- a/deps/v8/tools/testrunner/local/testsuite.py +++ b/deps/v8/tools/testrunner/local/testsuite.py @@ -223,7 +223,7 @@ def __iter__(self): return self def __next__(self): - return self.next() + return next(self) def next(self): return next(self._iterator) diff --git a/deps/v8/tools/testrunner/local/utils.py b/deps/v8/tools/testrunner/local/utils.py index 9128c433a0490b..a6b92dc7566189 100644 --- a/deps/v8/tools/testrunner/local/utils.py +++ b/deps/v8/tools/testrunner/local/utils.py @@ -34,8 +34,7 @@ import os import platform import re -import subprocess -import urllib2 +import urllib ### Exit codes and their meaning. @@ -138,23 +137,6 @@ def IsWindows(): return GuessOS() == 'windows' -def URLRetrieve(source, destination): - """urllib is broken for SSL connections via a proxy therefore we - can't use urllib.urlretrieve().""" - if IsWindows(): - try: - # In python 2.7.6 on windows, urlopen has a problem with redirects. - # Try using curl instead. Note, this is fixed in 2.7.8. - subprocess.check_call(["curl", source, '-k', '-L', '-o', destination]) - return - except: - # If there's no curl, fall back to urlopen. - print("Curl is currently not installed. 
Falling back to python.") - pass - with open(destination, 'w') as f: - f.write(urllib2.urlopen(source).read()) - - class FrozenDict(dict): def __setitem__(self, *args, **kwargs): raise Exception('Tried to mutate a frozen dict') diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py index 39c3467b1b9755..e78571e14f483f 100644 --- a/deps/v8/tools/testrunner/local/variants.py +++ b/deps/v8/tools/testrunner/local/variants.py @@ -12,9 +12,11 @@ # Alias of exhaustive variants, but triggering new test framework features. "infra_staging": [[]], "interpreted_regexp": [["--regexp-interpret-all"]], + "experimental_regexp": [["--enable-experimental-regexp-engine"]], "jitless": [["--jitless"]], "minor_mc": [["--minor-mc"]], "nci": [["--turbo-nci"]], + "nci_as_highest_tier": [["--turbo-nci-as-highest-tier"]], "no_lfa": [["--no-lazy-feedback-allocation"]], # No optimization means disable all optimizations. OptimizeFunctionOnNextCall # would not force optimization too. It turns into a Nop. Please see diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py index d4e92a61e80eb5..7777f4c66d8e85 100755 --- a/deps/v8/tools/testrunner/num_fuzzer.py +++ b/deps/v8/tools/testrunner/num_fuzzer.py @@ -5,13 +5,14 @@ # found in the LICENSE file. # for py2/py3 compatibility +from __future__ import absolute_import from __future__ import print_function import random import sys # Adds testrunner to the path hence it has to be imported at the beggining. -import base_runner +from . 
import base_runner from testrunner.local import utils diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py index 2a75cf60c45ac8..ac4defd2d78d59 100644 --- a/deps/v8/tools/testrunner/objects/testcase.py +++ b/deps/v8/tools/testrunner/objects/testcase.py @@ -160,6 +160,11 @@ def is_pass_or_fail(self): statusfile.FAIL in self._statusfile_outcomes and statusfile.CRASH not in self._statusfile_outcomes) + @property + def is_fail(self): + return (statusfile.FAIL in self._statusfile_outcomes and + statusfile.PASS not in self._statusfile_outcomes) + @property def only_standard_variant(self): return statusfile.NO_VARIANTS in self._statusfile_outcomes diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py index 9b65ca564a9466..847b2242ffa34e 100644 --- a/deps/v8/tools/testrunner/outproc/base.py +++ b/deps/v8/tools/testrunner/outproc/base.py @@ -193,6 +193,8 @@ def _ignore_actual_line(self, line): line.startswith('**') or line.startswith('ANDROID') or line.startswith('###') or + # Android linker warning. + line.startswith('WARNING: linker:') or # FIXME(machenbach): The test driver shouldn't try to use slow # asserts if they weren't compiled. This fails in optdebug=2. line == 'Warning: unknown flag --enable-slow-asserts.' or diff --git a/deps/v8/tools/testrunner/outproc/message.py b/deps/v8/tools/testrunner/outproc/message.py index f196cfd614bd37..c253b6f8e06754 100644 --- a/deps/v8/tools/testrunner/outproc/message.py +++ b/deps/v8/tools/testrunner/outproc/message.py @@ -59,5 +59,7 @@ def _ignore_line(self, string): not string.strip() or string.startswith("==") or string.startswith("**") or - string.startswith("ANDROID") + string.startswith("ANDROID") or + # Android linker warning. 
+ string.startswith('WARNING: linker:') ) diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py index 10545fa5f2417b..99d36bf2f85044 100755 --- a/deps/v8/tools/testrunner/standard_runner.py +++ b/deps/v8/tools/testrunner/standard_runner.py @@ -5,6 +5,7 @@ # found in the LICENSE file. # for py2/py3 compatibility +from __future__ import absolute_import from __future__ import print_function from functools import reduce @@ -15,7 +16,7 @@ import tempfile # Adds testrunner to the path hence it has to be imported at the beggining. -import base_runner +from . import base_runner from testrunner.local import utils from testrunner.local.variants import ALL_VARIANTS @@ -379,10 +380,8 @@ def _duration_results_text(test): ] assert os.path.exists(options.json_test_results) - complete_results = [] with open(options.json_test_results, "r") as f: - complete_results = json.loads(f.read()) - output = complete_results[0] + output = json.load(f) lines = [] for test in output['slowest_tests']: suffix = '' diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py index a993fc18a372cb..d0e62046f41533 100644 --- a/deps/v8/tools/testrunner/testproc/progress.py +++ b/deps/v8/tools/testrunner/testproc/progress.py @@ -4,25 +4,20 @@ # for py2/py3 compatibility from __future__ import print_function +from __future__ import absolute_import import datetime import json import os import platform -import subprocess import sys import time -import util from . import base +from . import util from ..local import junit_output -# Base dir of the build products for Release and Debug. 
-OUT_DIR = os.path.abspath( - os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out')) - - def print_failure_header(test): if test.output_proc.negative: negative_marker = '[negative] ' @@ -113,6 +108,28 @@ def finished(self): print("===") +class StreamProgressIndicator(ProgressIndicator): + def __init__(self): + super(StreamProgressIndicator, self).__init__() + self._requirement = base.DROP_PASS_OUTPUT + + def _on_result_for(self, test, result): + if not result.has_unexpected_output: + self.print('PASS', test) + elif result.output.HasCrashed(): + self.print("CRASH", test) + elif result.output.HasTimedOut(): + self.print("TIMEOUT", test) + else: + if test.is_fail: + self.print("UNEXPECTED PASS", test) + else: + self.print("FAIL", test) + + def print(self, prefix, test): + print('%s: %ss' % (prefix, test)) + sys.stdout.flush() + class VerboseProgressIndicator(SimpleProgressIndicator): def __init__(self): super(VerboseProgressIndicator, self).__init__() @@ -143,16 +160,10 @@ def _on_result_for(self, test, result): # feedback channel from the workers, providing which tests are currently run. def _print_processes_linux(self): if platform.system() == 'Linux': - try: - cmd = 'ps -aux | grep "%s"' % OUT_DIR - output = subprocess.check_output(cmd, shell=True) - self._print('List of processes:') - for line in (output or '').splitlines(): - # Show command with pid, but other process info cut off. - self._print('pid: %s cmd: %s' % - (line.split()[1], line[line.index(OUT_DIR):])) - except: - pass + self._print('List of processes:') + for pid, cmd in util.list_processes_linux(): + # Show command with pid, but other process info cut off. 
+ self._print('pid: %d cmd: %s' % (pid, cmd)) def _ensure_delay(self, delay): return time.time() - self._last_printed_time > delay @@ -244,15 +255,22 @@ def _on_result_for(self, test, result): self._clear_line(self._last_status_length) print_failure_header(test) if len(stdout): - print(self._templates['stdout'] % stdout) + self.printFormatted('stdout', stdout) if len(stderr): - print(self._templates['stderr'] % stderr) - print("Command: %s" % result.cmd.to_string(relative=True)) + self.printFormatted('stderr', stderr) + self.printFormatted( + 'command', "Command: %s" % result.cmd.to_string(relative=True)) if output.HasCrashed(): - print("exit code: %s" % output.exit_code_string) - print("--- CRASHED ---") - if output.HasTimedOut(): - print("--- TIMEOUT ---") + self.printFormatted( + 'failure', "exit code: %s" % output.exit_code_string) + self.printFormatted('failure', "--- CRASHED ---") + elif output.HasTimedOut(): + self.printFormatted('failure', "--- TIMEOUT ---") + else: + if test.is_fail: + self.printFormatted('failure', "--- UNEXPECTED PASS ---") + else: + self.printFormatted('failure', "--- FAILED ---") def finished(self): self._print_progress('Done') @@ -273,12 +291,12 @@ def _print_progress(self, name): 'mins': int(elapsed) // 60, 'secs': int(elapsed) % 60 } - status = self._truncate(status, 78) + status = self._truncateStatusLine(status, 78) self._last_status_length = len(status) print(status, end='') sys.stdout.flush() - def _truncate(self, string, length): + def _truncateStatusLine(self, string, length): if length and len(string) > (length - 3): return string[:(length - 3)] + "..." 
else: @@ -297,22 +315,33 @@ def __init__(self): "\033[31m-%(failed) 4d\033[0m]: %(test)s"), 'stdout': "\033[1m%s\033[0m", 'stderr': "\033[31m%s\033[0m", + 'failure': "\033[1;31m%s\033[0m", + 'command': "\033[33m%s\033[0m", } super(ColorProgressIndicator, self).__init__(templates) + def printFormatted(self, format, string): + print(self._templates[format] % string) + + def _truncateStatusLine(self, string, length): + # Add some slack for the color control chars + return super(ColorProgressIndicator, self)._truncateStatusLine( + string, length + 3*9) + def _clear_line(self, last_length): print("\033[1K\r", end='') class MonochromeProgressIndicator(CompactProgressIndicator): def __init__(self): - templates = { - 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|" - "+%(passed) 4d|-%(failed) 4d]: %(test)s"), - 'stdout': '%s', - 'stderr': '%s', - } - super(MonochromeProgressIndicator, self).__init__(templates) + templates = { + 'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|" + "+%(passed) 4d|-%(failed) 4d]: %(test)s"), + } + super(MonochromeProgressIndicator, self).__init__(templates) + + def printFormatted(self, format, string): + print(string) def _clear_line(self, last_length): print(("\r" + (" " * last_length) + "\r"), end='') @@ -358,7 +387,7 @@ def finished(self): class JsonTestProgressIndicator(ProgressIndicator): - def __init__(self, framework_name, arch, mode): + def __init__(self, framework_name): super(JsonTestProgressIndicator, self).__init__() # We want to drop stdout/err for all passed tests on the first try, but we # need to get outputs for all runs after the first one. 
To accommodate that, @@ -367,8 +396,6 @@ def __init__(self, framework_name, arch, mode): self._requirement = base.DROP_PASS_STDOUT self.framework_name = framework_name - self.arch = arch - self.mode = mode self.results = [] self.duration_sum = 0 self.test_count = 0 @@ -438,24 +465,16 @@ def _test_record(self, test, result, output, run): } def finished(self): - complete_results = [] - if os.path.exists(self.options.json_test_results): - with open(self.options.json_test_results, "r") as f: - # On bots we might start out with an empty file. - complete_results = json.loads(f.read() or "[]") - duration_mean = None if self.test_count: duration_mean = self.duration_sum / self.test_count - complete_results.append({ - "arch": self.arch, - "mode": self.mode, + result = { "results": self.results, "slowest_tests": self.tests.as_list(), "duration_mean": duration_mean, "test_total": self.test_count, - }) + } with open(self.options.json_test_results, "w") as f: - f.write(json.dumps(complete_results)) + json.dump(result, f) diff --git a/deps/v8/tools/testrunner/testproc/timeout.py b/deps/v8/tools/testrunner/testproc/timeout.py index 9a4e88c8f057ba..026ba02cd97b64 100644 --- a/deps/v8/tools/testrunner/testproc/timeout.py +++ b/deps/v8/tools/testrunner/testproc/timeout.py @@ -1,3 +1,4 @@ +from __future__ import print_function # Copyright 2018 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. diff --git a/deps/v8/tools/testrunner/testproc/util.py b/deps/v8/tools/testrunner/testproc/util.py index 8c1024cc81d86d..1f5cc7ef910595 100644 --- a/deps/v8/tools/testrunner/testproc/util.py +++ b/deps/v8/tools/testrunner/testproc/util.py @@ -4,7 +4,49 @@ # found in the LICENSE file. import heapq +import os +import platform import random +import signal +import subprocess + +# Base dir of the build products for Release and Debug. 
+OUT_DIR = os.path.abspath( + os.path.join(os.path.dirname(__file__), '..', '..', '..', 'out')) + + +def list_processes_linux(): + """Returns list of tuples (pid, command) of processes running in the same out + directory as this checkout. + """ + if platform.system() != 'Linux': + return [] + try: + cmd = 'pgrep -fa %s' % OUT_DIR + output = subprocess.check_output(cmd, shell=True) or '' + processes = [ + (int(line.split()[0]), line[line.index(OUT_DIR):]) + for line in output.splitlines() + ] + # Filter strange process with name as out dir. + return [p for p in processes if p[1] != OUT_DIR] + except: + return [] + + +def kill_processes_linux(): + """Kill stray processes on the system that started in the same out directory. + + All swarming tasks share the same out directory location. + """ + if platform.system() != 'Linux': + return + for pid, cmd in list_processes_linux(): + try: + print('Attempting to kill %d - %s' % (pid, cmd)) + os.kill(pid, signal.SIGKILL) + except: + pass class FixedSizeTopList(): diff --git a/deps/v8/tools/testrunner/testproc/util_unittest.py b/deps/v8/tools/testrunner/testproc/util_unittest.py index 243bf9789a735b..5bf6a6e79ada6d 100644 --- a/deps/v8/tools/testrunner/testproc/util_unittest.py +++ b/deps/v8/tools/testrunner/testproc/util_unittest.py @@ -3,9 +3,18 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. 
-from util import FixedSizeTopList +from __future__ import absolute_import + +import os +import sys import unittest +TOOLS_PATH = os.path.dirname(os.path.dirname(os.path.dirname( + os.path.abspath(__file__)))) +sys.path.append(TOOLS_PATH) + +from testrunner.testproc.util import FixedSizeTopList + class TestOrderedFixedSizeList(unittest.TestCase): def test_empty(self): ofsl = FixedSizeTopList(3) diff --git a/deps/v8/tools/torque/format-torque.py b/deps/v8/tools/torque/format-torque.py index 96eb3a1909522c..16fc798c4334a5 100755 --- a/deps/v8/tools/torque/format-torque.py +++ b/deps/v8/tools/torque/format-torque.py @@ -16,8 +16,20 @@ from subprocess import Popen, PIPE kPercentEscape = r'α'; # Unicode alpha +kDerefEscape = r'☆'; # Unicode star +kAddressofEscape = r'⌂'; # Unicode house def preprocess(input): + # Special handing of '%' for intrinsics, turn the percent + # into a unicode character so that it gets treated as part of the + # intrinsic's name if it's already adjacent to it. + input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input) + # Similarly, avoid treating * and & as binary operators when they're + # probably used as address operators. 
+ input = re.sub(r'([^/])\*([a-zA-Z(])', r'\1' + kDerefEscape + r'\2', input) + input = re.sub(r'&([a-zA-Z(])', kAddressofEscape + r'\1', input) + + input = re.sub(r'(if\s+)constexpr(\s*\()', r'\1/*COxp*/\2', input) input = re.sub(r'(\s+)operator\s*(\'[^\']+\')', r'\1/*_OPE \2*/', input) input = re.sub(r'\btypeswitch\s*(\([^{]*\))\s{', r' if /*tPsW*/ \1 {', input) @@ -35,12 +47,7 @@ def preprocess(input): input = re.sub(r'@if\(', r'@iF(', input) input = re.sub(r'@export', r'@eXpOrT', input) input = re.sub(r'js-implicit[ \n]+', r'jS_iMpLiCiT_', input) - input = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)(\s*)$', r'\1}\2', input, flags = re.MULTILINE); - - # Special handing of '%' for intrinsics, turn the percent - # into a unicode character so that it gets treated as part of the - # intrinsic's name if it's already adjacent to it. - input = re.sub(r'%([A-Za-z])', kPercentEscape + r'\1', input) + input = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)(\s*)$', r'\1}\2', input, flags = re.MULTILINE) # includes are not recognized, change them into comments so that the # formatter ignores them first, until we can figure out a way to format cpp @@ -78,6 +85,9 @@ def postprocess(output): output = re.sub(r'^(\s*namespace\s+[a-zA-Z_0-9]+\s*{)}(\s*)$', r'\1\2', output, flags = re.MULTILINE); output = re.sub(kPercentEscape, r'%', output) + output = re.sub(kDerefEscape, r'*', output) + output = re.sub(kAddressofEscape, r'&', output) + output = re.sub( r'^// InClUdE',r'#include', output, flags=re.MULTILINE) diff --git a/deps/v8/tools/turbolizer/README.md b/deps/v8/tools/turbolizer/README.md index c5ee729d64ddf1..fa804f65e94772 100644 --- a/deps/v8/tools/turbolizer/README.md +++ b/deps/v8/tools/turbolizer/README.md @@ -74,7 +74,6 @@ well as '--cpu' to specify which CPU to sample. Turbolizer build process ------------------------ -Turbolizer is currently migrating to TypeScript. 
The typescript sources reside in -tools/turbolizer/src, and the typescript compiler will put the JavaScript output -into tools/turbolizer/build/. The index.html file is set up to load the JavaScript -from that directory. +The typescript sources reside in tools/turbolizer/src, and the typescript +compiler will put the JavaScript output into tools/turbolizer/build/. The +index.html file is set up to load the JavaScript from that directory. diff --git a/deps/v8/tools/turbolizer/down-arrow.png b/deps/v8/tools/turbolizer/down-arrow.png new file mode 100644 index 00000000000000..39339f289a30b9 Binary files /dev/null and b/deps/v8/tools/turbolizer/down-arrow.png differ diff --git a/deps/v8/tools/turbolizer/index.html b/deps/v8/tools/turbolizer/index.html index 268e51e0200ec0..ea1b0b74d272b1 100644 --- a/deps/v8/tools/turbolizer/index.html +++ b/deps/v8/tools/turbolizer/index.html @@ -8,6 +8,7 @@ V8 Turbolizer + @@ -21,6 +22,12 @@ + + +
diff --git a/deps/v8/tools/turbolizer/src/constants.ts b/deps/v8/tools/turbolizer/src/constants.ts index ada39ae6b3b203..47dee8547ffb6a 100644 --- a/deps/v8/tools/turbolizer/src/constants.ts +++ b/deps/v8/tools/turbolizer/src/constants.ts @@ -14,6 +14,9 @@ export const GENERATED_PANE_ID = 'right'; export const DISASSEMBLY_PANE_ID = 'disassembly'; export const DISASSEMBLY_COLLAPSE_ID = 'disassembly-shrink'; export const DISASSEMBLY_EXPAND_ID = 'disassembly-expand'; +export const RANGES_PANE_ID = "ranges"; +export const RANGES_COLLAPSE_ID = "ranges-shrink"; +export const RANGES_EXPAND_ID = "ranges-expand"; export const UNICODE_BLOCK = '▋'; export const PROF_COLS = [ { perc: 0, col: { r: 255, g: 255, b: 255 } }, diff --git a/deps/v8/tools/turbolizer/src/graphmultiview.ts b/deps/v8/tools/turbolizer/src/graphmultiview.ts index 380f7df77db0a2..4f8f6339199543 100644 --- a/deps/v8/tools/turbolizer/src/graphmultiview.ts +++ b/deps/v8/tools/turbolizer/src/graphmultiview.ts @@ -38,6 +38,11 @@ export class GraphMultiView extends View { return pane; } + hide() { + this.hideCurrentPhase(); + super.hide(); + } + constructor(id, selectionBroker, sourceResolver) { super(id); const view = this; @@ -86,7 +91,9 @@ export class GraphMultiView extends View { } show() { - super.show(); + // Insert before is used so that the display is inserted before the + // resizer for the RangeView. + this.container.insertBefore(this.divNode, this.container.firstChild); this.initializeSelect(); const lastPhaseIndex = +window.sessionStorage.getItem("lastSelectedPhase"); const initialPhaseIndex = this.sourceResolver.repairPhaseId(lastPhaseIndex); diff --git a/deps/v8/tools/turbolizer/src/range-view.ts b/deps/v8/tools/turbolizer/src/range-view.ts new file mode 100644 index 00000000000000..17058e4f3b23e9 --- /dev/null +++ b/deps/v8/tools/turbolizer/src/range-view.ts @@ -0,0 +1,938 @@ +// Copyright 2020 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +import { createElement } from "../src/util"; +import { SequenceView } from "../src/sequence-view"; +import { RegisterAllocation, Range, ChildRange, Interval } from "../src/source-resolver"; + +class Constants { + // Determines how many rows each div group holds for the purposes of + // hiding by syncHidden. + static readonly ROW_GROUP_SIZE = 20; + static readonly POSITIONS_PER_INSTRUCTION = 4; + static readonly FIXED_REGISTER_LABEL_WIDTH = 6; + + static readonly INTERVAL_TEXT_FOR_NONE = "none"; + static readonly INTERVAL_TEXT_FOR_CONST = "const"; + static readonly INTERVAL_TEXT_FOR_STACK = "stack:"; +} + +// This class holds references to the HTMLElements that represent each cell. +class Grid { + elements: Array>; + + constructor() { + this.elements = []; + } + + setRow(row: number, elementsRow: Array) { + this.elements[row] = elementsRow; + } + + getCell(row: number, column: number) { + return this.elements[row][column]; + } + + getInterval(row: number, column: number) { + // The cell is within an inner wrapper div which is within the interval div. + return this.getCell(row, column).parentElement.parentElement; + } +} + +// This class is used as a wrapper to hide the switch between the +// two different Grid objects used, one for each phase, +// before and after register allocation. 
+class GridAccessor { + sequenceView: SequenceView; + grids: Map; + + constructor(sequenceView: SequenceView) { + this.sequenceView = sequenceView; + this.grids = new Map(); + } + + private currentGrid() { + return this.grids.get(this.sequenceView.currentPhaseIndex); + } + + getAnyGrid() { + return this.grids.values().next().value; + } + + hasGrid() { + return this.grids.has(this.sequenceView.currentPhaseIndex); + } + + addGrid(grid: Grid) { + if (this.hasGrid()) console.warn("Overwriting existing Grid."); + this.grids.set(this.sequenceView.currentPhaseIndex, grid); + } + + getCell(row: number, column: number) { + return this.currentGrid().getCell(row, column); + } + + getInterval(row: number, column: number) { + return this.currentGrid().getInterval(row, column); + } +} + +// This class is used as a wrapper to access the interval HTMLElements +class IntervalElementsAccessor { + sequenceView: SequenceView; + map: Map>; + + constructor(sequenceView: SequenceView) { + this.sequenceView = sequenceView; + this.map = new Map>(); + } + + private currentIntervals() { + const intervals = this.map.get(this.sequenceView.currentPhaseIndex); + if (intervals == undefined) { + this.map.set(this.sequenceView.currentPhaseIndex, new Array()); + return this.currentIntervals(); + } + return intervals; + } + + addInterval(interval: HTMLElement) { + this.currentIntervals().push(interval); + } + + forEachInterval(callback: (phase: number, interval: HTMLElement) => void) { + for (const phase of this.map.keys()) { + for (const interval of this.map.get(phase)) { + callback(phase, interval); + } + } + } +} + +// A simple class used to hold two Range objects. This is used to allow the two fixed register live +// ranges of normal and deferred to be easily combined into a single row. 
+class RangePair { + ranges: [Range, Range]; + + constructor(ranges: [Range, Range]) { + this.ranges = ranges; + } + + forEachRange(callback: (range: Range) => void) { + this.ranges.forEach((range: Range) => { if (range) callback(range); }); + } +} + +// A number of css variables regarding dimensions of HTMLElements are required by RangeView. +class CSSVariables { + positionWidth: number; + blockBorderWidth: number; + + constructor() { + const getNumberValue = varName => { + return parseFloat(getComputedStyle(document.body) + .getPropertyValue(varName).match(/[+-]?\d+(\.\d+)?/g)[0]); + }; + this.positionWidth = getNumberValue("--range-position-width"); + this.blockBorderWidth = getNumberValue("--range-block-border"); + } +} + +// Store the required data from the blocks JSON. +class BlocksData { + blockBorders: Set; + blockInstructionCountMap: Map; + + constructor(blocks: Array) { + this.blockBorders = new Set(); + this.blockInstructionCountMap = new Map(); + for (const block of blocks) { + this.blockInstructionCountMap.set(block.id, block.instructions.length); + const maxInstructionInBlock = block.instructions[block.instructions.length - 1].id; + this.blockBorders.add(maxInstructionInBlock); + } + } + + isInstructionBorder(position: number) { + return ((position + 1) % Constants.POSITIONS_PER_INSTRUCTION) == 0; + } + + isBlockBorder(position: number) { + return this.isInstructionBorder(position) + && this.blockBorders.has(Math.floor(position / Constants.POSITIONS_PER_INSTRUCTION)); + } +} + +class Divs { + // Already existing. + container: HTMLElement; + resizerBar: HTMLElement; + snapper: HTMLElement; + + // Created by constructor. + content: HTMLElement; + // showOnLoad contains all content that may change depending on the JSON. + showOnLoad: HTMLElement; + xAxisLabel: HTMLElement; + yAxisLabel: HTMLElement; + registerHeaders: HTMLElement; + registers: HTMLElement; + + // Assigned from RangeView. 
+ wholeHeader: HTMLElement; + positionHeaders: HTMLElement; + yAxis: HTMLElement; + grid: HTMLElement; + + constructor() { + this.container = document.getElementById("ranges"); + this.resizerBar = document.getElementById("resizer-ranges"); + this.snapper = document.getElementById("show-hide-ranges"); + + this.content = document.createElement("div"); + this.content.appendChild(this.elementForTitle()); + + this.showOnLoad = document.createElement("div"); + this.showOnLoad.style.visibility = "hidden"; + this.content.appendChild(this.showOnLoad); + + this.xAxisLabel = createElement("div", "range-header-label-x"); + this.xAxisLabel.innerText = "Blocks, Instructions, and Positions"; + this.showOnLoad.appendChild(this.xAxisLabel); + this.yAxisLabel = createElement("div", "range-header-label-y"); + this.yAxisLabel.innerText = "Registers"; + this.showOnLoad.appendChild(this.yAxisLabel); + + this.registerHeaders = createElement("div", "range-register-labels"); + this.registers = createElement("div", "range-registers"); + this.registerHeaders.appendChild(this.registers); + } + + elementForTitle() { + const titleEl = createElement("div", "range-title-div"); + const titleBar = createElement("div", "range-title"); + titleBar.appendChild(createElement("div", "", "Live Ranges")); + const titleHelp = createElement("div", "range-title-help", "?"); + titleHelp.title = "Each row represents a single TopLevelLiveRange (or two if deferred exists)." + + "\nEach interval belongs to a LiveRange contained within that row's TopLevelLiveRange." 
+ + "\nAn interval is identified by i, the index of the LiveRange within the TopLevelLiveRange," + + "\nand j, the index of the interval within the LiveRange, to give i:j."; + titleEl.appendChild(titleBar); + titleEl.appendChild(titleHelp); + return titleEl; + } +} + +class Helper { + static virtualRegisterName(registerIndex: string) { + return "v" + registerIndex; + } + + static fixedRegisterName(range: Range) { + return range.child_ranges[0].op.text; + } + + static getPositionElementsFromInterval(interval: HTMLElement) { + return interval.children[1].children; + } + + static forEachFixedRange(source: RegisterAllocation, row: number, + callback: (registerIndex: string, row: number, registerName: string, + ranges: RangePair) => void) { + + const forEachRangeInMap = (rangeMap: Map) => { + // There are two fixed live ranges for each register, one for normal, another for deferred. + // These are combined into a single row. + const fixedRegisterMap = new Map(); + for (const [registerIndex, range] of rangeMap) { + const registerName = this.fixedRegisterName(range); + if (fixedRegisterMap.has(registerName)) { + const entry = fixedRegisterMap.get(registerName); + entry.ranges[1] = range; + // Only use the deferred register index if no normal index exists. + if (!range.is_deferred) { + entry.registerIndex = parseInt(registerIndex, 10); + } + } else { + fixedRegisterMap.set(registerName, {ranges: [range, undefined], + registerIndex: parseInt(registerIndex, 10)}); + } + } + // Sort the registers by number. + const sortedMap = new Map([...fixedRegisterMap.entries()].sort(([nameA, _], [nameB, __]) => { + // Larger numbers create longer strings. + if (nameA.length > nameB.length) return 1; + if (nameA.length < nameB.length) return -1; + // Sort lexicographically if same length. 
+ if (nameA > nameB) return 1; + if (nameA < nameB) return -1; + return 0; + })); + for (const [registerName, {ranges, registerIndex}] of sortedMap) { + callback("" + (-registerIndex - 1), row, registerName, new RangePair(ranges)); + ++row; + } + }; + + forEachRangeInMap(source.fixedLiveRanges); + forEachRangeInMap(source.fixedDoubleLiveRanges); + + return row; + } +} + +class RowConstructor { + view: RangeView; + + constructor(view: RangeView) { + this.view = view; + } + + // Constructs the row of HTMLElements for grid while providing a callback for each position + // depending on whether that position is the start of an interval or not. + // RangePair is used to allow the two fixed register live ranges of normal and deferred to be + // easily combined into a single row. + construct(grid: Grid, row: number, registerIndex: string, ranges: RangePair, + getElementForEmptyPosition: (position: number) => HTMLElement, + callbackForInterval: (position: number, interval: HTMLElement) => void) { + const positionArray = new Array(this.view.numPositions); + // Construct all of the new intervals. + const intervalMap = this.elementsForIntervals(registerIndex, ranges); + for (let position = 0; position < this.view.numPositions; ++position) { + const interval = intervalMap.get(position); + if (interval == undefined) { + positionArray[position] = getElementForEmptyPosition(position); + } else { + callbackForInterval(position, interval); + this.view.intervalsAccessor.addInterval(interval); + const intervalPositionElements = Helper.getPositionElementsFromInterval(interval); + for (let j = 0; j < intervalPositionElements.length; ++j) { + // Point positionsArray to the new elements. 
+ positionArray[position + j] = (intervalPositionElements[j] as HTMLElement); + } + position += intervalPositionElements.length - 1; + } + } + grid.setRow(row, positionArray); + ranges.forEachRange((range: Range) => this.setUses(grid, row, range)); + } + + // This is the main function used to build new intervals. + // Returns a map of LifeTimePositions to intervals. + private elementsForIntervals(registerIndex: string, ranges: RangePair) { + const intervalMap = new Map(); + let tooltip = ""; + ranges.forEachRange((range: Range) => { + for (const childRange of range.child_ranges) { + switch (childRange.type) { + case "none": + tooltip = Constants.INTERVAL_TEXT_FOR_NONE; + break; + case "spill_range": + tooltip = Constants.INTERVAL_TEXT_FOR_STACK + registerIndex; + break; + default: + if (childRange.op.type == "constant") { + tooltip = Constants.INTERVAL_TEXT_FOR_CONST; + } else { + if (childRange.op.text) { + tooltip = childRange.op.text; + } else { + tooltip = childRange.op; + } + } + break; + } + childRange.intervals.forEach((intervalNums, index) => { + const interval = new Interval(intervalNums); + const intervalEl = this.elementForInterval(childRange, interval, tooltip, + index, range.is_deferred); + intervalMap.set(interval.start, intervalEl); + }); + } + }); + return intervalMap; + } + + private elementForInterval(childRange: ChildRange, interval: Interval, + tooltip: string, index: number, isDeferred: boolean): HTMLElement { + const intervalEl = createElement("div", "range-interval"); + const title = childRange.id + ":" + index + " " + tooltip; + intervalEl.setAttribute("title", isDeferred ? 
"deferred: " + title : title); + this.setIntervalColor(intervalEl, tooltip); + const intervalInnerWrapper = createElement("div", "range-interval-wrapper"); + intervalEl.style.gridColumn = (interval.start + 1) + " / " + (interval.end + 1); + intervalInnerWrapper.style.gridTemplateColumns = "repeat(" + (interval.end - interval.start) + + ",calc(" + this.view.cssVariables.positionWidth + "ch + " + + this.view.cssVariables.blockBorderWidth + "px)"; + const intervalTextEl = this.elementForIntervalString(tooltip, interval.end - interval.start); + intervalEl.appendChild(intervalTextEl); + for (let i = interval.start; i < interval.end; ++i) { + const classes = "range-position range-interval-position range-empty" + + (this.view.blocksData.isBlockBorder(i) ? " range-block-border" : + this.view.blocksData.isInstructionBorder(i) ? " range-instr-border" : ""); + const positionEl = createElement("div", classes, "_"); + positionEl.style.gridColumn = (i - interval.start + 1) + ""; + intervalInnerWrapper.appendChild(positionEl); + } + intervalEl.appendChild(intervalInnerWrapper); + return intervalEl; + } + + private setIntervalColor(interval: HTMLElement, tooltip: string) { + if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_NONE)) return; + if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_STACK + "-")) { + interval.style.backgroundColor = "rgb(250, 158, 168)"; + } else if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_STACK)) { + interval.style.backgroundColor = "rgb(250, 158, 100)"; + } else if (tooltip.includes(Constants.INTERVAL_TEXT_FOR_CONST)) { + interval.style.backgroundColor = "rgb(153, 158, 230)"; + } else { + interval.style.backgroundColor = "rgb(153, 220, 168)"; + } + } + + private elementForIntervalString(tooltip: string, numCells: number) { + const spanEl = createElement("span", "range-interval-text"); + this.setIntervalString(spanEl, tooltip, numCells); + return spanEl; + } + + // Each interval displays a string of information about it. 
+ private setIntervalString(spanEl: HTMLElement, tooltip: string, numCells: number) { + const spacePerCell = this.view.cssVariables.positionWidth; + // One character space is removed to accommodate for padding. + const spaceAvailable = (numCells * spacePerCell) - 0.5; + let str = tooltip + ""; + const length = tooltip.length; + spanEl.style.width = null; + let paddingLeft = null; + // Add padding if possible + if (length <= spaceAvailable) { + paddingLeft = (length == spaceAvailable) ? "0.5ch" : "1ch"; + } else { + str = ""; + } + spanEl.style.paddingTop = null; + spanEl.style.paddingLeft = paddingLeft; + spanEl.innerHTML = str; + } + + private setUses(grid: Grid, row: number, range: Range) { + for (const liveRange of range.child_ranges) { + if (liveRange.uses) { + for (const use of liveRange.uses) { + grid.getCell(row, use).classList.toggle("range-use", true); + } + } + } + } +} + +class RangeViewConstructor { + view: RangeView; + gridTemplateColumns: string; + grid: Grid; + + // Group the rows in divs to make hiding/showing divs more efficient. 
+ currentGroup: HTMLElement; + currentPlaceholderGroup: HTMLElement; + + constructor(rangeView: RangeView) { + this.view = rangeView; + } + + construct() { + this.gridTemplateColumns = "repeat(" + this.view.numPositions + + ",calc(" + this.view.cssVariables.positionWidth + "ch + " + + this.view.cssVariables.blockBorderWidth + "px)"; + + this.grid = new Grid(); + this.view.gridAccessor.addGrid(this.grid); + + this.view.divs.wholeHeader = this.elementForHeader(); + this.view.divs.showOnLoad.appendChild(this.view.divs.wholeHeader); + + const gridContainer = document.createElement("div"); + this.view.divs.grid = this.elementForGrid(); + this.view.divs.yAxis = createElement("div", "range-y-axis"); + this.view.divs.yAxis.appendChild(this.view.divs.registerHeaders); + this.view.divs.yAxis.onscroll = () => { + this.view.scrollHandler.syncScroll(ToSync.TOP, this.view.divs.yAxis, this.view.divs.grid); + this.view.scrollHandler.saveScroll(); + }; + gridContainer.appendChild(this.view.divs.yAxis); + gridContainer.appendChild(this.view.divs.grid); + this.view.divs.showOnLoad.appendChild(gridContainer); + + this.resetGroups(); + let row = 0; + row = this.addVirtualRanges(row); + this.addFixedRanges(row); + } + + // The following three functions are for constructing the groups which the rows are contained + // within and which make up the grid. This is so as to allow groups of rows to easily be displayed + // and hidden for performance reasons. As rows are constructed, they are added to the currentGroup + // div. Each row in currentGroup is matched with an equivalent placeholder row in + // currentPlaceholderGroup that will be shown when currentGroup is hidden so as to maintain the + // dimensions and scroll positions of the grid. 
+ + private resetGroups () { + this.currentGroup = createElement("div", "range-positions-group range-hidden"); + this.currentPlaceholderGroup = createElement("div", "range-positions-group"); + } + + private appendGroupsToGrid() { + this.view.divs.grid.appendChild(this.currentPlaceholderGroup); + this.view.divs.grid.appendChild(this.currentGroup); + } + + private addRowToGroup(row: number, rowEl: HTMLElement) { + this.currentGroup.appendChild(rowEl); + this.currentPlaceholderGroup + .appendChild(createElement("div", "range-positions range-positions-placeholder", "_")); + if ((row + 1) % Constants.ROW_GROUP_SIZE == 0) { + this.appendGroupsToGrid(); + this.resetGroups(); + } + } + + private addVirtualRanges(row: number) { + const source = this.view.sequenceView.sequence.register_allocation; + for (const [registerIndex, range] of source.liveRanges) { + const registerName = Helper.virtualRegisterName(registerIndex); + const registerEl = this.elementForVirtualRegister(registerName); + this.addRowToGroup(row, this.elementForRow(row, registerIndex, + new RangePair([range, undefined]))); + this.view.divs.registers.appendChild(registerEl); + ++row; + } + return row; + } + + private addFixedRanges(row: number) { + row = Helper.forEachFixedRange(this.view.sequenceView.sequence.register_allocation, row, + (registerIndex: string, row: number, + registerName: string, ranges: RangePair) => { + const registerEl = this.elementForFixedRegister(registerName); + this.addRowToGroup(row, this.elementForRow(row, registerIndex, ranges)); + this.view.divs.registers.appendChild(registerEl); + }); + if (row % Constants.ROW_GROUP_SIZE != 0) { + this.appendGroupsToGrid(); + } + } + + // Each row of positions and intervals associated with a register is contained in a single + // HTMLElement. RangePair is used to allow the two fixed register live ranges of normal and + // deferred to be easily combined into a single row. 
+ private elementForRow(row: number, registerIndex: string, ranges: RangePair) { + const rowEl = createElement("div", "range-positions"); + rowEl.style.gridTemplateColumns = this.gridTemplateColumns; + + const getElementForEmptyPosition = (position: number) => { + const blockBorder = this.view.blocksData.isBlockBorder(position); + const classes = "range-position range-empty " + + (blockBorder ? "range-block-border" : + this.view.blocksData.isInstructionBorder(position) ? "range-instr-border" + : "range-position-border"); + const positionEl = createElement("div", classes, "_"); + positionEl.style.gridColumn = (position + 1) + ""; + rowEl.appendChild(positionEl); + return positionEl; + }; + + const callbackForInterval = (_, interval: HTMLElement) => { + rowEl.appendChild(interval); + }; + + this.view.rowConstructor.construct(this.grid, row, registerIndex, ranges, + getElementForEmptyPosition, callbackForInterval); + return rowEl; + } + + private elementForVirtualRegister(registerName: string) { + const regEl = createElement("div", "range-reg", registerName); + regEl.setAttribute("title", registerName); + return regEl; + } + + private elementForFixedRegister(registerName: string) { + let text = registerName; + const span = "".padEnd(Constants.FIXED_REGISTER_LABEL_WIDTH - text.length, "_"); + text = "HW - " + span + "" + text; + const regEl = createElement("div", "range-reg"); + regEl.innerHTML = text; + regEl.setAttribute("title", registerName); + return regEl; + } + + // The header element contains the three headers for the LifeTimePosition axis. 
+ private elementForHeader() { + const headerEl = createElement("div", "range-header"); + this.view.divs.positionHeaders = createElement("div", "range-position-labels"); + + this.view.divs.positionHeaders.appendChild(this.elementForBlockHeader()); + this.view.divs.positionHeaders.appendChild(this.elementForInstructionHeader()); + this.view.divs.positionHeaders.appendChild(this.elementForPositionHeader()); + + headerEl.appendChild(this.view.divs.positionHeaders); + headerEl.onscroll = () => { + this.view.scrollHandler.syncScroll(ToSync.LEFT, + this.view.divs.wholeHeader, this.view.divs.grid); + this.view.scrollHandler.saveScroll(); + }; + return headerEl; + } + + // The LifeTimePosition axis shows three headers, for positions, instructions, and blocks. + + private elementForBlockHeader() { + const headerEl = createElement("div", "range-block-ids"); + headerEl.style.gridTemplateColumns = this.gridTemplateColumns; + + const elementForBlockIndex = (index: number, firstInstruction: number, instrCount: number) => { + const str = "B" + index; + const element = + createElement("div", "range-block-id range-header-element range-block-border", str); + element.setAttribute("title", str); + const firstGridCol = (firstInstruction * Constants.POSITIONS_PER_INSTRUCTION) + 1; + const lastGridCol = firstGridCol + (instrCount * Constants.POSITIONS_PER_INSTRUCTION); + element.style.gridColumn = firstGridCol + " / " + lastGridCol; + return element; + }; + + let blockIndex = 0; + for (let i = 0; i < this.view.sequenceView.numInstructions;) { + const instrCount = this.view.blocksData.blockInstructionCountMap.get(blockIndex); + headerEl.appendChild(elementForBlockIndex(blockIndex, i, instrCount)); + ++blockIndex; + i += instrCount; + } + return headerEl; + } + + private elementForInstructionHeader() { + const headerEl = createElement("div", "range-instruction-ids"); + headerEl.style.gridTemplateColumns = this.gridTemplateColumns; + + const elementForInstructionIndex = (index: number, 
isBlockBorder: boolean) => { + const classes = "range-instruction-id range-header-element " + + (isBlockBorder ? "range-block-border" : "range-instr-border"); + const element = createElement("div", classes, "" + index); + element.setAttribute("title", "" + index); + const firstGridCol = (index * Constants.POSITIONS_PER_INSTRUCTION) + 1; + element.style.gridColumn = firstGridCol + " / " + + (firstGridCol + Constants.POSITIONS_PER_INSTRUCTION); + return element; + }; + + for (let i = 0; i < this.view.sequenceView.numInstructions; ++i) { + const blockBorder = this.view.blocksData.blockBorders.has(i); + headerEl.appendChild(elementForInstructionIndex(i, blockBorder)); + } + return headerEl; + } + + private elementForPositionHeader() { + const headerEl = createElement("div", "range-positions range-positions-header"); + headerEl.style.gridTemplateColumns = this.gridTemplateColumns; + + const elementForPositionIndex = (index: number, isBlockBorder: boolean) => { + const classes = "range-position range-header-element " + + (isBlockBorder ? "range-block-border" + : this.view.blocksData.isInstructionBorder(index) ? "range-instr-border" + : "range-position-border"); + const element = createElement("div", classes, "" + index); + element.setAttribute("title", "" + index); + return element; + }; + + for (let i = 0; i < this.view.numPositions; ++i) { + headerEl.appendChild(elementForPositionIndex(i, this.view.blocksData.isBlockBorder(i))); + } + return headerEl; + } + + private elementForGrid() { + const gridEl = createElement("div", "range-grid"); + gridEl.onscroll = () => { + this.view.scrollHandler.syncScroll(ToSync.TOP, this.view.divs.grid, this.view.divs.yAxis); + this.view.scrollHandler.syncScroll(ToSync.LEFT, + this.view.divs.grid, this.view.divs.wholeHeader); + this.view.scrollHandler.saveScroll(); + }; + return gridEl; + } +} + +// Handles the work required when the phase is changed. +// Between before and after register allocation for example. 
+class PhaseChangeHandler { + view: RangeView; + + constructor(view: RangeView) { + this.view = view; + } + + // Called when the phase view is switched between before and after register allocation. + phaseChange() { + if (!this.view.gridAccessor.hasGrid()) { + // If this phase view has not been seen yet then the intervals need to be constructed. + this.addNewIntervals(); + } + // Show all intervals pertaining to the current phase view. + this.view.intervalsAccessor.forEachInterval((phase, interval) => { + interval.classList.toggle("range-hidden", phase != this.view.sequenceView.currentPhaseIndex); + }); + } + + private addNewIntervals() { + // All Grids should point to the same HTMLElement for empty cells in the grid, + // so as to avoid duplication. The current Grid is used to retrieve these elements. + const currentGrid = this.view.gridAccessor.getAnyGrid(); + const newGrid = new Grid(); + this.view.gridAccessor.addGrid(newGrid); + const source = this.view.sequenceView.sequence.register_allocation; + let row = 0; + for (const [registerIndex, range] of source.liveRanges) { + this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex, + new RangePair([range, undefined])); + ++row; + } + Helper.forEachFixedRange(this.view.sequenceView.sequence.register_allocation, row, + (registerIndex, row, _, ranges) => { + this.addnewIntervalsInRange(currentGrid, newGrid, row, registerIndex, ranges); + }); + } + + private addnewIntervalsInRange(currentGrid: Grid, newGrid: Grid, row: number, + registerIndex: string, ranges: RangePair) { + const numReplacements = new Map(); + + const getElementForEmptyPosition = (position: number) => { + return currentGrid.getCell(row, position); + }; + + // Inserts new interval beside existing intervals. + const callbackForInterval = (position: number, interval: HTMLElement) => { + // Overlapping intervals are placed beside each other and the relevant ones displayed. 
+ let currentInterval = currentGrid.getInterval(row, position); + // The number of intervals already inserted is tracked so that the inserted intervals + // are ordered correctly. + const intervalsAlreadyInserted = numReplacements.get(currentInterval); + numReplacements.set(currentInterval, intervalsAlreadyInserted ? intervalsAlreadyInserted + 1 + : 1); + if (intervalsAlreadyInserted) { + for (let j = 0; j < intervalsAlreadyInserted; ++j) { + currentInterval = (currentInterval.nextElementSibling as HTMLElement); + } + } + interval.classList.add("range-hidden"); + currentInterval.insertAdjacentElement('afterend', interval); + }; + + this.view.rowConstructor.construct(newGrid, row, registerIndex, ranges, + getElementForEmptyPosition, callbackForInterval); + } +} + +enum ToSync { LEFT, TOP } + +// Handles saving and syncing the scroll positions of the grid. +class ScrollHandler { + divs: Divs; + scrollTop: number; + scrollLeft: number; + scrollTopTimeout: NodeJS.Timeout; + scrollLeftTimeout: NodeJS.Timeout; + scrollTopFunc: (this: GlobalEventHandlers, ev: Event) => any; + scrollLeftFunc: (this: GlobalEventHandlers, ev: Event) => any; + + constructor(divs: Divs) { + this.divs = divs; + } + + // This function is used to hide the rows which are not currently in view and + // so reduce the performance cost of things like hit tests and scrolling. + syncHidden() { + + const getOffset = (rowEl: HTMLElement, placeholderRowEl: HTMLElement, isHidden: boolean) => { + return isHidden ? placeholderRowEl.offsetTop : rowEl.offsetTop; + }; + + const toHide = new Array<[HTMLElement, HTMLElement]>(); + + const sampleCell = this.divs.registers.children[1] as HTMLElement; + const buffer = 2 * sampleCell.clientHeight; + const min = this.divs.grid.offsetTop + this.divs.grid.scrollTop - buffer; + const max = min + this.divs.grid.clientHeight + buffer; + + // The rows are grouped by being contained within a group div. 
This is so as to allow + // groups of rows to easily be displayed and hidden with less of a performance cost. + // Each row in the mainGroup div is matched with an equivalent placeholder row in + // the placeholderGroup div that will be shown when mainGroup is hidden so as to maintain + // the dimensions and scroll positions of the grid. + + const rangeGroups = this.divs.grid.children; + for (let i = 1; i < rangeGroups.length; i += 2) { + const mainGroup = rangeGroups[i] as HTMLElement; + const placeholderGroup = rangeGroups[i - 1] as HTMLElement; + const isHidden = mainGroup.classList.contains("range-hidden"); + // The offsets are used to calculate whether the group is in view. + const offsetMin = getOffset(mainGroup.firstChild as HTMLElement, + placeholderGroup.firstChild as HTMLElement, isHidden); + const offsetMax = getOffset(mainGroup.lastChild as HTMLElement, + placeholderGroup.lastChild as HTMLElement, isHidden); + if (offsetMax > min && offsetMin < max) { + if (isHidden) { + // Show the rows, hide the placeholders. + mainGroup.classList.toggle("range-hidden", false); + placeholderGroup.classList.toggle("range-hidden", true); + } + } else if (!isHidden) { + // Only hide the rows once the new rows are shown so that scrollLeft is not lost. + toHide.push([mainGroup, placeholderGroup]); + } + } + for (const [mainGroup, placeholderGroup] of toHide) { + // Hide the rows, show the placeholders. + mainGroup.classList.toggle("range-hidden", true); + placeholderGroup.classList.toggle("range-hidden", false); + } + } + + // This function is required to keep the axes labels in line with the grid + // content when scrolling. + syncScroll(toSync: ToSync, source: HTMLElement, target: HTMLElement) { + // Continually delay timeout until scrolling has stopped. + toSync == ToSync.TOP ? 
clearTimeout(this.scrollTopTimeout) + : clearTimeout(this.scrollLeftTimeout); + if (target.onscroll) { + if (toSync == ToSync.TOP) this.scrollTopFunc = target.onscroll; + else this.scrollLeftFunc = target.onscroll; + } + // Clear onscroll to prevent the target syncing back with the source. + target.onscroll = null; + + if (toSync == ToSync.TOP) target.scrollTop = source.scrollTop; + else target.scrollLeft = source.scrollLeft; + + // Only show / hide the grid content once scrolling has stopped. + if (toSync == ToSync.TOP) { + this.scrollTopTimeout = setTimeout(() => { + target.onscroll = this.scrollTopFunc; + this.syncHidden(); + }, 500); + } else { + this.scrollLeftTimeout = setTimeout(() => { + target.onscroll = this.scrollLeftFunc; + this.syncHidden(); + }, 500); + } + } + + saveScroll() { + this.scrollLeft = this.divs.grid.scrollLeft; + this.scrollTop = this.divs.grid.scrollTop; + } + + restoreScroll() { + if (this.scrollLeft) { + this.divs.grid.scrollLeft = this.scrollLeft; + this.divs.grid.scrollTop = this.scrollTop; + } + } +} + +// RangeView displays the live range data as passed in by SequenceView. +// The data is displayed in a grid format, with the fixed and virtual registers +// along one axis, and the LifeTimePositions along the other. Each LifeTimePosition +// is part of an Instruction in SequenceView, which itself is part of an Instruction +// Block. The live ranges are displayed as intervals, each belonging to a register, +// and spanning across a certain range of LifeTimePositions. +// When the phase being displayed changes between before register allocation and +// after register allocation, only the intervals need to be changed. 
+export class RangeView { + sequenceView: SequenceView; + + initialized: boolean; + isShown: boolean; + numPositions: number; + cssVariables: CSSVariables; + divs: Divs; + rowConstructor: RowConstructor; + phaseChangeHandler: PhaseChangeHandler; + scrollHandler: ScrollHandler; + blocksData: BlocksData; + intervalsAccessor: IntervalElementsAccessor; + gridAccessor: GridAccessor; + + constructor(sequence: SequenceView) { + this.initialized = false; + this.isShown = false; + this.sequenceView = sequence; + } + + initializeContent(blocks: Array) { + if (!this.initialized) { + this.gridAccessor = new GridAccessor(this.sequenceView); + this.intervalsAccessor = new IntervalElementsAccessor(this.sequenceView); + this.cssVariables = new CSSVariables(); + this.blocksData = new BlocksData(blocks); + this.divs = new Divs(); + this.scrollHandler = new ScrollHandler(this.divs); + this.numPositions = this.sequenceView.numInstructions * Constants.POSITIONS_PER_INSTRUCTION; + this.rowConstructor = new RowConstructor(this); + const constructor = new RangeViewConstructor(this); + constructor.construct(); + this.phaseChangeHandler = new PhaseChangeHandler(this); + this.initialized = true; + } else { + // If the RangeView has already been initialized then the phase must have + // been changed. + this.phaseChangeHandler.phaseChange(); + } + } + + show() { + if (!this.isShown) { + this.isShown = true; + this.divs.container.appendChild(this.divs.content); + this.divs.resizerBar.style.visibility = "visible"; + this.divs.container.style.visibility = "visible"; + this.divs.snapper.style.visibility = "visible"; + // Dispatch a resize event to ensure that the + // panel is shown. 
+ window.dispatchEvent(new Event('resize')); + + setTimeout(() => { + this.scrollHandler.restoreScroll(); + this.scrollHandler.syncHidden(); + this.divs.showOnLoad.style.visibility = "visible"; + }, 100); + } + } + + hide() { + if (this.initialized) { + this.isShown = false; + this.divs.container.removeChild(this.divs.content); + this.divs.resizerBar.style.visibility = "hidden"; + this.divs.container.style.visibility = "hidden"; + this.divs.snapper.style.visibility = "hidden"; + this.divs.showOnLoad.style.visibility = "hidden"; + } else { + window.document.getElementById('ranges').style.visibility = "hidden"; + } + // Dispatch a resize event to ensure that the + // panel is hidden. + window.dispatchEvent(new Event('resize')); + } + + onresize() { + if (this.isShown) this.scrollHandler.syncHidden(); + } +} diff --git a/deps/v8/tools/turbolizer/src/resizer.ts b/deps/v8/tools/turbolizer/src/resizer.ts index 4bd771f73136a1..ce0519398bb3ea 100644 --- a/deps/v8/tools/turbolizer/src/resizer.ts +++ b/deps/v8/tools/turbolizer/src/resizer.ts @@ -11,6 +11,8 @@ class Snapper { sourceCollapse: HTMLElement; disassemblyExpand: HTMLElement; disassemblyCollapse: HTMLElement; + rangesExpand: HTMLElement; + rangesCollapse: HTMLElement; constructor(resizer: Resizer) { this.resizer = resizer; @@ -18,6 +20,8 @@ class Snapper { this.sourceCollapse = document.getElementById(C.SOURCE_COLLAPSE_ID); this.disassemblyExpand = document.getElementById(C.DISASSEMBLY_EXPAND_ID); this.disassemblyCollapse = document.getElementById(C.DISASSEMBLY_COLLAPSE_ID); + this.rangesExpand = document.getElementById(C.RANGES_EXPAND_ID); + this.rangesCollapse = document.getElementById(C.RANGES_COLLAPSE_ID); document.getElementById("show-hide-source").addEventListener("click", () => { this.resizer.resizerLeft.classed("snapped", !this.resizer.resizerLeft.classed("snapped")); @@ -29,13 +33,20 @@ class Snapper { this.setDisassemblyExpanded(!this.disassemblyExpand.classList.contains("invisible")); 
this.resizer.updatePanes(); }); + document.getElementById("show-hide-ranges").addEventListener("click", () => { + this.resizer.resizerRanges.classed("snapped", !this.resizer.resizerRanges.classed("snapped")); + this.setRangesExpanded(!this.rangesExpand.classList.contains("invisible")); + this.resizer.updatePanes(); + }); } restoreExpandedState(): void { this.resizer.resizerLeft.classed("snapped", window.sessionStorage.getItem("expandedState-source") == "false"); this.resizer.resizerRight.classed("snapped", window.sessionStorage.getItem("expandedState-disassembly") == "false"); + this.resizer.resizerRanges.classed("snapped", window.sessionStorage.getItem("expandedState-ranges") == "false"); this.setSourceExpanded(this.getLastExpandedState("source", true)); this.setDisassemblyExpanded(this.getLastExpandedState("disassembly", true)); + this.setRangesExpanded(this.getLastExpandedState("ranges", true)); } getLastExpandedState(type: string, defaultState: boolean): boolean { @@ -48,6 +59,7 @@ class Snapper { window.sessionStorage.setItem("expandedState-source", `${isSourceExpanded}`); this.sourceExpand.classList.toggle("invisible", isSourceExpanded); this.sourceCollapse.classList.toggle("invisible", !isSourceExpanded); + document.getElementById("show-hide-ranges").style.marginLeft = isSourceExpanded ? 
null : "40px"; } setSourceExpanded(isSourceExpanded: boolean): void { @@ -65,30 +77,53 @@ class Snapper { this.disassemblyUpdate(isDisassemblyExpanded); this.resizer.updateRightWidth(); } + + rangesUpdate(isRangesExpanded: boolean): void { + window.sessionStorage.setItem("expandedState-ranges", `${isRangesExpanded}`); + this.rangesExpand.classList.toggle("invisible", isRangesExpanded); + this.rangesCollapse.classList.toggle("invisible", !isRangesExpanded); + } + + setRangesExpanded(isRangesExpanded: boolean): void { + this.rangesUpdate(isRangesExpanded); + this.resizer.updateRanges(); + } } export class Resizer { snapper: Snapper; deadWidth: number; + deadHeight: number; left: HTMLElement; right: HTMLElement; + ranges: HTMLElement; + middle: HTMLElement; sepLeft: number; sepRight: number; + sepRangesHeight: number; panesUpdatedCallback: () => void; resizerRight: d3.Selection; resizerLeft: d3.Selection; + resizerRanges: d3.Selection; private readonly SOURCE_PANE_DEFAULT_PERCENT = 1 / 4; private readonly DISASSEMBLY_PANE_DEFAULT_PERCENT = 3 / 4; + private readonly RANGES_PANE_HEIGHT_DEFAULT_PERCENT = 3 / 4; + private readonly RESIZER_RANGES_HEIGHT_BUFFER_PERCENTAGE = 5; + private readonly RESIZER_SIZE = document.getElementById("resizer-ranges").offsetHeight; - constructor(panesUpdatedCallback: () => void, deadWidth: number) { + constructor(panesUpdatedCallback: () => void, deadWidth: number, deadHeight: number) { const resizer = this; resizer.panesUpdatedCallback = panesUpdatedCallback; resizer.deadWidth = deadWidth; + resizer.deadHeight = deadHeight; resizer.left = document.getElementById(C.SOURCE_PANE_ID); resizer.right = document.getElementById(C.GENERATED_PANE_ID); + resizer.ranges = document.getElementById(C.RANGES_PANE_ID); + resizer.middle = document.getElementById("middle"); resizer.resizerLeft = d3.select('#resizer-left'); resizer.resizerRight = d3.select('#resizer-right'); + resizer.resizerRanges = d3.select('#resizer-ranges'); // Set default sizes, if they 
weren't set. if (window.sessionStorage.getItem("source-pane-percent") === null) { window.sessionStorage.setItem("source-pane-percent", `${this.SOURCE_PANE_DEFAULT_PERCENT}`); @@ -96,8 +131,11 @@ export class Resizer { if (window.sessionStorage.getItem("disassembly-pane-percent") === null) { window.sessionStorage.setItem("disassembly-pane-percent", `${this.DISASSEMBLY_PANE_DEFAULT_PERCENT}`); } + if (window.sessionStorage.getItem("ranges-pane-height-percent") === null) { + window.sessionStorage.setItem("ranges-pane-height-percent", `${this.RANGES_PANE_HEIGHT_DEFAULT_PERCENT}`); + } - this.updateWidths(); + this.updateSizes(); const dragResizeLeft = d3.drag() .on('drag', function () { @@ -151,8 +189,35 @@ export class Resizer { resizer.resizerRight.classed("dragged", false); }); resizer.resizerRight.call(dragResizeRight); + + const dragResizeRanges = d3.drag() + .on('drag', function () { + const y = d3.mouse(this.parentElement)[1]; + resizer.sepRangesHeight = Math.max(100, Math.min(y, window.innerHeight) - resizer.RESIZER_RANGES_HEIGHT_BUFFER_PERCENTAGE); + resizer.updatePanes(); + }) + .on('start', function () { + resizer.resizerRanges.classed("dragged", true); + }) + .on('end', function () { + // If the panel is close enough to the bottom, treat it as if it was pulled all the way to the bottom. + const y = d3.mouse(this.parentElement)[1]; + if (y >= (window.innerHeight - deadHeight)) { + resizer.sepRangesHeight = window.innerHeight; + resizer.updatePanes(); + } + // Snap if dragged all the way to the bottom. 
+ resizer.resizerRanges.classed("snapped", resizer.sepRangesHeight >= window.innerHeight - 1); + if (!resizer.isRangesSnapped()) { + window.sessionStorage.setItem("ranges-pane-height-percent", `${resizer.sepRangesHeight / window.innerHeight}`); + } + resizer.snapper.setRangesExpanded(!resizer.isRangesSnapped()); + resizer.resizerRanges.classed("dragged", false); + }); + resizer.resizerRanges.call(dragResizeRanges); + window.onresize = function () { - resizer.updateWidths(); + resizer.updateSizes(); resizer.updatePanes(); }; resizer.snapper = new Snapper(resizer); @@ -167,15 +232,70 @@ export class Resizer { return this.resizerRight.classed("snapped"); } + isRangesSnapped() { + return this.resizerRanges.classed("snapped"); + } + + updateRangesPane() { + const clientHeight = window.innerHeight; + const rangesIsHidden = this.ranges.style.visibility == "hidden"; + let resizerSize = this.RESIZER_SIZE; + if (rangesIsHidden) { + resizerSize = 0; + this.sepRangesHeight = clientHeight; + } + + const rangeHeight = clientHeight - this.sepRangesHeight; + this.ranges.style.height = rangeHeight + 'px'; + const panelWidth = this.sepRight - this.sepLeft - (2 * resizerSize); + this.ranges.style.width = panelWidth + 'px'; + const multiview = document.getElementById("multiview"); + if (multiview && multiview.style) { + multiview.style.height = (this.sepRangesHeight - resizerSize) + 'px'; + multiview.style.width = panelWidth + 'px'; + } + + // Resize the range grid and labels. + const rangeGrid = (this.ranges.getElementsByClassName("range-grid")[0] as HTMLElement); + if (rangeGrid) { + const yAxis = (this.ranges.getElementsByClassName("range-y-axis")[0] as HTMLElement); + const rangeHeader = (this.ranges.getElementsByClassName("range-header")[0] as HTMLElement); + + const gridWidth = panelWidth - yAxis.clientWidth; + rangeGrid.style.width = Math.floor(gridWidth - 1) + 'px'; + // Take live ranges' right scrollbar into account. 
+ rangeHeader.style.width = (gridWidth - rangeGrid.offsetWidth + rangeGrid.clientWidth - 1) + 'px'; + // Set resizer to horizontal. + this.resizerRanges.style('width', panelWidth + 'px'); + + const rangeTitle = (this.ranges.getElementsByClassName("range-title-div")[0] as HTMLElement); + const rangeHeaderLabel = (this.ranges.getElementsByClassName("range-header-label-x")[0] as HTMLElement); + const gridHeight = rangeHeight - rangeHeader.clientHeight - rangeTitle.clientHeight - rangeHeaderLabel.clientHeight; + rangeGrid.style.height = gridHeight + 'px'; + // Take live ranges' bottom scrollbar into account. + yAxis.style.height = (gridHeight - rangeGrid.offsetHeight + rangeGrid.clientHeight) + 'px'; + } + this.resizerRanges.style('ranges', this.ranges.style.height); + } + updatePanes() { this.left.style.width = this.sepLeft + 'px'; this.resizerLeft.style('left', this.sepLeft + 'px'); this.right.style.width = (document.body.getBoundingClientRect().width - this.sepRight) + 'px'; this.resizerRight.style('right', (document.body.getBoundingClientRect().width - this.sepRight - 1) + 'px'); - + this.updateRangesPane(); this.panesUpdatedCallback(); } + updateRanges() { + if (this.isRangesSnapped()) { + this.sepRangesHeight = window.innerHeight; + } else { + const sepRangesHeight = window.sessionStorage.getItem("ranges-pane-height-percent"); + this.sepRangesHeight = window.innerHeight * Number.parseFloat(sepRangesHeight); + } + } + updateLeftWidth() { if (this.isLeftSnapped()) { this.sepLeft = 0; @@ -194,8 +314,9 @@ export class Resizer { } } - updateWidths() { + updateSizes() { this.updateLeftWidth(); this.updateRightWidth(); + this.updateRanges(); } } diff --git a/deps/v8/tools/turbolizer/src/sequence-view.ts b/deps/v8/tools/turbolizer/src/sequence-view.ts index 49b7e9f7b2af01..187b162b1cdb30 100644 --- a/deps/v8/tools/turbolizer/src/sequence-view.ts +++ b/deps/v8/tools/turbolizer/src/sequence-view.ts @@ -3,12 +3,21 @@ // found in the LICENSE file. 
import { Sequence } from "../src/source-resolver"; -import { isIterable } from "../src/util"; +import { createElement } from "../src/util"; import { TextView } from "../src/text-view"; +import { RangeView } from "../src/range-view"; export class SequenceView extends TextView { sequence: Sequence; searchInfo: Array; + phaseSelect: HTMLSelectElement; + numInstructions: number; + currentPhaseIndex: number; + phaseIndexes: Set; + isShown: boolean; + rangeView: RangeView; + showRangeView: boolean; + toggleRangeViewEl: HTMLElement; createViewElement() { const pane = document.createElement('div'); @@ -20,6 +29,12 @@ export class SequenceView extends TextView { constructor(parentId, broker) { super(parentId, broker); + this.numInstructions = 0; + this.phaseIndexes = new Set(); + this.isShown = false; + this.showRangeView = false; + this.rangeView = null; + this.toggleRangeViewEl = this.elementForToggleRangeView(); } attachSelection(s) { @@ -37,34 +52,58 @@ export class SequenceView extends TextView { return this.selection.detachSelection(); } + show() { + this.currentPhaseIndex = this.phaseSelect.selectedIndex; + if (!this.isShown) { + this.isShown = true; + this.phaseIndexes.add(this.currentPhaseIndex); + this.container.appendChild(this.divNode); + this.container.getElementsByClassName("graph-toolbox")[0].appendChild(this.toggleRangeViewEl); + } + if (this.showRangeView) this.rangeView.show(); + } + + hide() { + // A single SequenceView object is used for two phases (i.e before and after + // register allocation), tracking the indexes lets the redundant hides and + // shows be avoided when switching between the two. 
+ this.currentPhaseIndex = this.phaseSelect.selectedIndex; + if (!this.phaseIndexes.has(this.currentPhaseIndex)) { + this.isShown = false; + this.container.removeChild(this.divNode); + this.container.getElementsByClassName("graph-toolbox")[0].removeChild(this.toggleRangeViewEl); + if (this.showRangeView) this.rangeView.hide(); + } + } + + onresize() { + if (this.showRangeView) this.rangeView.onresize(); + } + initializeContent(data, rememberedSelection) { this.divNode.innerHTML = ''; this.sequence = data.sequence; this.searchInfo = []; - this.divNode.addEventListener('click', (e: MouseEvent) => { + this.divNode.onclick = (e: MouseEvent) => { if (!(e.target instanceof HTMLElement)) return; const instructionId = Number.parseInt(e.target.dataset.instructionId, 10); if (!instructionId) return; if (!e.shiftKey) this.broker.broadcastClear(null); this.broker.broadcastInstructionSelect(null, [instructionId], true); - }); + }; + this.phaseSelect = (document.getElementById('phase-select') as HTMLSelectElement); + this.currentPhaseIndex = this.phaseSelect.selectedIndex; + this.addBlocks(this.sequence.blocks); + const lastBlock = this.sequence.blocks[this.sequence.blocks.length - 1]; + this.numInstructions = lastBlock.instructions[lastBlock.instructions.length - 1].id + 1; + this.addRangeView(); this.attachSelection(rememberedSelection); this.show(); } elementForBlock(block) { const view = this; - function createElement(tag: string, cls: string | Array, content?: string) { - const el = document.createElement(tag); - if (isIterable(cls)) { - for (const c of cls) el.classList.add(c); - } else { - el.classList.add(cls); - } - if (content != undefined) el.innerHTML = content; - return el; - } function mkLinkHandler(id, handler) { return function (e) { @@ -84,16 +123,33 @@ export class SequenceView extends TextView { return mkLinkHandler(text, view.selectionHandler); } + function elementForOperandWithSpan(span, text, searchInfo, isVirtual) { + const selectionText = isVirtual ? 
"virt_" + text : text; + span.onclick = mkOperandLinkHandler(selectionText); + searchInfo.push(text); + view.addHtmlElementForNodeId(selectionText, span); + const container = createElement("div", ""); + container.appendChild(span); + return container; + } + function elementForOperand(operand, searchInfo) { - const text = operand.text; - const operandEl = createElement("div", ["parameter", "tag", "clickable", operand.type], text); + let isVirtual = false; + let className = "parameter tag clickable " + operand.type; + if (operand.text[0] == 'v' && !(operand.tooltip && operand.tooltip.includes("Float"))) { + isVirtual = true; + className += " virtual-reg"; + } + const span = createElement("span", className, operand.text); if (operand.tooltip) { - operandEl.setAttribute("title", operand.tooltip); + span.setAttribute("title", operand.tooltip); } - operandEl.onclick = mkOperandLinkHandler(text); - searchInfo.push(text); - view.addHtmlElementForNodeId(text, operandEl); - return operandEl; + return elementForOperandWithSpan(span, operand.text, searchInfo, isVirtual); + } + + function elementForPhiOperand(text, searchInfo) { + const span = createElement("span", "parameter tag clickable virtual-reg", text); + return elementForOperandWithSpan(span, text, searchInfo, true); } function elementForInstruction(instruction, searchInfo) { @@ -115,7 +171,7 @@ export class SequenceView extends TextView { const gapEl = createElement("div", "gap", "gap"); let hasGaps = false; for (const gap of instruction.gaps) { - const moves = createElement("div", ["comma-sep-list", "gap-move"]); + const moves = createElement("div", "comma-sep-list gap-move"); for (const move of gap) { hasGaps = true; const moveEl = createElement("div", "move"); @@ -137,7 +193,7 @@ export class SequenceView extends TextView { instContentsEl.appendChild(instEl); if (instruction.outputs.length > 0) { - const outputs = createElement("div", ["comma-sep-list", "input-output-list"]); + const outputs = createElement("div", 
"comma-sep-list input-output-list"); for (const output of instruction.outputs) { const outputEl = elementForOperand(output, searchInfo); outputs.appendChild(outputEl); @@ -147,8 +203,8 @@ export class SequenceView extends TextView { instEl.appendChild(assignEl); } - let text = instruction.opcode + instruction.flags; - const instLabel = createElement("div", "node-label", text) + const text = instruction.opcode + instruction.flags; + const instLabel = createElement("div", "node-label", text); if (instruction.opcode == "ArchNop" && instruction.outputs.length == 1 && instruction.outputs[0].tooltip) { instLabel.innerText = instruction.outputs[0].tooltip; } @@ -158,7 +214,7 @@ export class SequenceView extends TextView { instEl.appendChild(instLabel); if (instruction.inputs.length > 0) { - const inputs = createElement("div", ["comma-sep-list", "input-output-list"]); + const inputs = createElement("div", "comma-sep-list input-output-list"); for (const input of instruction.inputs) { const inputEl = elementForOperand(input, searchInfo); inputs.appendChild(inputEl); @@ -167,7 +223,7 @@ export class SequenceView extends TextView { } if (instruction.temps.length > 0) { - const temps = createElement("div", ["comma-sep-list", "input-output-list", "temps"]); + const temps = createElement("div", "comma-sep-list input-output-list temps"); for (const temp of instruction.temps) { const tempEl = elementForOperand(temp, searchInfo); temps.appendChild(tempEl); @@ -181,12 +237,12 @@ export class SequenceView extends TextView { const sequenceBlock = createElement("div", "schedule-block"); sequenceBlock.classList.toggle("deferred", block.deferred); - const blockId = createElement("div", ["block-id", "com", "clickable"], block.id); + const blockId = createElement("div", "block-id com clickable", block.id); blockId.onclick = mkBlockLinkHandler(block.id); sequenceBlock.appendChild(blockId); - const blockPred = createElement("div", ["predecessor-list", "block-list", "comma-sep-list"]); + const 
blockPred = createElement("div", "predecessor-list block-list comma-sep-list"); for (const pred of block.predecessors) { - const predEl = createElement("div", ["block-id", "com", "clickable"], pred); + const predEl = createElement("div", "block-id com clickable", pred); predEl.onclick = mkBlockLinkHandler(pred); blockPred.appendChild(predEl); } @@ -211,7 +267,7 @@ export class SequenceView extends TextView { phiEl.appendChild(assignEl); for (const input of phi.operands) { - const inputEl = createElement("div", ["parameter", "tag", "clickable"], input); + const inputEl = elementForPhiOperand(input, this.searchInfo); phiEl.appendChild(inputEl); } } @@ -221,9 +277,9 @@ export class SequenceView extends TextView { instructions.appendChild(elementForInstruction(instruction, this.searchInfo)); } sequenceBlock.appendChild(instructions); - const blockSucc = createElement("div", ["successor-list", "block-list", "comma-sep-list"]); + const blockSucc = createElement("div", "successor-list block-list comma-sep-list"); for (const succ of block.successors) { - const succEl = createElement("div", ["block-id", "com", "clickable"], succ); + const succEl = createElement("div", "block-id com clickable", succ); succEl.onclick = mkBlockLinkHandler(succ); blockSucc.appendChild(succEl); } @@ -239,6 +295,63 @@ export class SequenceView extends TextView { } } + addRangeView() { + const preventRangeView = reason => { + const toggleRangesInput = this.toggleRangeViewEl.firstChild as HTMLInputElement; + if (this.rangeView) { + toggleRangesInput.checked = false; + this.toggleRangeView(toggleRangesInput); + } + toggleRangesInput.disabled = true; + this.toggleRangeViewEl.style.textDecoration = "line-through"; + this.toggleRangeViewEl.setAttribute("title", reason); + }; + + if (this.sequence.register_allocation) { + if (!this.rangeView) { + this.rangeView = new RangeView(this); + } + const source = this.sequence.register_allocation; + if (source.fixedLiveRanges.size == 0 && source.liveRanges.size 
== 0) { + preventRangeView("No live ranges to show"); + } else if (this.numInstructions >= 249) { + // This is due to the css grid-column being limited to 1000 columns. + // Performance issues would otherwise impose some limit. + // TODO(george.wort@arm.com): Allow the user to specify an instruction range + // to display that spans less than 249 instructions. + preventRangeView( + "Live range display is only supported for sequences with less than 249 instructions"); + } + if (this.showRangeView) { + this.rangeView.initializeContent(this.sequence.blocks); + } + } else { + preventRangeView("No live range data provided"); + } + } + + elementForToggleRangeView() { + const toggleRangeViewEl = createElement("label", "", "show live ranges"); + const toggleRangesInput = createElement("input", "range-toggle-show") as HTMLInputElement; + toggleRangesInput.setAttribute("type", "checkbox"); + toggleRangesInput.oninput = () => this.toggleRangeView(toggleRangesInput); + toggleRangeViewEl.insertBefore(toggleRangesInput, toggleRangeViewEl.firstChild); + return toggleRangeViewEl; + } + + toggleRangeView(toggleRangesInput: HTMLInputElement) { + toggleRangesInput.disabled = true; + this.showRangeView = toggleRangesInput.checked; + if (this.showRangeView) { + this.rangeView.initializeContent(this.sequence.blocks); + this.rangeView.show(); + } else { + this.rangeView.hide(); + } + window.dispatchEvent(new Event('resize')); + toggleRangesInput.disabled = false; + } + searchInputAction(searchBar, e) { e.stopPropagation(); this.selectionHandler.clear(); diff --git a/deps/v8/tools/turbolizer/src/source-resolver.ts b/deps/v8/tools/turbolizer/src/source-resolver.ts index 588eea5b9955cc..085b44f3a7506b 100644 --- a/deps/v8/tools/turbolizer/src/source-resolver.ts +++ b/deps/v8/tools/turbolizer/src/source-resolver.ts @@ -83,7 +83,7 @@ interface InstructionsPhase { instructionOffsetToPCOffset?: any; blockIdtoInstructionRange?: any; nodeIdToInstructionRange?: any; - codeOffsetsInfo?: 
CodeOffsetsInfo + codeOffsetsInfo?: CodeOffsetsInfo; } interface GraphPhase { @@ -100,8 +100,44 @@ export interface Schedule { nodes: Array; } +export class Interval { + start: number; + end: number; + + constructor(numbers: [number, number]) { + this.start = numbers[0]; + this.end = numbers[1]; + } +} + +export interface ChildRange { + id: string; + type: string; + op: any; + intervals: Array<[number, number]>; + uses: Array; +} + +export interface Range { + child_ranges: Array; + is_deferred: boolean; +} + +export class RegisterAllocation { + fixedDoubleLiveRanges: Map; + fixedLiveRanges: Map; + liveRanges: Map; + + constructor(registerAllocation) { + this.fixedDoubleLiveRanges = new Map(Object.entries(registerAllocation.fixed_double_live_ranges)); + this.fixedLiveRanges = new Map(Object.entries(registerAllocation.fixed_live_ranges)); + this.liveRanges = new Map(Object.entries(registerAllocation.live_ranges)); + } +} + export interface Sequence { blocks: Array; + register_allocation: RegisterAllocation; } class CodeOffsetsInfo { @@ -720,8 +756,11 @@ export class SourceResolver { phase.schedule = state; return phase; } + parseSequence(phase) { - phase.sequence = { blocks: phase.blocks }; + phase.sequence = { blocks: phase.blocks, + register_allocation: phase.register_allocation ? 
new RegisterAllocation(phase.register_allocation) + : undefined }; return phase; } } diff --git a/deps/v8/tools/turbolizer/src/turbo-visualizer.ts b/deps/v8/tools/turbolizer/src/turbo-visualizer.ts index 22753cdda50d4b..2dd01c28f74d4a 100644 --- a/deps/v8/tools/turbolizer/src/turbo-visualizer.ts +++ b/deps/v8/tools/turbolizer/src/turbo-visualizer.ts @@ -18,7 +18,7 @@ window.onload = function () { let sourceViews: Array = []; let selectionBroker: SelectionBroker = null; let sourceResolver: SourceResolver = null; - const resizer = new Resizer(panesUpdatedCallback, 75); + const resizer = new Resizer(panesUpdatedCallback, 75, 75); const sourceTabsContainer = document.getElementById(C.SOURCE_PANE_ID); const sourceTabs = new Tabs(sourceTabsContainer); sourceTabs.addTab("+").classList.add("last-tab", "persistent-tab"); @@ -48,6 +48,9 @@ window.onload = function () { sourceViews.forEach(sv => sv.hide()); if (multiview) multiview.hide(); multiview = null; + document.getElementById("ranges").innerHTML = ''; + document.getElementById('ranges').style.visibility = "hidden"; + document.getElementById('show-hide-ranges').style.visibility = "hidden"; if (disassemblyView) disassemblyView.hide(); sourceViews = []; sourceResolver = new SourceResolver(); diff --git a/deps/v8/tools/turbolizer/src/util.ts b/deps/v8/tools/turbolizer/src/util.ts index d9c8dcdce05a80..8d2fc845115ac7 100644 --- a/deps/v8/tools/turbolizer/src/util.ts +++ b/deps/v8/tools/turbolizer/src/util.ts @@ -91,3 +91,10 @@ export function measureText(text: string) { export function interpolate(val: number, max: number, start: number, end: number) { return start + (end - start) * (val / max); } + +export function createElement(tag: string, cls: string, content?: string) { + const el = document.createElement(tag); + el.className = cls; + if (content != undefined) el.innerText = content; + return el; +} diff --git a/deps/v8/tools/turbolizer/turbo-visualizer-ranges.css b/deps/v8/tools/turbolizer/turbo-visualizer-ranges.css 
new file mode 100644 index 00000000000000..03976e2ec5408d --- /dev/null +++ b/deps/v8/tools/turbolizer/turbo-visualizer-ranges.css @@ -0,0 +1,238 @@ +/* CSS specific to the live ranges div associated with + the RangeView typescript class in src/range-view.ts. */ + +:root { + --range-y-axis-width: 18ch; + --range-position-width: 3.5ch; + --range-block-border: 6px; + --range-instr-border: 3px; + --range-position-border: 1px; +} + +.range-bold { + font-weight: bold; + color: black; +} + +#ranges { + font-family: monospace; + min-height: auto; + overflow: hidden; +} + +#resizer-ranges { + height: 10px; +} + +.range-title-div { + padding: 2ch 2ch 2ch 2ch; + white-space: nowrap; + overflow: auto; +} + +.range-title { + text-decoration: underline; + font-weight: bold; + font-size: large; + display: inline-block; +} + +.range-title-help { + margin-left: 2ch; + width: 1ch; + padding: 0 0.25ch; + border: 1px dotted black; + color: slategray; + display: inline-block; +} + +input.range-toggle-show { + vertical-align: middle; +} + +.range-header-label-x { + text-align: center; + margin-left: 13ch; +} + +.range-header-label-y { + width: 11ch; + float: left; + white-space: pre-wrap; + word-wrap: break-word; + margin-left: 6ch; + margin-top: 4ch; +} + +.range-y-axis { + display: inline-block; + width: var(--range-y-axis-width); + overflow: hidden; + white-space: nowrap; + vertical-align: top; +} + +.range-header { + display: flex; + overflow: hidden; + height: 8ch; + margin-left: var(--range-y-axis-width); +} + +.range-position-labels, +.range-register-labels { + background-color: lightgray; +} + +.range-register-labels { + float: right; +} + +.range-position-labels { + margin-top: auto; +} + +.range-registers { + float: right; + overflow: hidden; + text-align: right; +} + +.range-positions-header, +.range-instruction-ids, +.range-block-ids { + overflow: hidden; + white-space: nowrap; + display: grid; + grid-gap: 0; +} + +.range-reg { + width: 13ch; + text-align: right; +} + 
+.range-reg::after { + content: ":"; +} + +.range-grid { + overflow: auto; + display: inline-block; + white-space: nowrap; +} + +.range-block-id { + display: inline-block; + text-align: center; +} + +.range-instruction-id { + display: inline-block; + text-align: center; +} + +.range-position { + display: inline-block; + text-align: center; + z-index: 1; +} + +.range-transparent, +.range-position.range-empty { + color: transparent; +} + +.range-block-id:hover, +.range-instruction-id:hover, +.range-reg:hover, +.range-position:hover { + background-color: rgba(0, 0, 255, 0.10); +} + +.range-position.range-header-element { + border-bottom: 2px solid rgb(109, 107, 107); +} + +.range-block-id, +.range-instruction-id, +.range-reg, +.range-interval, +.range-position { + position: relative; + border: var(--range-position-border) solid rgb(109, 107, 107); +} + +.range-block-id, +.range-instruction-id, +.range-interval, +.range-position { + border-left: 0; +} + +.range-block-ids > .range-block-id:first-child, +.range-instruction-ids > .range-instruction-id:first-child, +.range-positions > .range-position:first-child { + border-left: var(--range-position-border) solid rgb(109, 107, 107); +} + +.range-position.range-interval-position { + border: none; +} + +.range-interval-text { + position: absolute; + padding-left: 0.5ch; + z-index: 2; + pointer-events: none +} + +.range-position.range-use { + border-left: var(--range-instr-border) solid red; +} + +.range-block-border, +.range-block-border.range-position.range-interval-position:last-child { + border-right: var(--range-block-border) solid rgb(109, 107, 107); +} + +.range-block-border.range-position.range-interval-position { + border-right: var(--range-block-border) solid transparent; +} + +.range-instr-border, +.range-instr-border.range-position.range-interval-position:last-child { + border-right: var(--range-instr-border) solid rgb(109, 107, 107); +} + +.range-instr-border.range-position.range-interval-position { + 
border-right: var(--range-instr-border) solid transparent; +} + +.range, +.range-interval, +.range-interval-wrapper, +.range-positions { + white-space: nowrap; + display: inline-block; +} + +.range-interval-wrapper, +.range-positions { + display: grid; + grid-gap: 0; +} + +.range-interval { + background-color: rgb(153, 158, 168); +} + +.range-hidden { + display: none !important; +} + +.range-positions-placeholder { + width: 100%; + border: var(--range-position-border) solid transparent; + color: transparent; +} \ No newline at end of file diff --git a/deps/v8/tools/turbolizer/turbo-visualizer.css b/deps/v8/tools/turbolizer/turbo-visualizer.css index 6fb6da3b79432c..c7da769eb5da77 100644 --- a/deps/v8/tools/turbolizer/turbo-visualizer.css +++ b/deps/v8/tools/turbolizer/turbo-visualizer.css @@ -342,6 +342,13 @@ input:hover, background-color: #F8F8F8; user-select: none; flex: 1; + z-index: 7; +} + +#middle.display-inline-flex, +#middle.display-inline-flex #multiview, +#middle.display-inline-flex #ranges { + display: inline-flex; } .viewpane { @@ -351,11 +358,6 @@ input:hover, flex-direction: column; } -.multiview { - width: 100%; -} - - #show-hide-disassembly { right: 0; } @@ -423,6 +425,10 @@ text { dominant-baseline: text-before-edge; } +.tab-content { + z-index: 6; +} + .resizer { z-index: 10; width: 10px; @@ -595,6 +601,10 @@ text { padding-right: .5ex; } +.instruction span { + padding-right: 0; +} + .phi-label, .instruction-id { display: inline-block; @@ -626,6 +636,10 @@ text { display: inline-block; } +.phi span { + padding-right: 0; +} + .gap .gap-move { padding-left: .5ex; padding-right: .5ex; @@ -639,6 +653,10 @@ text { content: ")"; } +.virtual-reg { + outline: 1px dotted blue; +} + .parameter.constant { outline: 1px dotted red; } diff --git a/deps/v8/tools/turbolizer/up-arrow.png b/deps/v8/tools/turbolizer/up-arrow.png new file mode 100644 index 00000000000000..68cb14e80b3371 Binary files /dev/null and b/deps/v8/tools/turbolizer/up-arrow.png differ diff 
--git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py index 6cd63ac2b66798..28f71b2b339115 100755 --- a/deps/v8/tools/unittests/run_perf_test.py +++ b/deps/v8/tools/unittests/run_perf_test.py @@ -90,6 +90,21 @@ 'units': 'ms', } + +class UnitTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + sys.path.insert(0, BASE_DIR) + import run_perf + global run_perf + + def testBuildDirectory(self): + base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out') + expected_path = os.path.join(base_path, 'build') + self.assertEquals( + expected_path, run_perf.find_build_directory(base_path, 'x64')) + + class PerfTest(unittest.TestCase): @classmethod def setUpClass(cls): @@ -125,6 +140,7 @@ def _WriteTestInput(self, json_content): f.write(json.dumps(json_content)) def _MockCommand(self, *args, **kwargs): + on_bots = kwargs.pop('on_bots', False) # Fake output for each test run. test_outputs = [Output(stdout=arg, timed_out=kwargs.get('timed_out', False), @@ -142,6 +158,16 @@ def execute(*args, **kwargs): run_perf.command, 'PosixCommand', mock.MagicMock(side_effect=create_cmd)).start() + build_dir = 'Release' if on_bots else 'x64.release' + out_dirs = ['out', 'out-secondary'] + return_values = [ + os.path.join(os.path.dirname(BASE_DIR), out, build_dir) + for out in out_dirs + ] + mock.patch.object( + run_perf, 'find_build_directory', + mock.MagicMock(side_effect=return_values)).start() + # Check that d8 is called from the correct cwd for each test run. 
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]] def chdir(*args, **kwargs): @@ -394,11 +420,12 @@ def testTwoRunsStdDevRegExp(self): def testBuildbot(self): self._WriteTestInput(V8_JSON) - self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n']) + self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(0, self._CallMain('--buildbot')) + self.assertEqual(0, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'Richards', 'results': [1.234], 'stddev': ''}, {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''}, @@ -410,11 +437,12 @@ def testBuildbotWithTotal(self): test_input = dict(V8_JSON) test_input['total'] = True self._WriteTestInput(test_input) - self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n']) + self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(0, self._CallMain('--buildbot')) + self.assertEqual(0, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'Richards', 'results': [1.234], 'stddev': ''}, {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''}, @@ -427,11 +455,12 @@ def testBuildbotWithTotalAndErrors(self): test_input = dict(V8_JSON) test_input['total'] = True self._WriteTestInput(test_input) - self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n']) + self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(1, self._CallMain('--buildbot')) + self.assertEqual(1, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'DeltaBlue', 'results': 
[10657567.0], 'stddev': ''}, ]) @@ -484,6 +513,7 @@ def testAndroid(self): mock.patch('run_perf.AndroidPlatform.PreExecution').start() mock.patch('run_perf.AndroidPlatform.PostExecution').start() mock.patch('run_perf.AndroidPlatform.PreTests').start() + mock.patch('run_perf.find_build_directory').start() mock.patch( 'run_perf.AndroidPlatform.Run', return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'), diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py index 3fc91b8e90f023..8b3275172d55a4 100755 --- a/deps/v8/tools/unittests/run_tests_test.py +++ b/deps/v8/tools/unittests/run_tests_test.py @@ -67,7 +67,7 @@ def temp_base(baseroot='testroot1'): """ basedir = os.path.join(TEST_DATA_ROOT, baseroot) with temp_dir() as tempbase: - builddir = os.path.join(tempbase, 'out', 'Release') + builddir = os.path.join(tempbase, 'out', 'build') testroot = os.path.join(tempbase, 'test') os.makedirs(builddir) shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir) @@ -112,7 +112,7 @@ def run_tests(basedir, *args, **kwargs): def override_build_config(basedir, **kwargs): """Override the build config with new values provided as kwargs.""" - path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json') + path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json') with open(path) as f: config = json.load(f) config.update(kwargs) @@ -171,7 +171,6 @@ def testPass(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', '--time', @@ -189,7 +188,6 @@ def testShardedProc(self): for shard in [1, 2]: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', '--shard-count=2', @@ -220,7 +218,6 @@ def testSharded(self): for shard in [1, 2]: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', '--shard-count=2', @@ -239,7 +236,6 @@ def 
testFail(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', 'sweet/strawberries', @@ -252,7 +248,7 @@ def check_cleaned_json_output( self, expected_results_name, actual_json, basedir): # Check relevant properties of the json output. with open(actual_json) as f: - json_output = json.load(f)[0] + json_output = json.load(f) # Replace duration in actual output as it's non-deterministic. Also # replace the python executable prefix as it has a different absolute @@ -285,7 +281,6 @@ def testFailWithRerunAndJSON(self): json_path = os.path.join(basedir, 'out.json') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--rerun-failures-count=2', @@ -314,7 +309,6 @@ def testFlakeWithRerunAndJSON(self): json_path = os.path.join(basedir, 'out.json') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--rerun-failures-count=2', @@ -346,7 +340,6 @@ def testAutoDetect(self): v8_enable_pointer_compression=False) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -371,7 +364,6 @@ def testSkips(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=nooptimization', 'sweet/strawberries', @@ -385,7 +377,6 @@ def testRunSkips(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=nooptimization', '--run-skipped', @@ -402,7 +393,6 @@ def testDefault(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', infra_staging=False, ) self.assertIn('0 tests ran', result.stdout, result) @@ -410,24 +400,15 @@ def testDefault(self): def testNoBuildConfig(self): """Test failing run when build config is not found.""" - with temp_base() as basedir: + with temp_dir() as basedir: result = run_tests(basedir) 
self.assertIn('Failed to load build config', result.stdout, result) self.assertEqual(5, result.returncode, result) - def testInconsistentMode(self): - """Test failing run when attempting to wrongly override the mode.""" - with temp_base() as basedir: - override_build_config(basedir, is_debug=True) - result = run_tests(basedir, '--mode=Release') - self.assertIn('execution mode (release) for release is inconsistent ' - 'with build config (debug)', result.stdout, result) - self.assertEqual(5, result.returncode, result) - def testInconsistentArch(self): """Test failing run when attempting to wrongly override the arch.""" with temp_base() as basedir: - result = run_tests(basedir, '--mode=Release', '--arch=ia32') + result = run_tests(basedir, '--arch=ia32') self.assertIn( '--arch value (ia32) inconsistent with build config (x64).', result.stdout, result) @@ -436,13 +417,13 @@ def testInconsistentArch(self): def testWrongVariant(self): """Test using a bogus variant.""" with temp_base() as basedir: - result = run_tests(basedir, '--mode=Release', '--variants=meh') + result = run_tests(basedir, '--variants=meh') self.assertEqual(5, result.returncode, result) def testModeFromBuildConfig(self): """Test auto-detection of mode from build config.""" with temp_base() as basedir: - result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas') + result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas') self.assertIn('Running tests for x64.release', result.stdout, result) self.assertEqual(0, result.returncode, result) @@ -455,7 +436,6 @@ def testReport(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--variants=default', 'sweet', '--report', @@ -471,7 +451,6 @@ def testWarnUnusedRules(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--variants=default,nooptimization', 'sweet', '--warn-unused', @@ -486,7 +465,6 @@ def testCatNoSources(self): with temp_base() as basedir: result = run_tests( 
basedir, - '--mode=Release', '--variants=default', 'sweet/bananas', '--cat', @@ -505,7 +483,6 @@ def testPredictable(self): override_build_config(basedir, v8_enable_verify_predictable=True) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -524,7 +501,6 @@ def testSlowArch(self): override_build_config(basedir, v8_target_cpu='arm64') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -538,7 +514,6 @@ def testRandomSeedStressWithDefault(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed-stress-count=2', @@ -553,7 +528,6 @@ def testRandomSeedStressWithSeed(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed-stress-count=2', @@ -577,7 +551,6 @@ def testSpecificVariants(self): override_build_config(basedir, is_asan=True) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', 'sweet/bananas', @@ -599,7 +572,6 @@ def testDotsProgress(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=dots', 'sweet/cherries', 'sweet/bananas', @@ -620,7 +592,6 @@ def _testCompactProgress(self, name): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=%s' % name, 'sweet/cherries', 'sweet/bananas', @@ -641,7 +612,6 @@ def testExitAfterNFailures(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--exit-after-n-failures=2', '-j1', @@ -660,7 +630,7 @@ def testExitAfterNFailures(self): self.assertEqual(1, result.returncode, result) def testNumFuzzer(self): - sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release'] + sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build'] 
with temp_base() as basedir: with capture() as (stdout, stderr): @@ -674,7 +644,6 @@ def testRunnerFlags(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed=42', diff --git a/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 new file mode 100644 index 00000000000000..9daeafb9864cf4 --- /dev/null +++ b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 @@ -0,0 +1 @@ +test diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json index d1fdb49525d8d8..08ac623cd734b2 100644 --- a/deps/v8/tools/unittests/testdata/expected_test_results1.json +++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json @@ -1,10 +1,8 @@ { - "arch": "x64", "duration_mean": 1, - "mode": "release", "results": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -29,7 +27,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -54,7 +52,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -81,7 +79,7 @@ 
], "slowest_tests": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -105,7 +103,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -129,7 +127,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json index ac9ab9cc595845..dc353f687553e5 100644 --- a/deps/v8/tools/unittests/testdata/expected_test_results2.json +++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json @@ -1,10 +1,8 @@ { - "arch": "x64", "duration_mean": 1, - "mode": "release", "results": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -28,7 +26,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes 
--random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 0, "expected": [ @@ -54,7 +52,7 @@ ], "slowest_tests": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 0, "expected": [ @@ -77,7 +75,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ diff --git a/deps/v8/tools/v8_presubmit.py b/deps/v8/tools/v8_presubmit.py index 40677b3a0a388f..6fbc3ad2ede85f 100755 --- a/deps/v8/tools/v8_presubmit.py +++ b/deps/v8/tools/v8_presubmit.py @@ -29,6 +29,7 @@ # for py2/py3 compatibility +from __future__ import absolute_import from __future__ import print_function try: @@ -59,12 +60,16 @@ # We now run our own header guard check in PRESUBMIT.py. # build/include_what_you_use: Started giving false positives for variables # named "string" and "map" assuming that you needed to include STL headers. +# runtime/references: As of May 2020 the C++ style guide suggests using +# references for out parameters, see +# https://google.github.io/styleguide/cppguide.html#Inputs_and_Outputs. 
LINT_RULES = """ -build/header_guard -build/include_what_you_use -readability/fn_size -readability/multiline_comment +-runtime/references -whitespace/comments """.split() diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py index ac69cfb836bfb3..31cc9d2a9a23bb 100644 --- a/deps/v8/tools/v8heapconst.py +++ b/deps/v8/tools/v8heapconst.py @@ -32,47 +32,47 @@ 68: "ABSTRACT_INTERNAL_CLASS_SUBCLASS1_TYPE", 69: "ABSTRACT_INTERNAL_CLASS_SUBCLASS2_TYPE", 70: "FOREIGN_TYPE", - 71: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE", - 72: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE", - 73: "CALLABLE_TASK_TYPE", - 74: "CALLBACK_TASK_TYPE", - 75: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE", - 76: "LOAD_HANDLER_TYPE", - 77: "STORE_HANDLER_TYPE", - 78: "FUNCTION_TEMPLATE_INFO_TYPE", - 79: "OBJECT_TEMPLATE_INFO_TYPE", - 80: "ACCESS_CHECK_INFO_TYPE", - 81: "ACCESSOR_INFO_TYPE", - 82: "ACCESSOR_PAIR_TYPE", - 83: "ALIASED_ARGUMENTS_ENTRY_TYPE", - 84: "ALLOCATION_MEMENTO_TYPE", - 85: "ALLOCATION_SITE_TYPE", - 86: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE", - 87: "ASM_WASM_DATA_TYPE", - 88: "ASYNC_GENERATOR_REQUEST_TYPE", - 89: "BREAK_POINT_TYPE", - 90: "BREAK_POINT_INFO_TYPE", - 91: "CACHED_TEMPLATE_OBJECT_TYPE", - 92: "CALL_HANDLER_INFO_TYPE", - 93: "CLASS_POSITIONS_TYPE", - 94: "DEBUG_INFO_TYPE", - 95: "ENUM_CACHE_TYPE", - 96: "FEEDBACK_CELL_TYPE", - 97: "FUNCTION_TEMPLATE_RARE_DATA_TYPE", - 98: "INTERCEPTOR_INFO_TYPE", - 99: "INTERPRETER_DATA_TYPE", - 100: "PROMISE_CAPABILITY_TYPE", - 101: "PROMISE_REACTION_TYPE", - 102: "PROPERTY_DESCRIPTOR_OBJECT_TYPE", - 103: "PROTOTYPE_INFO_TYPE", - 104: "SCRIPT_TYPE", - 105: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE", - 106: "STACK_FRAME_INFO_TYPE", - 107: "STACK_TRACE_FRAME_TYPE", - 108: "TEMPLATE_OBJECT_DESCRIPTION_TYPE", - 109: "TUPLE2_TYPE", - 110: "WASM_CAPI_FUNCTION_DATA_TYPE", - 111: "WASM_DEBUG_INFO_TYPE", + 71: "WASM_TYPE_INFO_TYPE", + 72: "PROMISE_FULFILL_REACTION_JOB_TASK_TYPE", + 73: "PROMISE_REJECT_REACTION_JOB_TASK_TYPE", + 74: 
"CALLABLE_TASK_TYPE", + 75: "CALLBACK_TASK_TYPE", + 76: "PROMISE_RESOLVE_THENABLE_JOB_TASK_TYPE", + 77: "LOAD_HANDLER_TYPE", + 78: "STORE_HANDLER_TYPE", + 79: "FUNCTION_TEMPLATE_INFO_TYPE", + 80: "OBJECT_TEMPLATE_INFO_TYPE", + 81: "ACCESS_CHECK_INFO_TYPE", + 82: "ACCESSOR_INFO_TYPE", + 83: "ACCESSOR_PAIR_TYPE", + 84: "ALIASED_ARGUMENTS_ENTRY_TYPE", + 85: "ALLOCATION_MEMENTO_TYPE", + 86: "ALLOCATION_SITE_TYPE", + 87: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE", + 88: "ASM_WASM_DATA_TYPE", + 89: "ASYNC_GENERATOR_REQUEST_TYPE", + 90: "BREAK_POINT_TYPE", + 91: "BREAK_POINT_INFO_TYPE", + 92: "CACHED_TEMPLATE_OBJECT_TYPE", + 93: "CALL_HANDLER_INFO_TYPE", + 94: "CLASS_POSITIONS_TYPE", + 95: "DEBUG_INFO_TYPE", + 96: "ENUM_CACHE_TYPE", + 97: "FEEDBACK_CELL_TYPE", + 98: "FUNCTION_TEMPLATE_RARE_DATA_TYPE", + 99: "INTERCEPTOR_INFO_TYPE", + 100: "INTERPRETER_DATA_TYPE", + 101: "PROMISE_CAPABILITY_TYPE", + 102: "PROMISE_REACTION_TYPE", + 103: "PROPERTY_DESCRIPTOR_OBJECT_TYPE", + 104: "PROTOTYPE_INFO_TYPE", + 105: "SCRIPT_TYPE", + 106: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE", + 107: "STACK_FRAME_INFO_TYPE", + 108: "STACK_TRACE_FRAME_TYPE", + 109: "TEMPLATE_OBJECT_DESCRIPTION_TYPE", + 110: "TUPLE2_TYPE", + 111: "WASM_CAPI_FUNCTION_DATA_TYPE", 112: "WASM_EXCEPTION_TAG_TYPE", 113: "WASM_EXPORTED_FUNCTION_DATA_TYPE", 114: "WASM_INDIRECT_FUNCTION_TABLE_TYPE", @@ -88,15 +88,15 @@ 124: "ORDERED_HASH_SET_TYPE", 125: "ORDERED_NAME_DICTIONARY_TYPE", 126: "SIMPLE_NUMBER_DICTIONARY_TYPE", - 127: "STRING_TABLE_TYPE", - 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE", - 129: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE", - 130: "SCOPE_INFO_TYPE", - 131: "SCRIPT_CONTEXT_TABLE_TYPE", - 132: "BYTE_ARRAY_TYPE", - 133: "BYTECODE_ARRAY_TYPE", - 134: "FIXED_DOUBLE_ARRAY_TYPE", - 135: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE", + 127: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE", + 128: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE", + 129: "SCOPE_INFO_TYPE", + 130: "SCRIPT_CONTEXT_TABLE_TYPE", + 131: "BYTE_ARRAY_TYPE", + 132: 
"BYTECODE_ARRAY_TYPE", + 133: "FIXED_DOUBLE_ARRAY_TYPE", + 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE", + 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE", 136: "AWAIT_CONTEXT_TYPE", 137: "BLOCK_CONTEXT_TYPE", 138: "CATCH_CONTEXT_TYPE", @@ -107,46 +107,48 @@ 143: "NATIVE_CONTEXT_TYPE", 144: "SCRIPT_CONTEXT_TYPE", 145: "WITH_CONTEXT_TYPE", - 146: "SMALL_ORDERED_HASH_MAP_TYPE", - 147: "SMALL_ORDERED_HASH_SET_TYPE", - 148: "SMALL_ORDERED_NAME_DICTIONARY_TYPE", - 149: "EXPORTED_SUB_CLASS_BASE_TYPE", - 150: "EXPORTED_SUB_CLASS_TYPE", - 151: "SOURCE_TEXT_MODULE_TYPE", - 152: "SYNTHETIC_MODULE_TYPE", - 153: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE", - 154: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE", - 155: "WEAK_FIXED_ARRAY_TYPE", - 156: "TRANSITION_ARRAY_TYPE", - 157: "CELL_TYPE", - 158: "CODE_TYPE", - 159: "CODE_DATA_CONTAINER_TYPE", - 160: "COVERAGE_INFO_TYPE", - 161: "DESCRIPTOR_ARRAY_TYPE", - 162: "EMBEDDER_DATA_ARRAY_TYPE", - 163: "FEEDBACK_METADATA_TYPE", - 164: "FEEDBACK_VECTOR_TYPE", - 165: "FILLER_TYPE", - 166: "FREE_SPACE_TYPE", - 167: "INTERNAL_CLASS_TYPE", - 168: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE", - 169: "MAP_TYPE", - 170: "PREPARSE_DATA_TYPE", - 171: "PROPERTY_ARRAY_TYPE", - 172: "PROPERTY_CELL_TYPE", - 173: "SHARED_FUNCTION_INFO_TYPE", - 174: "SMI_BOX_TYPE", - 175: "SMI_PAIR_TYPE", - 176: "SORT_STATE_TYPE", - 177: "WASM_ARRAY_TYPE", - 178: "WASM_STRUCT_TYPE", - 179: "WEAK_ARRAY_LIST_TYPE", - 180: "WEAK_CELL_TYPE", - 181: "JS_PROXY_TYPE", + 146: "EXPORTED_SUB_CLASS_BASE_TYPE", + 147: "EXPORTED_SUB_CLASS_TYPE", + 148: "EXPORTED_SUB_CLASS2_TYPE", + 149: "SMALL_ORDERED_HASH_MAP_TYPE", + 150: "SMALL_ORDERED_HASH_SET_TYPE", + 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE", + 152: "SOURCE_TEXT_MODULE_TYPE", + 153: "SYNTHETIC_MODULE_TYPE", + 154: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE", + 155: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE", + 156: "WEAK_FIXED_ARRAY_TYPE", + 157: "TRANSITION_ARRAY_TYPE", + 158: "CELL_TYPE", + 159: "CODE_TYPE", + 160: 
"CODE_DATA_CONTAINER_TYPE", + 161: "COVERAGE_INFO_TYPE", + 162: "DESCRIPTOR_ARRAY_TYPE", + 163: "EMBEDDER_DATA_ARRAY_TYPE", + 164: "FEEDBACK_METADATA_TYPE", + 165: "FEEDBACK_VECTOR_TYPE", + 166: "FILLER_TYPE", + 167: "FREE_SPACE_TYPE", + 168: "INTERNAL_CLASS_TYPE", + 169: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE", + 170: "MAP_TYPE", + 171: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE", + 172: "PREPARSE_DATA_TYPE", + 173: "PROPERTY_ARRAY_TYPE", + 174: "PROPERTY_CELL_TYPE", + 175: "SHARED_FUNCTION_INFO_TYPE", + 176: "SMI_BOX_TYPE", + 177: "SMI_PAIR_TYPE", + 178: "SORT_STATE_TYPE", + 179: "WASM_ARRAY_TYPE", + 180: "WASM_STRUCT_TYPE", + 181: "WEAK_ARRAY_LIST_TYPE", + 182: "WEAK_CELL_TYPE", + 183: "JS_PROXY_TYPE", 1057: "JS_OBJECT_TYPE", - 182: "JS_GLOBAL_OBJECT_TYPE", - 183: "JS_GLOBAL_PROXY_TYPE", - 184: "JS_MODULE_NAMESPACE_TYPE", + 184: "JS_GLOBAL_OBJECT_TYPE", + 185: "JS_GLOBAL_PROXY_TYPE", + 186: "JS_MODULE_NAMESPACE_TYPE", 1040: "JS_SPECIAL_API_OBJECT_TYPE", 1041: "JS_PRIMITIVE_WRAPPER_TYPE", 1042: "JS_MAP_KEY_ITERATOR_TYPE", @@ -164,30 +166,30 @@ 1054: "JS_WEAK_MAP_TYPE", 1055: "JS_WEAK_SET_TYPE", 1056: "JS_API_OBJECT_TYPE", - 1058: "JS_AGGREGATE_ERROR_TYPE", - 1059: "JS_ARGUMENTS_OBJECT_TYPE", - 1060: "JS_ARRAY_TYPE", - 1061: "JS_ARRAY_BUFFER_TYPE", - 1062: "JS_ARRAY_ITERATOR_TYPE", - 1063: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", - 1064: "JS_COLLATOR_TYPE", - 1065: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", - 1066: "JS_DATE_TYPE", - 1067: "JS_DATE_TIME_FORMAT_TYPE", - 1068: "JS_DISPLAY_NAMES_TYPE", - 1069: "JS_ERROR_TYPE", - 1070: "JS_FINALIZATION_REGISTRY_TYPE", - 1071: "JS_LIST_FORMAT_TYPE", - 1072: "JS_LOCALE_TYPE", - 1073: "JS_MESSAGE_OBJECT_TYPE", - 1074: "JS_NUMBER_FORMAT_TYPE", - 1075: "JS_PLURAL_RULES_TYPE", - 1076: "JS_PROMISE_TYPE", - 1077: "JS_REG_EXP_TYPE", - 1078: "JS_REG_EXP_STRING_ITERATOR_TYPE", - 1079: "JS_RELATIVE_TIME_FORMAT_TYPE", - 1080: "JS_SEGMENT_ITERATOR_TYPE", - 1081: "JS_SEGMENTER_TYPE", + 1058: "JS_ARGUMENTS_OBJECT_TYPE", + 1059: "JS_ARRAY_TYPE", + 
1060: "JS_ARRAY_BUFFER_TYPE", + 1061: "JS_ARRAY_ITERATOR_TYPE", + 1062: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", + 1063: "JS_COLLATOR_TYPE", + 1064: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 1065: "JS_DATE_TYPE", + 1066: "JS_DATE_TIME_FORMAT_TYPE", + 1067: "JS_DISPLAY_NAMES_TYPE", + 1068: "JS_ERROR_TYPE", + 1069: "JS_FINALIZATION_REGISTRY_TYPE", + 1070: "JS_LIST_FORMAT_TYPE", + 1071: "JS_LOCALE_TYPE", + 1072: "JS_MESSAGE_OBJECT_TYPE", + 1073: "JS_NUMBER_FORMAT_TYPE", + 1074: "JS_PLURAL_RULES_TYPE", + 1075: "JS_PROMISE_TYPE", + 1076: "JS_REG_EXP_TYPE", + 1077: "JS_REG_EXP_STRING_ITERATOR_TYPE", + 1078: "JS_RELATIVE_TIME_FORMAT_TYPE", + 1079: "JS_SEGMENT_ITERATOR_TYPE", + 1080: "JS_SEGMENTER_TYPE", + 1081: "JS_SEGMENTS_TYPE", 1082: "JS_STRING_ITERATOR_TYPE", 1083: "JS_V8_BREAK_ITERATOR_TYPE", 1084: "JS_WEAK_REF_TYPE", @@ -203,260 +205,266 @@ # List of known V8 maps. KNOWN_MAPS = { - ("read_only_space", 0x00121): (166, "FreeSpaceMap"), - ("read_only_space", 0x00149): (169, "MetaMap"), - ("read_only_space", 0x0018d): (67, "NullMap"), - ("read_only_space", 0x001c5): (161, "DescriptorArrayMap"), - ("read_only_space", 0x001f5): (155, "WeakFixedArrayMap"), - ("read_only_space", 0x0021d): (165, "OnePointerFillerMap"), - ("read_only_space", 0x00245): (165, "TwoPointerFillerMap"), - ("read_only_space", 0x00289): (67, "UninitializedMap"), - ("read_only_space", 0x002cd): (8, "OneByteInternalizedStringMap"), - ("read_only_space", 0x00329): (67, "UndefinedMap"), - ("read_only_space", 0x0035d): (66, "HeapNumberMap"), - ("read_only_space", 0x003a1): (67, "TheHoleMap"), - ("read_only_space", 0x00401): (67, "BooleanMap"), - ("read_only_space", 0x00489): (132, "ByteArrayMap"), - ("read_only_space", 0x004b1): (117, "FixedArrayMap"), - ("read_only_space", 0x004d9): (117, "FixedCOWArrayMap"), - ("read_only_space", 0x00501): (118, "HashTableMap"), - ("read_only_space", 0x00529): (64, "SymbolMap"), - ("read_only_space", 0x00551): (40, "OneByteStringMap"), - ("read_only_space", 0x00579): (130, 
"ScopeInfoMap"), - ("read_only_space", 0x005a1): (173, "SharedFunctionInfoMap"), - ("read_only_space", 0x005c9): (158, "CodeMap"), - ("read_only_space", 0x005f1): (157, "CellMap"), - ("read_only_space", 0x00619): (172, "GlobalPropertyCellMap"), - ("read_only_space", 0x00641): (70, "ForeignMap"), - ("read_only_space", 0x00669): (156, "TransitionArrayMap"), - ("read_only_space", 0x00691): (45, "ThinOneByteStringMap"), - ("read_only_space", 0x006b9): (164, "FeedbackVectorMap"), - ("read_only_space", 0x0070d): (67, "ArgumentsMarkerMap"), - ("read_only_space", 0x0076d): (67, "ExceptionMap"), - ("read_only_space", 0x007c9): (67, "TerminationExceptionMap"), - ("read_only_space", 0x00831): (67, "OptimizedOutMap"), - ("read_only_space", 0x00891): (67, "StaleRegisterMap"), - ("read_only_space", 0x008d5): (131, "ScriptContextTableMap"), - ("read_only_space", 0x008fd): (128, "ClosureFeedbackCellArrayMap"), - ("read_only_space", 0x00925): (163, "FeedbackMetadataArrayMap"), - ("read_only_space", 0x0094d): (117, "ArrayListMap"), - ("read_only_space", 0x00975): (65, "BigIntMap"), - ("read_only_space", 0x0099d): (129, "ObjectBoilerplateDescriptionMap"), - ("read_only_space", 0x009c5): (133, "BytecodeArrayMap"), - ("read_only_space", 0x009ed): (159, "CodeDataContainerMap"), - ("read_only_space", 0x00a15): (160, "CoverageInfoMap"), - ("read_only_space", 0x00a3d): (134, "FixedDoubleArrayMap"), - ("read_only_space", 0x00a65): (120, "GlobalDictionaryMap"), - ("read_only_space", 0x00a8d): (96, "ManyClosuresCellMap"), - ("read_only_space", 0x00ab5): (117, "ModuleInfoMap"), - ("read_only_space", 0x00add): (121, "NameDictionaryMap"), - ("read_only_space", 0x00b05): (96, "NoClosuresCellMap"), - ("read_only_space", 0x00b2d): (122, "NumberDictionaryMap"), - ("read_only_space", 0x00b55): (96, "OneClosureCellMap"), - ("read_only_space", 0x00b7d): (123, "OrderedHashMapMap"), - ("read_only_space", 0x00ba5): (124, "OrderedHashSetMap"), - ("read_only_space", 0x00bcd): (125, 
"OrderedNameDictionaryMap"), - ("read_only_space", 0x00bf5): (170, "PreparseDataMap"), - ("read_only_space", 0x00c1d): (171, "PropertyArrayMap"), - ("read_only_space", 0x00c45): (92, "SideEffectCallHandlerInfoMap"), - ("read_only_space", 0x00c6d): (92, "SideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x00c95): (92, "NextCallSideEffectFreeCallHandlerInfoMap"), - ("read_only_space", 0x00cbd): (126, "SimpleNumberDictionaryMap"), - ("read_only_space", 0x00ce5): (117, "SloppyArgumentsElementsMap"), - ("read_only_space", 0x00d0d): (146, "SmallOrderedHashMapMap"), - ("read_only_space", 0x00d35): (147, "SmallOrderedHashSetMap"), - ("read_only_space", 0x00d5d): (148, "SmallOrderedNameDictionaryMap"), - ("read_only_space", 0x00d85): (151, "SourceTextModuleMap"), - ("read_only_space", 0x00dad): (127, "StringTableMap"), - ("read_only_space", 0x00dd5): (152, "SyntheticModuleMap"), - ("read_only_space", 0x00dfd): (154, "UncompiledDataWithoutPreparseDataMap"), - ("read_only_space", 0x00e25): (153, "UncompiledDataWithPreparseDataMap"), - ("read_only_space", 0x00e4d): (179, "WeakArrayListMap"), - ("read_only_space", 0x00e75): (119, "EphemeronHashTableMap"), - ("read_only_space", 0x00e9d): (162, "EmbedderDataArrayMap"), - ("read_only_space", 0x00ec5): (180, "WeakCellMap"), - ("read_only_space", 0x00eed): (32, "StringMap"), - ("read_only_space", 0x00f15): (41, "ConsOneByteStringMap"), - ("read_only_space", 0x00f3d): (33, "ConsStringMap"), - ("read_only_space", 0x00f65): (37, "ThinStringMap"), - ("read_only_space", 0x00f8d): (35, "SlicedStringMap"), - ("read_only_space", 0x00fb5): (43, "SlicedOneByteStringMap"), - ("read_only_space", 0x00fdd): (34, "ExternalStringMap"), - ("read_only_space", 0x01005): (42, "ExternalOneByteStringMap"), - ("read_only_space", 0x0102d): (50, "UncachedExternalStringMap"), - ("read_only_space", 0x01055): (0, "InternalizedStringMap"), - ("read_only_space", 0x0107d): (2, "ExternalInternalizedStringMap"), - ("read_only_space", 0x010a5): (10, 
"ExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x010cd): (18, "UncachedExternalInternalizedStringMap"), - ("read_only_space", 0x010f5): (26, "UncachedExternalOneByteInternalizedStringMap"), - ("read_only_space", 0x0111d): (58, "UncachedExternalOneByteStringMap"), - ("read_only_space", 0x01145): (67, "SelfReferenceMarkerMap"), - ("read_only_space", 0x01179): (95, "EnumCacheMap"), - ("read_only_space", 0x011c9): (86, "ArrayBoilerplateDescriptionMap"), - ("read_only_space", 0x012c5): (98, "InterceptorInfoMap"), - ("read_only_space", 0x03335): (71, "PromiseFulfillReactionJobTaskMap"), - ("read_only_space", 0x0335d): (72, "PromiseRejectReactionJobTaskMap"), - ("read_only_space", 0x03385): (73, "CallableTaskMap"), - ("read_only_space", 0x033ad): (74, "CallbackTaskMap"), - ("read_only_space", 0x033d5): (75, "PromiseResolveThenableJobTaskMap"), - ("read_only_space", 0x033fd): (78, "FunctionTemplateInfoMap"), - ("read_only_space", 0x03425): (79, "ObjectTemplateInfoMap"), - ("read_only_space", 0x0344d): (80, "AccessCheckInfoMap"), - ("read_only_space", 0x03475): (81, "AccessorInfoMap"), - ("read_only_space", 0x0349d): (82, "AccessorPairMap"), - ("read_only_space", 0x034c5): (83, "AliasedArgumentsEntryMap"), - ("read_only_space", 0x034ed): (84, "AllocationMementoMap"), - ("read_only_space", 0x03515): (87, "AsmWasmDataMap"), - ("read_only_space", 0x0353d): (88, "AsyncGeneratorRequestMap"), - ("read_only_space", 0x03565): (89, "BreakPointMap"), - ("read_only_space", 0x0358d): (90, "BreakPointInfoMap"), - ("read_only_space", 0x035b5): (91, "CachedTemplateObjectMap"), - ("read_only_space", 0x035dd): (93, "ClassPositionsMap"), - ("read_only_space", 0x03605): (94, "DebugInfoMap"), - ("read_only_space", 0x0362d): (97, "FunctionTemplateRareDataMap"), - ("read_only_space", 0x03655): (99, "InterpreterDataMap"), - ("read_only_space", 0x0367d): (100, "PromiseCapabilityMap"), - ("read_only_space", 0x036a5): (101, "PromiseReactionMap"), - ("read_only_space", 0x036cd): (102, 
"PropertyDescriptorObjectMap"), - ("read_only_space", 0x036f5): (103, "PrototypeInfoMap"), - ("read_only_space", 0x0371d): (104, "ScriptMap"), - ("read_only_space", 0x03745): (105, "SourceTextModuleInfoEntryMap"), - ("read_only_space", 0x0376d): (106, "StackFrameInfoMap"), - ("read_only_space", 0x03795): (107, "StackTraceFrameMap"), - ("read_only_space", 0x037bd): (108, "TemplateObjectDescriptionMap"), - ("read_only_space", 0x037e5): (109, "Tuple2Map"), - ("read_only_space", 0x0380d): (110, "WasmCapiFunctionDataMap"), - ("read_only_space", 0x03835): (111, "WasmDebugInfoMap"), - ("read_only_space", 0x0385d): (112, "WasmExceptionTagMap"), - ("read_only_space", 0x03885): (113, "WasmExportedFunctionDataMap"), - ("read_only_space", 0x038ad): (114, "WasmIndirectFunctionTableMap"), - ("read_only_space", 0x038d5): (115, "WasmJSFunctionDataMap"), - ("read_only_space", 0x038fd): (116, "WasmValueMap"), - ("read_only_space", 0x03925): (167, "InternalClassMap"), - ("read_only_space", 0x0394d): (175, "SmiPairMap"), - ("read_only_space", 0x03975): (174, "SmiBoxMap"), - ("read_only_space", 0x0399d): (149, "ExportedSubClassBaseMap"), - ("read_only_space", 0x039c5): (150, "ExportedSubClassMap"), - ("read_only_space", 0x039ed): (68, "AbstractInternalClassSubclass1Map"), - ("read_only_space", 0x03a15): (69, "AbstractInternalClassSubclass2Map"), - ("read_only_space", 0x03a3d): (135, "InternalClassWithSmiElementsMap"), - ("read_only_space", 0x03a65): (168, "InternalClassWithStructElementsMap"), - ("read_only_space", 0x03a8d): (176, "SortStateMap"), - ("read_only_space", 0x03ab5): (85, "AllocationSiteWithWeakNextMap"), - ("read_only_space", 0x03add): (85, "AllocationSiteWithoutWeakNextMap"), - ("read_only_space", 0x03b05): (76, "LoadHandler1Map"), - ("read_only_space", 0x03b2d): (76, "LoadHandler2Map"), - ("read_only_space", 0x03b55): (76, "LoadHandler3Map"), - ("read_only_space", 0x03b7d): (77, "StoreHandler0Map"), - ("read_only_space", 0x03ba5): (77, "StoreHandler1Map"), - 
("read_only_space", 0x03bcd): (77, "StoreHandler2Map"), - ("read_only_space", 0x03bf5): (77, "StoreHandler3Map"), - ("map_space", 0x00121): (1057, "ExternalMap"), - ("map_space", 0x00149): (1073, "JSMessageObjectMap"), + ("read_only_space", 0x02115): (167, "FreeSpaceMap"), + ("read_only_space", 0x0213d): (170, "MetaMap"), + ("read_only_space", 0x02181): (67, "NullMap"), + ("read_only_space", 0x021b9): (162, "DescriptorArrayMap"), + ("read_only_space", 0x021e9): (156, "WeakFixedArrayMap"), + ("read_only_space", 0x02211): (166, "OnePointerFillerMap"), + ("read_only_space", 0x02239): (166, "TwoPointerFillerMap"), + ("read_only_space", 0x0227d): (67, "UninitializedMap"), + ("read_only_space", 0x022c1): (8, "OneByteInternalizedStringMap"), + ("read_only_space", 0x0231d): (67, "UndefinedMap"), + ("read_only_space", 0x02351): (66, "HeapNumberMap"), + ("read_only_space", 0x02395): (67, "TheHoleMap"), + ("read_only_space", 0x023f5): (67, "BooleanMap"), + ("read_only_space", 0x0247d): (131, "ByteArrayMap"), + ("read_only_space", 0x024a5): (117, "FixedArrayMap"), + ("read_only_space", 0x024cd): (117, "FixedCOWArrayMap"), + ("read_only_space", 0x024f5): (118, "HashTableMap"), + ("read_only_space", 0x0251d): (64, "SymbolMap"), + ("read_only_space", 0x02545): (40, "OneByteStringMap"), + ("read_only_space", 0x0256d): (129, "ScopeInfoMap"), + ("read_only_space", 0x02595): (175, "SharedFunctionInfoMap"), + ("read_only_space", 0x025bd): (159, "CodeMap"), + ("read_only_space", 0x025e5): (158, "CellMap"), + ("read_only_space", 0x0260d): (174, "GlobalPropertyCellMap"), + ("read_only_space", 0x02635): (70, "ForeignMap"), + ("read_only_space", 0x0265d): (157, "TransitionArrayMap"), + ("read_only_space", 0x02685): (45, "ThinOneByteStringMap"), + ("read_only_space", 0x026ad): (165, "FeedbackVectorMap"), + ("read_only_space", 0x02701): (67, "ArgumentsMarkerMap"), + ("read_only_space", 0x02761): (67, "ExceptionMap"), + ("read_only_space", 0x027bd): (67, "TerminationExceptionMap"), + 
("read_only_space", 0x02825): (67, "OptimizedOutMap"), + ("read_only_space", 0x02885): (67, "StaleRegisterMap"), + ("read_only_space", 0x028c9): (130, "ScriptContextTableMap"), + ("read_only_space", 0x028f1): (127, "ClosureFeedbackCellArrayMap"), + ("read_only_space", 0x02919): (164, "FeedbackMetadataArrayMap"), + ("read_only_space", 0x02941): (117, "ArrayListMap"), + ("read_only_space", 0x02969): (65, "BigIntMap"), + ("read_only_space", 0x02991): (128, "ObjectBoilerplateDescriptionMap"), + ("read_only_space", 0x029b9): (132, "BytecodeArrayMap"), + ("read_only_space", 0x029e1): (160, "CodeDataContainerMap"), + ("read_only_space", 0x02a09): (161, "CoverageInfoMap"), + ("read_only_space", 0x02a31): (133, "FixedDoubleArrayMap"), + ("read_only_space", 0x02a59): (120, "GlobalDictionaryMap"), + ("read_only_space", 0x02a81): (97, "ManyClosuresCellMap"), + ("read_only_space", 0x02aa9): (117, "ModuleInfoMap"), + ("read_only_space", 0x02ad1): (121, "NameDictionaryMap"), + ("read_only_space", 0x02af9): (97, "NoClosuresCellMap"), + ("read_only_space", 0x02b21): (122, "NumberDictionaryMap"), + ("read_only_space", 0x02b49): (97, "OneClosureCellMap"), + ("read_only_space", 0x02b71): (123, "OrderedHashMapMap"), + ("read_only_space", 0x02b99): (124, "OrderedHashSetMap"), + ("read_only_space", 0x02bc1): (125, "OrderedNameDictionaryMap"), + ("read_only_space", 0x02be9): (172, "PreparseDataMap"), + ("read_only_space", 0x02c11): (173, "PropertyArrayMap"), + ("read_only_space", 0x02c39): (93, "SideEffectCallHandlerInfoMap"), + ("read_only_space", 0x02c61): (93, "SideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02c89): (93, "NextCallSideEffectFreeCallHandlerInfoMap"), + ("read_only_space", 0x02cb1): (126, "SimpleNumberDictionaryMap"), + ("read_only_space", 0x02cd9): (149, "SmallOrderedHashMapMap"), + ("read_only_space", 0x02d01): (150, "SmallOrderedHashSetMap"), + ("read_only_space", 0x02d29): (151, "SmallOrderedNameDictionaryMap"), + ("read_only_space", 0x02d51): (152, 
"SourceTextModuleMap"), + ("read_only_space", 0x02d79): (153, "SyntheticModuleMap"), + ("read_only_space", 0x02da1): (155, "UncompiledDataWithoutPreparseDataMap"), + ("read_only_space", 0x02dc9): (154, "UncompiledDataWithPreparseDataMap"), + ("read_only_space", 0x02df1): (71, "WasmTypeInfoMap"), + ("read_only_space", 0x02e19): (181, "WeakArrayListMap"), + ("read_only_space", 0x02e41): (119, "EphemeronHashTableMap"), + ("read_only_space", 0x02e69): (163, "EmbedderDataArrayMap"), + ("read_only_space", 0x02e91): (182, "WeakCellMap"), + ("read_only_space", 0x02eb9): (32, "StringMap"), + ("read_only_space", 0x02ee1): (41, "ConsOneByteStringMap"), + ("read_only_space", 0x02f09): (33, "ConsStringMap"), + ("read_only_space", 0x02f31): (37, "ThinStringMap"), + ("read_only_space", 0x02f59): (35, "SlicedStringMap"), + ("read_only_space", 0x02f81): (43, "SlicedOneByteStringMap"), + ("read_only_space", 0x02fa9): (34, "ExternalStringMap"), + ("read_only_space", 0x02fd1): (42, "ExternalOneByteStringMap"), + ("read_only_space", 0x02ff9): (50, "UncachedExternalStringMap"), + ("read_only_space", 0x03021): (0, "InternalizedStringMap"), + ("read_only_space", 0x03049): (2, "ExternalInternalizedStringMap"), + ("read_only_space", 0x03071): (10, "ExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x03099): (18, "UncachedExternalInternalizedStringMap"), + ("read_only_space", 0x030c1): (26, "UncachedExternalOneByteInternalizedStringMap"), + ("read_only_space", 0x030e9): (58, "UncachedExternalOneByteStringMap"), + ("read_only_space", 0x03111): (67, "SelfReferenceMarkerMap"), + ("read_only_space", 0x03139): (67, "BasicBlockCountersMarkerMap"), + ("read_only_space", 0x0316d): (96, "EnumCacheMap"), + ("read_only_space", 0x031bd): (87, "ArrayBoilerplateDescriptionMap"), + ("read_only_space", 0x032a9): (99, "InterceptorInfoMap"), + ("read_only_space", 0x05399): (72, "PromiseFulfillReactionJobTaskMap"), + ("read_only_space", 0x053c1): (73, "PromiseRejectReactionJobTaskMap"), + 
("read_only_space", 0x053e9): (74, "CallableTaskMap"), + ("read_only_space", 0x05411): (75, "CallbackTaskMap"), + ("read_only_space", 0x05439): (76, "PromiseResolveThenableJobTaskMap"), + ("read_only_space", 0x05461): (79, "FunctionTemplateInfoMap"), + ("read_only_space", 0x05489): (80, "ObjectTemplateInfoMap"), + ("read_only_space", 0x054b1): (81, "AccessCheckInfoMap"), + ("read_only_space", 0x054d9): (82, "AccessorInfoMap"), + ("read_only_space", 0x05501): (83, "AccessorPairMap"), + ("read_only_space", 0x05529): (84, "AliasedArgumentsEntryMap"), + ("read_only_space", 0x05551): (85, "AllocationMementoMap"), + ("read_only_space", 0x05579): (88, "AsmWasmDataMap"), + ("read_only_space", 0x055a1): (89, "AsyncGeneratorRequestMap"), + ("read_only_space", 0x055c9): (90, "BreakPointMap"), + ("read_only_space", 0x055f1): (91, "BreakPointInfoMap"), + ("read_only_space", 0x05619): (92, "CachedTemplateObjectMap"), + ("read_only_space", 0x05641): (94, "ClassPositionsMap"), + ("read_only_space", 0x05669): (95, "DebugInfoMap"), + ("read_only_space", 0x05691): (98, "FunctionTemplateRareDataMap"), + ("read_only_space", 0x056b9): (100, "InterpreterDataMap"), + ("read_only_space", 0x056e1): (101, "PromiseCapabilityMap"), + ("read_only_space", 0x05709): (102, "PromiseReactionMap"), + ("read_only_space", 0x05731): (103, "PropertyDescriptorObjectMap"), + ("read_only_space", 0x05759): (104, "PrototypeInfoMap"), + ("read_only_space", 0x05781): (105, "ScriptMap"), + ("read_only_space", 0x057a9): (106, "SourceTextModuleInfoEntryMap"), + ("read_only_space", 0x057d1): (107, "StackFrameInfoMap"), + ("read_only_space", 0x057f9): (108, "StackTraceFrameMap"), + ("read_only_space", 0x05821): (109, "TemplateObjectDescriptionMap"), + ("read_only_space", 0x05849): (110, "Tuple2Map"), + ("read_only_space", 0x05871): (111, "WasmCapiFunctionDataMap"), + ("read_only_space", 0x05899): (112, "WasmExceptionTagMap"), + ("read_only_space", 0x058c1): (113, "WasmExportedFunctionDataMap"), + ("read_only_space", 
0x058e9): (114, "WasmIndirectFunctionTableMap"), + ("read_only_space", 0x05911): (115, "WasmJSFunctionDataMap"), + ("read_only_space", 0x05939): (116, "WasmValueMap"), + ("read_only_space", 0x05961): (135, "SloppyArgumentsElementsMap"), + ("read_only_space", 0x05989): (171, "OnHeapBasicBlockProfilerDataMap"), + ("read_only_space", 0x059b1): (168, "InternalClassMap"), + ("read_only_space", 0x059d9): (177, "SmiPairMap"), + ("read_only_space", 0x05a01): (176, "SmiBoxMap"), + ("read_only_space", 0x05a29): (146, "ExportedSubClassBaseMap"), + ("read_only_space", 0x05a51): (147, "ExportedSubClassMap"), + ("read_only_space", 0x05a79): (68, "AbstractInternalClassSubclass1Map"), + ("read_only_space", 0x05aa1): (69, "AbstractInternalClassSubclass2Map"), + ("read_only_space", 0x05ac9): (134, "InternalClassWithSmiElementsMap"), + ("read_only_space", 0x05af1): (169, "InternalClassWithStructElementsMap"), + ("read_only_space", 0x05b19): (148, "ExportedSubClass2Map"), + ("read_only_space", 0x05b41): (178, "SortStateMap"), + ("read_only_space", 0x05b69): (86, "AllocationSiteWithWeakNextMap"), + ("read_only_space", 0x05b91): (86, "AllocationSiteWithoutWeakNextMap"), + ("read_only_space", 0x05bb9): (77, "LoadHandler1Map"), + ("read_only_space", 0x05be1): (77, "LoadHandler2Map"), + ("read_only_space", 0x05c09): (77, "LoadHandler3Map"), + ("read_only_space", 0x05c31): (78, "StoreHandler0Map"), + ("read_only_space", 0x05c59): (78, "StoreHandler1Map"), + ("read_only_space", 0x05c81): (78, "StoreHandler2Map"), + ("read_only_space", 0x05ca9): (78, "StoreHandler3Map"), + ("map_space", 0x02115): (1057, "ExternalMap"), + ("map_space", 0x0213d): (1072, "JSMessageObjectMap"), + ("map_space", 0x02165): (180, "WasmRttEqrefMap"), + ("map_space", 0x0218d): (180, "WasmRttExternrefMap"), + ("map_space", 0x021b5): (180, "WasmRttFuncrefMap"), + ("map_space", 0x021dd): (180, "WasmRttI31refMap"), } # List of known V8 objects. 
KNOWN_OBJECTS = { - ("read_only_space", 0x00171): "NullValue", - ("read_only_space", 0x001b5): "EmptyDescriptorArray", - ("read_only_space", 0x001ed): "EmptyWeakFixedArray", - ("read_only_space", 0x0026d): "UninitializedValue", - ("read_only_space", 0x0030d): "UndefinedValue", - ("read_only_space", 0x00351): "NanValue", - ("read_only_space", 0x00385): "TheHoleValue", - ("read_only_space", 0x003d9): "HoleNanValue", - ("read_only_space", 0x003e5): "TrueValue", - ("read_only_space", 0x0044d): "FalseValue", - ("read_only_space", 0x0047d): "empty_string", - ("read_only_space", 0x006e1): "EmptyScopeInfo", - ("read_only_space", 0x006e9): "EmptyFixedArray", - ("read_only_space", 0x006f1): "ArgumentsMarker", - ("read_only_space", 0x00751): "Exception", - ("read_only_space", 0x007ad): "TerminationException", - ("read_only_space", 0x00815): "OptimizedOut", - ("read_only_space", 0x00875): "StaleRegister", - ("read_only_space", 0x0116d): "EmptyEnumCache", - ("read_only_space", 0x011a1): "EmptyPropertyArray", - ("read_only_space", 0x011a9): "EmptyByteArray", - ("read_only_space", 0x011b1): "EmptyObjectBoilerplateDescription", - ("read_only_space", 0x011bd): "EmptyArrayBoilerplateDescription", - ("read_only_space", 0x011f1): "EmptyClosureFeedbackCellArray", - ("read_only_space", 0x011f9): "EmptySloppyArgumentsElements", - ("read_only_space", 0x01209): "EmptySlowElementDictionary", - ("read_only_space", 0x0122d): "EmptyOrderedHashMap", - ("read_only_space", 0x01241): "EmptyOrderedHashSet", - ("read_only_space", 0x01255): "EmptyFeedbackMetadata", - ("read_only_space", 0x01261): "EmptyPropertyCell", - ("read_only_space", 0x01275): "EmptyPropertyDictionary", - ("read_only_space", 0x0129d): "NoOpInterceptorInfo", - ("read_only_space", 0x012ed): "EmptyWeakArrayList", - ("read_only_space", 0x012f9): "InfinityValue", - ("read_only_space", 0x01305): "MinusZeroValue", - ("read_only_space", 0x01311): "MinusInfinityValue", - ("read_only_space", 0x0131d): "SelfReferenceMarker", - 
("read_only_space", 0x0135d): "OffHeapTrampolineRelocationInfo", - ("read_only_space", 0x01369): "TrampolineTrivialCodeDataContainer", - ("read_only_space", 0x01375): "TrampolinePromiseRejectionCodeDataContainer", - ("read_only_space", 0x01381): "GlobalThisBindingScopeInfo", - ("read_only_space", 0x013b9): "EmptyFunctionScopeInfo", - ("read_only_space", 0x013e1): "NativeScopeInfo", - ("read_only_space", 0x013fd): "HashSeed", - ("old_space", 0x00121): "ArgumentsIteratorAccessor", - ("old_space", 0x00165): "ArrayLengthAccessor", - ("old_space", 0x001a9): "BoundFunctionLengthAccessor", - ("old_space", 0x001ed): "BoundFunctionNameAccessor", - ("old_space", 0x00231): "ErrorStackAccessor", - ("old_space", 0x00275): "FunctionArgumentsAccessor", - ("old_space", 0x002b9): "FunctionCallerAccessor", - ("old_space", 0x002fd): "FunctionNameAccessor", - ("old_space", 0x00341): "FunctionLengthAccessor", - ("old_space", 0x00385): "FunctionPrototypeAccessor", - ("old_space", 0x003c9): "RegExpResultIndicesAccessor", - ("old_space", 0x0040d): "StringLengthAccessor", - ("old_space", 0x00451): "InvalidPrototypeValidityCell", - ("old_space", 0x00459): "EmptyScript", - ("old_space", 0x00499): "ManyClosuresCell", - ("old_space", 0x004a5): "ArrayConstructorProtector", - ("old_space", 0x004b9): "NoElementsProtector", - ("old_space", 0x004cd): "IsConcatSpreadableProtector", - ("old_space", 0x004e1): "ArraySpeciesProtector", - ("old_space", 0x004f5): "TypedArraySpeciesProtector", - ("old_space", 0x00509): "PromiseSpeciesProtector", - ("old_space", 0x0051d): "RegExpSpeciesProtector", - ("old_space", 0x00531): "StringLengthProtector", - ("old_space", 0x00545): "ArrayIteratorProtector", - ("old_space", 0x00559): "ArrayBufferDetachingProtector", - ("old_space", 0x0056d): "PromiseHookProtector", - ("old_space", 0x00581): "PromiseResolveProtector", - ("old_space", 0x00595): "MapIteratorProtector", - ("old_space", 0x005a9): "PromiseThenProtector", - ("old_space", 0x005bd): "SetIteratorProtector", - 
("old_space", 0x005d1): "StringIteratorProtector", - ("old_space", 0x005e5): "SingleCharacterStringCache", - ("old_space", 0x009ed): "StringSplitCache", - ("old_space", 0x00df5): "RegExpMultipleCache", - ("old_space", 0x011fd): "BuiltinsConstantsTable", - ("old_space", 0x015a1): "AsyncFunctionAwaitRejectSharedFun", - ("old_space", 0x015c9): "AsyncFunctionAwaitResolveSharedFun", - ("old_space", 0x015f1): "AsyncGeneratorAwaitRejectSharedFun", - ("old_space", 0x01619): "AsyncGeneratorAwaitResolveSharedFun", - ("old_space", 0x01641): "AsyncGeneratorYieldResolveSharedFun", - ("old_space", 0x01669): "AsyncGeneratorReturnResolveSharedFun", - ("old_space", 0x01691): "AsyncGeneratorReturnClosedRejectSharedFun", - ("old_space", 0x016b9): "AsyncGeneratorReturnClosedResolveSharedFun", - ("old_space", 0x016e1): "AsyncIteratorValueUnwrapSharedFun", - ("old_space", 0x01709): "PromiseAllResolveElementSharedFun", - ("old_space", 0x01731): "PromiseAllSettledResolveElementSharedFun", - ("old_space", 0x01759): "PromiseAllSettledRejectElementSharedFun", - ("old_space", 0x01781): "PromiseAnyRejectElementSharedFun", - ("old_space", 0x017a9): "PromiseCapabilityDefaultRejectSharedFun", - ("old_space", 0x017d1): "PromiseCapabilityDefaultResolveSharedFun", - ("old_space", 0x017f9): "PromiseCatchFinallySharedFun", - ("old_space", 0x01821): "PromiseGetCapabilitiesExecutorSharedFun", - ("old_space", 0x01849): "PromiseThenFinallySharedFun", - ("old_space", 0x01871): "PromiseThrowerFinallySharedFun", - ("old_space", 0x01899): "PromiseValueThunkFinallySharedFun", - ("old_space", 0x018c1): "ProxyRevokeSharedFun", + ("read_only_space", 0x02165): "NullValue", + ("read_only_space", 0x021a9): "EmptyDescriptorArray", + ("read_only_space", 0x021e1): "EmptyWeakFixedArray", + ("read_only_space", 0x02261): "UninitializedValue", + ("read_only_space", 0x02301): "UndefinedValue", + ("read_only_space", 0x02345): "NanValue", + ("read_only_space", 0x02379): "TheHoleValue", + ("read_only_space", 0x023cd): 
"HoleNanValue", + ("read_only_space", 0x023d9): "TrueValue", + ("read_only_space", 0x02441): "FalseValue", + ("read_only_space", 0x02471): "empty_string", + ("read_only_space", 0x026d5): "EmptyScopeInfo", + ("read_only_space", 0x026dd): "EmptyFixedArray", + ("read_only_space", 0x026e5): "ArgumentsMarker", + ("read_only_space", 0x02745): "Exception", + ("read_only_space", 0x027a1): "TerminationException", + ("read_only_space", 0x02809): "OptimizedOut", + ("read_only_space", 0x02869): "StaleRegister", + ("read_only_space", 0x03161): "EmptyEnumCache", + ("read_only_space", 0x03195): "EmptyPropertyArray", + ("read_only_space", 0x0319d): "EmptyByteArray", + ("read_only_space", 0x031a5): "EmptyObjectBoilerplateDescription", + ("read_only_space", 0x031b1): "EmptyArrayBoilerplateDescription", + ("read_only_space", 0x031e5): "EmptyClosureFeedbackCellArray", + ("read_only_space", 0x031ed): "EmptySlowElementDictionary", + ("read_only_space", 0x03211): "EmptyOrderedHashMap", + ("read_only_space", 0x03225): "EmptyOrderedHashSet", + ("read_only_space", 0x03239): "EmptyFeedbackMetadata", + ("read_only_space", 0x03245): "EmptyPropertyCell", + ("read_only_space", 0x03259): "EmptyPropertyDictionary", + ("read_only_space", 0x03281): "NoOpInterceptorInfo", + ("read_only_space", 0x032d1): "EmptyWeakArrayList", + ("read_only_space", 0x032dd): "InfinityValue", + ("read_only_space", 0x032e9): "MinusZeroValue", + ("read_only_space", 0x032f5): "MinusInfinityValue", + ("read_only_space", 0x03301): "SelfReferenceMarker", + ("read_only_space", 0x03341): "BasicBlockCountersMarker", + ("read_only_space", 0x03385): "OffHeapTrampolineRelocationInfo", + ("read_only_space", 0x03391): "TrampolineTrivialCodeDataContainer", + ("read_only_space", 0x0339d): "TrampolinePromiseRejectionCodeDataContainer", + ("read_only_space", 0x033a9): "GlobalThisBindingScopeInfo", + ("read_only_space", 0x033e1): "EmptyFunctionScopeInfo", + ("read_only_space", 0x03409): "NativeScopeInfo", + ("read_only_space", 0x03425): 
"HashSeed", + ("old_space", 0x02115): "ArgumentsIteratorAccessor", + ("old_space", 0x02159): "ArrayLengthAccessor", + ("old_space", 0x0219d): "BoundFunctionLengthAccessor", + ("old_space", 0x021e1): "BoundFunctionNameAccessor", + ("old_space", 0x02225): "ErrorStackAccessor", + ("old_space", 0x02269): "FunctionArgumentsAccessor", + ("old_space", 0x022ad): "FunctionCallerAccessor", + ("old_space", 0x022f1): "FunctionNameAccessor", + ("old_space", 0x02335): "FunctionLengthAccessor", + ("old_space", 0x02379): "FunctionPrototypeAccessor", + ("old_space", 0x023bd): "RegExpResultIndicesAccessor", + ("old_space", 0x02401): "StringLengthAccessor", + ("old_space", 0x02445): "InvalidPrototypeValidityCell", + ("old_space", 0x024cd): "EmptyScript", + ("old_space", 0x0250d): "ManyClosuresCell", + ("old_space", 0x02519): "ArrayConstructorProtector", + ("old_space", 0x0252d): "NoElementsProtector", + ("old_space", 0x02541): "IsConcatSpreadableProtector", + ("old_space", 0x02555): "ArraySpeciesProtector", + ("old_space", 0x02569): "TypedArraySpeciesProtector", + ("old_space", 0x0257d): "PromiseSpeciesProtector", + ("old_space", 0x02591): "RegExpSpeciesProtector", + ("old_space", 0x025a5): "StringLengthProtector", + ("old_space", 0x025b9): "ArrayIteratorProtector", + ("old_space", 0x025cd): "ArrayBufferDetachingProtector", + ("old_space", 0x025e1): "PromiseHookProtector", + ("old_space", 0x025f5): "PromiseResolveProtector", + ("old_space", 0x02609): "MapIteratorProtector", + ("old_space", 0x0261d): "PromiseThenProtector", + ("old_space", 0x02631): "SetIteratorProtector", + ("old_space", 0x02645): "StringIteratorProtector", + ("old_space", 0x02659): "SingleCharacterStringCache", + ("old_space", 0x02a61): "StringSplitCache", + ("old_space", 0x02e69): "RegExpMultipleCache", + ("old_space", 0x03271): "BuiltinsConstantsTable", + ("old_space", 0x03645): "AsyncFunctionAwaitRejectSharedFun", + ("old_space", 0x0366d): "AsyncFunctionAwaitResolveSharedFun", + ("old_space", 0x03695): 
"AsyncGeneratorAwaitRejectSharedFun", + ("old_space", 0x036bd): "AsyncGeneratorAwaitResolveSharedFun", + ("old_space", 0x036e5): "AsyncGeneratorYieldResolveSharedFun", + ("old_space", 0x0370d): "AsyncGeneratorReturnResolveSharedFun", + ("old_space", 0x03735): "AsyncGeneratorReturnClosedRejectSharedFun", + ("old_space", 0x0375d): "AsyncGeneratorReturnClosedResolveSharedFun", + ("old_space", 0x03785): "AsyncIteratorValueUnwrapSharedFun", + ("old_space", 0x037ad): "PromiseAllResolveElementSharedFun", + ("old_space", 0x037d5): "PromiseAllSettledResolveElementSharedFun", + ("old_space", 0x037fd): "PromiseAllSettledRejectElementSharedFun", + ("old_space", 0x03825): "PromiseAnyRejectElementSharedFun", + ("old_space", 0x0384d): "PromiseCapabilityDefaultRejectSharedFun", + ("old_space", 0x03875): "PromiseCapabilityDefaultResolveSharedFun", + ("old_space", 0x0389d): "PromiseCatchFinallySharedFun", + ("old_space", 0x038c5): "PromiseGetCapabilitiesExecutorSharedFun", + ("old_space", 0x038ed): "PromiseThenFinallySharedFun", + ("old_space", 0x03915): "PromiseThrowerFinallySharedFun", + ("old_space", 0x0393d): "PromiseValueThunkFinallySharedFun", + ("old_space", 0x03965): "ProxyRevokeSharedFun", } # Lower 32 bits of first page addresses for various heap spaces. 
diff --git a/deps/v8/tools/v8windbg/base/utilities.cc b/deps/v8/tools/v8windbg/base/utilities.cc index a59e95f46fd211..1f0e2bc6708e22 100644 --- a/deps/v8/tools/v8windbg/base/utilities.cc +++ b/deps/v8/tools/v8windbg/base/utilities.cc @@ -133,6 +133,15 @@ HRESULT UnboxULong64(IModelObject* object, ULONG64* value, bool convert) { return S_OK; } +HRESULT GetInt32(IDebugHostConstant* object, int* value) { + variant_t variant; + RETURN_IF_FAIL(object->GetValue(&variant)); + + if (variant.vt != VT_I4) return E_FAIL; + *value = variant.ullVal; + return S_OK; +} + HRESULT CreateInt32(int value, IModelObject** pp_int) { HRESULT hr = S_OK; *pp_int = nullptr; diff --git a/deps/v8/tools/v8windbg/base/utilities.h b/deps/v8/tools/v8windbg/base/utilities.h index e26bb2878049ed..06af6c35875d64 100644 --- a/deps/v8/tools/v8windbg/base/utilities.h +++ b/deps/v8/tools/v8windbg/base/utilities.h @@ -55,6 +55,8 @@ HRESULT CreateULong64(ULONG64 value, IModelObject** pp_int); HRESULT UnboxULong64(IModelObject* object, ULONG64* value, bool convert = false); +HRESULT GetInt32(IDebugHostConstant* object, int* value); + HRESULT CreateInt32(int value, IModelObject** pp_int); HRESULT CreateUInt32(uint32_t value, IModelObject** pp_int); diff --git a/deps/v8/tools/v8windbg/src/object-inspection.cc b/deps/v8/tools/v8windbg/src/object-inspection.cc index ce0370a697f59d..6f90614bd5c95e 100644 --- a/deps/v8/tools/v8windbg/src/object-inspection.cc +++ b/deps/v8/tools/v8windbg/src/object-inspection.cc @@ -585,6 +585,79 @@ IFACEMETHODIMP V8LocalValueProperty::SetValue( return E_NOTIMPL; } +IFACEMETHODIMP V8InternalCompilerNodeIdProperty::GetValue( + PCWSTR pwsz_key, IModelObject* p_v8_compiler_node_instance, + IModelObject** pp_value) noexcept { + WRL::ComPtr sp_bit_field; + RETURN_IF_FAIL(p_v8_compiler_node_instance->GetRawValue( + SymbolKind::SymbolField, L"bit_field_", RawSearchNone, &sp_bit_field)); + + uint64_t bit_field_value; + RETURN_IF_FAIL( + UnboxULong64(sp_bit_field.Get(), &bit_field_value, 
true /*convert*/)); + + WRL::ComPtr sp_host_context; + RETURN_IF_FAIL(p_v8_compiler_node_instance->GetContext(&sp_host_context)); + + WRL::ComPtr sp_id_field_type; + RETURN_IF_FAIL(Extension::Current() + ->GetV8Module(sp_host_context) + ->FindTypeByName(L"v8::internal::compiler::Node::IdField", + &sp_id_field_type)); + + // Get 2nd template parameter as 24 in class. + // v8::base::BitField. + bool is_generic; + RETURN_IF_FAIL(sp_id_field_type->IsGeneric(&is_generic)); + if (!is_generic) return E_FAIL; + + WRL::ComPtr sp_k_size_arg; + RETURN_IF_FAIL(sp_id_field_type->GetGenericArgumentAt(2, &sp_k_size_arg)); + + WRL::ComPtr sp_k_size_constant; + RETURN_IF_FAIL(sp_k_size_arg.As(&sp_k_size_constant)); + + int k_size; + RETURN_IF_FAIL(GetInt32(sp_k_size_constant.Get(), &k_size)); + + // Compute node_id. + uint32_t node_id = bit_field_value & (0xFFFFFFFF >> k_size); + RETURN_IF_FAIL(CreateUInt32(node_id, pp_value)); + + return S_OK; +} + +IFACEMETHODIMP V8InternalCompilerNodeIdProperty::SetValue( + PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/, + IModelObject* /*p_value*/) noexcept { + return E_NOTIMPL; +} + +IFACEMETHODIMP V8InternalCompilerBitsetNameProperty::GetValue( + PCWSTR pwsz_key, IModelObject* p_v8_compiler_type_instance, + IModelObject** pp_value) noexcept { + WRL::ComPtr sp_payload; + RETURN_IF_FAIL(p_v8_compiler_type_instance->GetRawValue( + SymbolKind::SymbolField, L"payload_", RawSearchNone, &sp_payload)); + + uint64_t payload_value; + RETURN_IF_FAIL( + UnboxULong64(sp_payload.Get(), &payload_value, true /*convert*/)); + + const char* bitset_name = ::BitsetName(payload_value); + if (!bitset_name) return E_FAIL; + std::string name(bitset_name); + RETURN_IF_FAIL(CreateString(ConvertToU16String(name), pp_value)); + + return S_OK; +} + +IFACEMETHODIMP V8InternalCompilerBitsetNameProperty::SetValue( + PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/, + IModelObject* /*p_value*/) noexcept { + return E_NOTIMPL; +} + constexpr wchar_t usage[] 
= LR"(Invalid arguments. First argument should be a uint64 representing the tagged value to investigate. diff --git a/deps/v8/tools/v8windbg/src/object-inspection.h b/deps/v8/tools/v8windbg/src/object-inspection.h index 27283ca5569c8d..a280b05cadedfe 100644 --- a/deps/v8/tools/v8windbg/src/object-inspection.h +++ b/deps/v8/tools/v8windbg/src/object-inspection.h @@ -245,6 +245,38 @@ class V8LocalValueProperty IModelObject* /*p_value*/); }; +// The implemention of the "NodeId" getter for v8::internal::compiler::Node +// type. +class V8InternalCompilerNodeIdProperty + : public WRL::RuntimeClass< + WRL::RuntimeClassFlags, + IModelPropertyAccessor> { + public: + IFACEMETHOD(GetValue) + (PCWSTR pwsz_key, IModelObject* p_v8_object_instance, + IModelObject** pp_value); + + IFACEMETHOD(SetValue) + (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/, + IModelObject* /*p_value*/); +}; + +// The implemention of the "bitset_name" getter for v8::internal::compiler::Type +// type. +class V8InternalCompilerBitsetNameProperty + : public WRL::RuntimeClass< + WRL::RuntimeClassFlags, + IModelPropertyAccessor> { + public: + IFACEMETHOD(GetValue) + (PCWSTR pwsz_key, IModelObject* p_v8_compiler_type_instance, + IModelObject** pp_value); + + IFACEMETHOD(SetValue) + (PCWSTR /*pwsz_key*/, IModelObject* /*p_process_instance*/, + IModelObject* /*p_value*/); +}; + // A way that someone can directly inspect a tagged value, even if that value // isn't in memory (from a register, or the user's imagination, etc.). 
class InspectV8ObjectMethod diff --git a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc index 4a8dcc9add7c6b..0767ff5f09e974 100644 --- a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc +++ b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.cc @@ -155,3 +155,5 @@ std::vector ListObjectClasses() { } return result; } + +const char* BitsetName(uint64_t payload) { return d::BitsetName(payload); } diff --git a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h index 96bd59b30ea99e..9208f0983270d7 100644 --- a/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h +++ b/deps/v8/tools/v8windbg/src/v8-debug-helper-interop.h @@ -135,4 +135,6 @@ inline uint64_t ExpandCompressedPointer(uint32_t ptr) { return ptr; } std::vector ListObjectClasses(); +const char* BitsetName(uint64_t payload); + #endif // V8_TOOLS_V8WINDBG_SRC_V8_DEBUG_HELPER_INTEROP_H_ diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc index 68c90d2833b511..58a520cff1fc56 100644 --- a/deps/v8/tools/v8windbg/src/v8windbg-extension.cc +++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.cc @@ -215,7 +215,8 @@ HRESULT Extension::Initialize() { &sp_object_type_signature)); RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature( sp_object_type_signature.Get(), sp_object_data_model_.Get())); - registered_object_types_.push_back(sp_object_type_signature); + registered_types_.push_back( + {sp_object_type_signature.Get(), sp_object_data_model_.Get()}); } // Create an instance of the DataModel parent for custom iterable fields. 
@@ -244,7 +245,7 @@ HRESULT Extension::Initialize() { sp_debug_host_symbols->CreateTypeSignature(name, nullptr, &signature)); RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature( signature.Get(), sp_local_data_model_.Get())); - registered_handle_types_.push_back(signature); + registered_types_.push_back({signature.Get(), sp_local_data_model_.Get()}); } // Add the 'Value' property to the parent model. @@ -279,6 +280,46 @@ HRESULT Extension::Initialize() { RETURN_IF_FAIL(OverrideLocalsGetter(stack_frame.Get(), L"Parameters", /*is_parameters=*/true)); + // Add node_id property for v8::internal::compiler::Node. + RETURN_IF_FAIL( + RegisterAndAddPropertyForClass( + L"v8::internal::compiler::Node", L"node_id", + sp_compiler_node_data_model_)); + + // Add bitset_name property for v8::internal::compiler::Type. + RETURN_IF_FAIL( + RegisterAndAddPropertyForClass( + L"v8::internal::compiler::Type", L"bitset_name", + sp_compiler_type_data_model_)); + + return S_OK; +} + +template +HRESULT Extension::RegisterAndAddPropertyForClass( + const wchar_t* class_name, const wchar_t* property_name, + WRL::ComPtr sp_data_model) { + // Create an instance of the DataModel parent class. + auto instance_data_model{WRL::Make()}; + RETURN_IF_FAIL(sp_data_model_manager->CreateDataModelObject( + instance_data_model.Get(), &sp_data_model)); + + // Register that parent model. + WRL::ComPtr class_signature; + RETURN_IF_FAIL(sp_debug_host_symbols->CreateTypeSignature(class_name, nullptr, + &class_signature)); + RETURN_IF_FAIL(sp_data_model_manager->RegisterModelForTypeSignature( + class_signature.Get(), sp_data_model.Get())); + registered_types_.push_back({class_signature.Get(), sp_data_model.Get()}); + + // Add the property to the parent model. 
+ auto property{WRL::Make()}; + WRL::ComPtr sp_property_model; + RETURN_IF_FAIL(CreateProperty(sp_data_model_manager.Get(), property.Get(), + &sp_property_model)); + RETURN_IF_FAIL( + sp_data_model->SetKey(property_name, sp_property_model.Get(), nullptr)); + return S_OK; } @@ -318,18 +359,24 @@ Extension::PropertyOverride::PropertyOverride(const PropertyOverride&) = Extension::PropertyOverride& Extension::PropertyOverride::operator=( const PropertyOverride&) = default; +Extension::RegistrationType::RegistrationType() = default; +Extension::RegistrationType::RegistrationType( + IDebugHostTypeSignature* sp_signature, IModelObject* sp_data_model) + : sp_signature(sp_signature), sp_data_model(sp_data_model) {} +Extension::RegistrationType::~RegistrationType() = default; +Extension::RegistrationType::RegistrationType(const RegistrationType&) = + default; +Extension::RegistrationType& Extension::RegistrationType::operator=( + const RegistrationType&) = default; + Extension::~Extension() { sp_debug_host_extensibility->DestroyFunctionAlias(pcur_isolate); sp_debug_host_extensibility->DestroyFunctionAlias(plist_chunks); sp_debug_host_extensibility->DestroyFunctionAlias(pv8_object); - for (const auto& registered : registered_object_types_) { - sp_data_model_manager->UnregisterModelForTypeSignature( - sp_object_data_model_.Get(), registered.Get()); - } - for (const auto& registered : registered_handle_types_) { + for (const auto& registered : registered_types_) { sp_data_model_manager->UnregisterModelForTypeSignature( - sp_local_data_model_.Get(), registered.Get()); + registered.sp_data_model.Get(), registered.sp_signature.Get()); } for (const auto& override : overridden_properties_) { diff --git a/deps/v8/tools/v8windbg/src/v8windbg-extension.h b/deps/v8/tools/v8windbg/src/v8windbg-extension.h index d54f43c847eaf1..46331611523286 100644 --- a/deps/v8/tools/v8windbg/src/v8windbg-extension.h +++ b/deps/v8/tools/v8windbg/src/v8windbg-extension.h @@ -46,6 +46,11 @@ class 
Extension { HRESULT OverrideLocalsGetter(IModelObject* parent, const wchar_t* key_name, bool is_parameters); + template + HRESULT RegisterAndAddPropertyForClass( + const wchar_t* class_name, const wchar_t* property_name, + WRL::ComPtr sp_data_model); + // A property that has been overridden by this extension. The original value // must be put back in place during ~Extension. struct PropertyOverride { @@ -62,20 +67,32 @@ class Extension { WRL::ComPtr original_metadata; }; + struct RegistrationType { + RegistrationType(); + RegistrationType(IDebugHostTypeSignature* sp_signature, + IModelObject* sp_data_model); + ~RegistrationType(); + RegistrationType(const RegistrationType&); + RegistrationType& operator=(const RegistrationType&); + + WRL::ComPtr sp_signature; + WRL::ComPtr sp_data_model; + }; + static std::unique_ptr current_extension_; WRL::ComPtr sp_object_data_model_; WRL::ComPtr sp_local_data_model_; + WRL::ComPtr sp_compiler_node_data_model_; + WRL::ComPtr sp_compiler_type_data_model_; WRL::ComPtr sp_indexed_field_model_; WRL::ComPtr sp_v8_module_; std::unordered_map> cached_v8_module_types_; - std::vector> registered_object_types_; - std::vector> registered_handle_types_; + std::vector registered_types_; std::vector overridden_properties_; WRL::ComPtr sp_v8_module_ctx_; ULONG v8_module_proc_id_; }; - #endif // V8_TOOLS_V8WINDBG_SRC_V8WINDBG_EXTENSION_H_ diff --git a/deps/v8/tools/wasm/update-wasm-spec-tests.sh b/deps/v8/tools/wasm/update-wasm-spec-tests.sh index b3e9185c4dfaa8..2d26c2f5aeb96a 100755 --- a/deps/v8/tools/wasm/update-wasm-spec-tests.sh +++ b/deps/v8/tools/wasm/update-wasm-spec-tests.sh @@ -71,7 +71,7 @@ log_and_run cp -r ${TMP_DIR}/spec/test/js-api/* ${JS_API_TEST_DIR}/tests # Generate the proposal tests. 
############################################################################### -repos='bulk-memory-operations reference-types js-types JS-BigInt-integration' +repos='bulk-memory-operations reference-types js-types tail-call' for repo in ${repos}; do echo "Process ${repo}" @@ -95,7 +95,10 @@ for repo in ${repos}; do log_and_run ./run.py --wasm ../../interpreter/wasm ${rel_filename} --out _build 2> /dev/null fi done - log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/ + + if ls _build/*.js > /dev/null; then + log_and_run cp _build/*.js ${SPEC_TEST_DIR}/tests/proposals/${repo}/ + fi echo ">> Process js-api tests" log_and_run mkdir ${JS_API_TEST_DIR}/tests/proposals/${repo} diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt index e246f93f14d83a..168b574f9ce033 100644 --- a/deps/v8/tools/whitespace.txt +++ b/deps/v8/tools/whitespace.txt @@ -7,7 +7,7 @@ A Smi balks into a war and says: The doubles heard this and started to unbox. The Smi looked at them when a crazy v8-autoroll account showed up... The autoroller bought a round of Himbeerbrause. Suddenly..... -The bartender starts to shake the bottles..................... +The bartender starts to shake the bottles...................... I can't add trailing whitespaces, so I'm adding this line..... I'm starting to think that just adding trailing whitespaces might not be bad. diff --git a/deps/v8/tools/zone-stats/categories.js b/deps/v8/tools/zone-stats/categories.js new file mode 100644 index 00000000000000..581a69a01e70f3 --- /dev/null +++ b/deps/v8/tools/zone-stats/categories.js @@ -0,0 +1,129 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +const UNCLASSIFIED_CATEGORY = 'unclassified'; +const UNCLASSIFIED_CATEGORY_NAME = 'Unclassified'; + +// Categories for zones. 
+export const CATEGORIES = new Map([ + [ + 'parser', new Set([ + 'AstStringConstants', + 'parser-zone', + 'pre-parser-zone', + ]) + ], + [ + 'misc', new Set([ + 'Run', + 'CanonicalHandleScope', + 'Temporary scoped zone', + 'UpdateFieldType', + ]) + ], + [ + 'interpreter', new Set([ + 'InterpreterCompilationJob', + ]) + ], + [ + 'regexp', new Set([ + 'CompileIrregexp', + ]) + ], + [ + 'compiler-huge', new Set([ + 'graph-zone', + 'instruction-zone', + 'pipeline-compilation-job-zone', + 'register-allocation-zone', + 'register-allocator-verifier-zone', + ]) + ], + [ + 'compiler-other', new Set([ + 'Compile', + 'V8.TFAllocateFPRegisters', + 'V8.TFAllocateGeneralRegisters', + 'V8.TFAssembleCode', + 'V8.TFAssignSpillSlots', + 'V8.TFBuildLiveRangeBundles', + 'V8.TFBuildLiveRanges', + 'V8.TFBytecodeGraphBuilder', + 'V8.TFCommitAssignment', + 'V8.TFConnectRanges', + 'V8.TFControlFlowOptimization', + 'V8.TFDecideSpillingMode', + 'V8.TFDecompressionOptimization', + 'V8.TFEarlyOptimization', + 'V8.TFEarlyTrimming', + 'V8.TFEffectLinearization', + 'V8.TFEscapeAnalysis', + 'V8.TFFinalizeCode', + 'V8.TFFrameElision', + 'V8.TFGenericLowering', + 'V8.TFHeapBrokerInitialization', + 'V8.TFInlining', + 'V8.TFJumpThreading', + 'V8.TFLateGraphTrimming', + 'V8.TFLateOptimization', + 'V8.TFLoadElimination', + 'V8.TFLocateSpillSlots', + 'V8.TFLoopPeeling', + 'V8.TFMachineOperatorOptimization', + 'V8.TFMeetRegisterConstraints', + 'V8.TFMemoryOptimization', + 'V8.TFOptimizeMoves', + 'V8.TFPopulatePointerMaps', + 'V8.TFResolveControlFlow', + 'V8.TFResolvePhis', + 'V8.TFScheduling', + 'V8.TFSelectInstructions', + 'V8.TFSerializeMetadata', + 'V8.TFSimplifiedLowering', + 'V8.TFStoreStoreElimination', + 'V8.TFTypedLowering', + 'V8.TFTyper', + 'V8.TFUntyper', + 'V8.TFVerifyGraph', + 'ValidatePendingAssessment', + 'codegen-zone', + ]) + ], + [UNCLASSIFIED_CATEGORY, new Set()], +]); + +// Maps category to description text that is shown in html. 
+export const CATEGORY_NAMES = new Map([ + ['parser', 'Parser'], + ['misc', 'Misc'], + ['interpreter', 'Ignition'], + ['regexp', 'Regexp compiler'], + ['compiler-huge', 'TurboFan (huge zones)'], + ['compiler-other', 'TurboFan (other zones)'], + [UNCLASSIFIED_CATEGORY, UNCLASSIFIED_CATEGORY_NAME], +]); + +function buildZoneToCategoryMap() { + const map = new Map(); + for (let [category, zone_names] of CATEGORIES.entries()) { + for (let zone_name of zone_names) { + if (map.has(zone_name)) { + console.error("Zone belongs to multiple categories: " + zone_name); + } else { + map.set(zone_name, category); + } + } + } + return map; +} + +const CATEGORY_BY_ZONE = buildZoneToCategoryMap(); + +// Maps zone name to category. +export function categoryByZoneName(zone_name) { + const category = CATEGORY_BY_ZONE.get(zone_name); + if (category !== undefined) return category; + return UNCLASSIFIED_CATEGORY; +} diff --git a/deps/v8/tools/zone-stats/details-selection-template.html b/deps/v8/tools/zone-stats/details-selection-template.html new file mode 100644 index 00000000000000..ef1e2f68b9b022 --- /dev/null +++ b/deps/v8/tools/zone-stats/details-selection-template.html @@ -0,0 +1,146 @@ + + +
+

Data selection

+
    +
  • + + +
  • +
  • + + +
  • +
  • + + +
  • +
  • + + +
  • +
  • + + ms +
  • +
  • + + ms +
  • +
  • + + +
  • +
+ +
+
diff --git a/deps/v8/tools/zone-stats/details-selection.js b/deps/v8/tools/zone-stats/details-selection.js new file mode 100644 index 00000000000000..039847b0cf2a15 --- /dev/null +++ b/deps/v8/tools/zone-stats/details-selection.js @@ -0,0 +1,367 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + +import {CATEGORIES, CATEGORY_NAMES, categoryByZoneName} from './categories.js'; + +export const VIEW_TOTALS = 'by-totals'; +export const VIEW_BY_ZONE_NAME = 'by-zone-name'; +export const VIEW_BY_ZONE_CATEGORY = 'by-zone-category'; + +export const KIND_ALLOCATED_MEMORY = 'kind-detailed-allocated'; +export const KIND_USED_MEMORY = 'kind-detailed-used'; +export const KIND_FREED_MEMORY = 'kind-detailed-freed'; + +defineCustomElement('details-selection', (templateText) => + class DetailsSelection extends HTMLElement { + constructor() { + super(); + const shadowRoot = this.attachShadow({mode: 'open'}); + shadowRoot.innerHTML = templateText; + this.isolateSelect.addEventListener( + 'change', e => this.handleIsolateChange(e)); + this.dataViewSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + this.dataKindSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + this.showTotalsSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + this.memoryUsageSampleSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + this.timeStartSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + this.timeEndSelect.addEventListener( + 'change', e => this.notifySelectionChanged(e)); + } + + connectedCallback() { + for (let category of CATEGORIES.keys()) { + this.$('#categories').appendChild(this.buildCategory(category)); + } + } + + set data(value) { + this._data = value; + this.dataChanged(); + } + + get data() { + return this._data; + } + + get 
selectedIsolate() { + return this._data[this.selection.isolate]; + } + + get selectedData() { + console.assert(this.data, 'invalid data'); + console.assert(this.selection, 'invalid selection'); + const time = this.selection.time; + return this.selectedIsolate.samples.get(time); + } + + $(id) { + return this.shadowRoot.querySelector(id); + } + + querySelectorAll(query) { + return this.shadowRoot.querySelectorAll(query); + } + + get dataViewSelect() { + return this.$('#data-view-select'); + } + + get dataKindSelect() { + return this.$('#data-kind-select'); + } + + get isolateSelect() { + return this.$('#isolate-select'); + } + + get memoryUsageSampleSelect() { + return this.$('#memory-usage-sample-select'); + } + + get showTotalsSelect() { + return this.$('#show-totals-select'); + } + + get timeStartSelect() { + return this.$('#time-start-select'); + } + + get timeEndSelect() { + return this.$('#time-end-select'); + } + + buildCategory(name) { + const div = document.createElement('div'); + div.id = name; + div.classList.add('box'); + const ul = document.createElement('ul'); + div.appendChild(ul); + const name_li = document.createElement('li'); + ul.appendChild(name_li); + name_li.innerHTML = CATEGORY_NAMES.get(name); + const percent_li = document.createElement('li'); + ul.appendChild(percent_li); + percent_li.innerHTML = '0%'; + percent_li.id = name + 'PercentContent'; + const all_li = document.createElement('li'); + ul.appendChild(all_li); + const all_button = document.createElement('button'); + all_li.appendChild(all_button); + all_button.innerHTML = 'All'; + all_button.addEventListener('click', e => this.selectCategory(name)); + const none_li = document.createElement('li'); + ul.appendChild(none_li); + const none_button = document.createElement('button'); + none_li.appendChild(none_button); + none_button.innerHTML = 'None'; + none_button.addEventListener('click', e => this.unselectCategory(name)); + const innerDiv = document.createElement('div'); + 
div.appendChild(innerDiv); + innerDiv.id = name + 'Content'; + const percentDiv = document.createElement('div'); + div.appendChild(percentDiv); + percentDiv.className = 'percentBackground'; + percentDiv.id = name + 'PercentBackground'; + return div; + } + + dataChanged() { + this.selection = {categories: {}, zones: new Map()}; + this.resetUI(true); + this.populateIsolateSelect(); + this.handleIsolateChange(); + this.$('#dataSelectionSection').style.display = 'block'; + } + + populateIsolateSelect() { + let isolates = Object.entries(this.data); + // Sort by peak heap memory consumption. + isolates.sort((a, b) => b[1].peakAllocatedMemory - a[1].peakAllocatedMemory); + this.populateSelect( + '#isolate-select', isolates, (key, isolate) => isolate.getLabel()); + } + + resetUI(resetIsolateSelect) { + if (resetIsolateSelect) removeAllChildren(this.isolateSelect); + + removeAllChildren(this.dataViewSelect); + removeAllChildren(this.dataKindSelect); + removeAllChildren(this.memoryUsageSampleSelect); + this.clearCategories(); + } + + handleIsolateChange(e) { + this.selection.isolate = this.isolateSelect.value; + if (this.selection.isolate.length === 0) { + this.selection.isolate = null; + return; + } + this.resetUI(false); + this.populateSelect( + '#data-view-select', [ + [VIEW_TOTALS, 'Total memory usage'], + [VIEW_BY_ZONE_NAME, 'Selected zones types'], + [VIEW_BY_ZONE_CATEGORY, 'Selected zone categories'], + ], + (key, label) => label, VIEW_TOTALS); + this.populateSelect( + '#data-kind-select', [ + [KIND_ALLOCATED_MEMORY, 'Allocated memory per zone'], + [KIND_USED_MEMORY, 'Used memory per zone'], + [KIND_FREED_MEMORY, 'Freed memory per zone'], + ], + (key, label) => label, KIND_ALLOCATED_MEMORY); + + this.populateSelect( + '#memory-usage-sample-select', + [...this.selectedIsolate.samples.entries()].filter(([time, sample]) => { + // Remove samples that does not have detailed per-zone data. 
+ return sample.zones !== undefined; + }), + (time, sample, index) => { + return ((index + ': ').padStart(6, '\u00A0') + + formatSeconds(time).padStart(8, '\u00A0') + ' ' + + formatBytes(sample.allocated).padStart(12, '\u00A0')); + }, + this.selectedIsolate.peakUsageTime); + + this.timeStartSelect.value = this.selectedIsolate.start; + this.timeEndSelect.value = this.selectedIsolate.end; + + this.populateCategories(); + this.notifySelectionChanged(); + } + + notifySelectionChanged(e) { + if (!this.selection.isolate) return; + + this.selection.data_view = this.dataViewSelect.value; + this.selection.data_kind = this.dataKindSelect.value; + this.selection.categories = Object.create(null); + this.selection.zones = new Map(); + this.$('#categories').style.display = 'none'; + for (let category of CATEGORIES.keys()) { + const selected = this.selectedInCategory(category); + if (selected.length > 0) this.selection.categories[category] = selected; + for (const zone_name of selected) { + this.selection.zones.set(zone_name, category); + } + } + this.$('#categories').style.display = 'block'; + this.selection.category_names = CATEGORY_NAMES; + this.selection.show_totals = this.showTotalsSelect.checked; + this.selection.time = Number(this.memoryUsageSampleSelect.value); + this.selection.timeStart = Number(this.timeStartSelect.value); + this.selection.timeEnd = Number(this.timeEndSelect.value); + this.updatePercentagesInCategory(); + this.updatePercentagesInZones(); + this.dispatchEvent(new CustomEvent( + 'change', {bubbles: true, composed: true, detail: this.selection})); + } + + updatePercentagesInCategory() { + const overalls = Object.create(null); + let overall = 0; + // Reset all categories. + this.selection.category_names.forEach((_, category) => { + overalls[category] = 0; + }); + // Only update categories that have selections. 
+ Object.entries(this.selection.categories).forEach(([category, value]) => { + overalls[category] = + Object.values(value).reduce( + (accu, current) => { + const zone_data = this.selectedData.zones.get(current); + return zone_data === undefined ? accu + : accu + zone_data.allocated; + }, 0) / + KB; + overall += overalls[category]; + }); + Object.entries(overalls).forEach(([category, category_overall]) => { + let percents = category_overall / overall * 100; + this.$(`#${category}PercentContent`).innerHTML = + `${percents.toFixed(1)}%`; + this.$('#' + category + 'PercentBackground').style.left = percents + '%'; + }); + } + + updatePercentagesInZones() { + const selected_data = this.selectedData; + const zones_data = selected_data.zones; + const total_allocated = selected_data.allocated; + this.querySelectorAll('.zonesSelectBox input').forEach(checkbox => { + const zone_name = checkbox.value; + const zone_data = zones_data.get(zone_name); + const zone_allocated = zone_data === undefined ? 0 : zone_data.allocated; + if (zone_allocated == 0) { + checkbox.parentNode.style.display = 'none'; + } else { + const percents = zone_allocated / total_allocated; + const percent_div = checkbox.parentNode.querySelector('.percentBackground'); + percent_div.style.left = (percents * 100) + '%'; + checkbox.parentNode.style.display = 'block'; + } + }); + } + + selectedInCategory(category) { + let tmp = []; + this.querySelectorAll('input[name=' + category + 'Checkbox]:checked') + .forEach(checkbox => tmp.push(checkbox.value)); + return tmp; + } + + createOption(value, text) { + const option = document.createElement('option'); + option.value = value; + option.text = text; + return option; + } + + populateSelect(id, iterable, labelFn = null, autoselect = null) { + if (labelFn == null) labelFn = e => e; + let index = 0; + for (let [key, value] of iterable) { + index++; + const label = labelFn(key, value, index); + const option = this.createOption(key, label); + if (autoselect === key) { + 
option.selected = 'selected'; + } + this.$(id).appendChild(option); + } + } + + clearCategories() { + for (const category of CATEGORIES.keys()) { + let f = this.$('#' + category + 'Content'); + while (f.firstChild) { + f.removeChild(f.firstChild); + } + } + } + + populateCategories() { + this.clearCategories(); + const categories = Object.create(null); + for (let cat of CATEGORIES.keys()) { + categories[cat] = []; + } + + for (const [zone_name, zone_stats] of this.selectedIsolate.zones) { + const category = categoryByZoneName(zone_name); + categories[category].push(zone_name); + } + for (let category of Object.keys(categories)) { + categories[category].sort(); + for (let zone_name of categories[category]) { + this.$('#' + category + 'Content') + .appendChild(this.createCheckBox(zone_name, category)); + } + } + } + + unselectCategory(category) { + this.querySelectorAll('input[name=' + category + 'Checkbox]') + .forEach(checkbox => checkbox.checked = false); + this.notifySelectionChanged(); + } + + selectCategory(category) { + this.querySelectorAll('input[name=' + category + 'Checkbox]') + .forEach(checkbox => checkbox.checked = true); + this.notifySelectionChanged(); + } + + createCheckBox(instance_type, category) { + const div = document.createElement('div'); + div.classList.add('zonesSelectBox'); + div.style.width = "200px"; + const input = document.createElement('input'); + div.appendChild(input); + input.type = 'checkbox'; + input.name = category + 'Checkbox'; + input.checked = 'checked'; + input.id = instance_type + 'Checkbox'; + input.instance_type = instance_type; + input.value = instance_type; + input.addEventListener('change', e => this.notifySelectionChanged(e)); + const label = document.createElement('label'); + div.appendChild(label); + label.innerText = instance_type; + label.htmlFor = instance_type + 'Checkbox'; + const percentDiv = document.createElement('div'); + percentDiv.className = 'percentBackground'; + div.appendChild(percentDiv); + return div; 
+ } +}); diff --git a/deps/v8/tools/zone-stats/global-timeline-template.html b/deps/v8/tools/zone-stats/global-timeline-template.html new file mode 100644 index 00000000000000..49e75646f16715 --- /dev/null +++ b/deps/v8/tools/zone-stats/global-timeline-template.html @@ -0,0 +1,16 @@ + + + diff --git a/deps/v8/tools/zone-stats/global-timeline.js b/deps/v8/tools/zone-stats/global-timeline.js new file mode 100644 index 00000000000000..e9d7280508a02e --- /dev/null +++ b/deps/v8/tools/zone-stats/global-timeline.js @@ -0,0 +1,340 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + +import {categoryByZoneName} from './categories.js'; + +import { + VIEW_TOTALS, + VIEW_BY_ZONE_NAME, + VIEW_BY_ZONE_CATEGORY, + + KIND_ALLOCATED_MEMORY, + KIND_USED_MEMORY, + KIND_FREED_MEMORY, +} from './details-selection.js'; + +defineCustomElement('global-timeline', (templateText) => + class GlobalTimeline extends HTMLElement { + constructor() { + super(); + const shadowRoot = this.attachShadow({mode: 'open'}); + shadowRoot.innerHTML = templateText; + } + + $(id) { + return this.shadowRoot.querySelector(id); + } + + set data(value) { + this._data = value; + this.stateChanged(); + } + + get data() { + return this._data; + } + + set selection(value) { + this._selection = value; + this.stateChanged(); + } + + get selection() { + return this._selection; + } + + isValid() { + return this.data && this.selection; + } + + hide() { + this.$('#container').style.display = 'none'; + } + + show() { + this.$('#container').style.display = 'block'; + } + + stateChanged() { + if (this.isValid()) { + const isolate_data = this.data[this.selection.isolate]; + const peakAllocatedMemory = isolate_data.peakAllocatedMemory; + this.$('#peak-memory-label').innerText = formatBytes(peakAllocatedMemory); + this.drawChart(); + } else { + this.hide(); + } + } + + 
getZoneLabels(zone_names) { + switch (this.selection.data_kind) { + case KIND_ALLOCATED_MEMORY: + return zone_names.map(name => { + return {label: name + " (allocated)", type: 'number'}; + }); + + case KIND_USED_MEMORY: + return zone_names.map(name => { + return {label: name + " (used)", type: 'number'}; + }); + + case KIND_FREED_MEMORY: + return zone_names.map(name => { + return {label: name + " (freed)", type: 'number'}; + }); + + default: + // Don't show detailed per-zone information. + return []; + } + } + + getTotalsData() { + const isolate_data = this.data[this.selection.isolate]; + const labels = [ + { label: "Time", type: "number" }, + { label: "Total allocated", type: "number" }, + { label: "Total used", type: "number" }, + { label: "Total freed", type: "number" }, + ]; + const chart_data = [labels]; + + const timeStart = this.selection.timeStart; + const timeEnd = this.selection.timeEnd; + const filter_entries = timeStart > 0 || timeEnd > 0; + + for (const [time, zone_data] of isolate_data.samples) { + if (filter_entries && (time < timeStart || time > timeEnd)) continue; + const data = []; + data.push(time * kMillis2Seconds); + data.push(zone_data.allocated / KB); + data.push(zone_data.used / KB); + data.push(zone_data.freed / KB); + chart_data.push(data); + } + return chart_data; + } + + getZoneData() { + const isolate_data = this.data[this.selection.isolate]; + const zone_names = isolate_data.sorted_zone_names; + const selected_zones = this.selection.zones; + const data_kind = this.selection.data_kind; + const show_totals = this.selection.show_totals; + const zones_labels = this.getZoneLabels(zone_names); + + const totals_labels = show_totals + ? 
[ + { label: "Total allocated", type: "number" }, + { label: "Total used", type: "number" }, + { label: "Total freed", type: "number" }, + ] + : []; + + const labels = [ + { label: "Time", type: "number" }, + ...totals_labels, + ...zones_labels, + ]; + const chart_data = [labels]; + + const timeStart = this.selection.timeStart; + const timeEnd = this.selection.timeEnd; + const filter_entries = timeStart > 0 || timeEnd > 0; + + for (const [time, zone_data] of isolate_data.samples) { + if (filter_entries && (time < timeStart || time > timeEnd)) continue; + const active_zone_stats = Object.create(null); + if (zone_data.zones !== undefined) { + for (const [zone_name, zone_stats] of zone_data.zones) { + if (!selected_zones.has(zone_name)) continue; // Not selected, skip. + + const current_stats = active_zone_stats[zone_name]; + if (current_stats === undefined) { + active_zone_stats[zone_name] = + { allocated: zone_stats.allocated, + used: zone_stats.used, + freed: zone_stats.freed, + }; + } else { + // We've got two zones with the same name. + console.log("=== Duplicate zone names: " + zone_name); + // Sum stats. 
+ current_stats.allocated += zone_stats.allocated; + current_stats.used += zone_stats.used; + current_stats.freed += zone_stats.freed; + } + } + } + + const data = []; + data.push(time * kMillis2Seconds); + if (show_totals) { + data.push(zone_data.allocated / KB); + data.push(zone_data.used / KB); + data.push(zone_data.freed / KB); + } + + zone_names.forEach(zone => { + const sample = active_zone_stats[zone]; + let value = null; + if (sample !== undefined) { + if (data_kind == KIND_ALLOCATED_MEMORY) { + value = sample.allocated / KB; + } else if (data_kind == KIND_FREED_MEMORY) { + value = sample.freed / KB; + } else { + // KIND_USED_MEMORY + value = sample.used / KB; + } + } + data.push(value); + }); + chart_data.push(data); + } + return chart_data; + } + + getCategoryData() { + const isolate_data = this.data[this.selection.isolate]; + const categories = Object.keys(this.selection.categories); + const categories_names = + categories.map(k => this.selection.category_names.get(k)); + const selected_zones = this.selection.zones; + const data_kind = this.selection.data_kind; + const show_totals = this.selection.show_totals; + + const categories_labels = this.getZoneLabels(categories_names); + + const totals_labels = show_totals + ? 
[ + { label: "Total allocated", type: "number" }, + { label: "Total used", type: "number" }, + { label: "Total freed", type: "number" }, + ] + : []; + + const labels = [ + { label: "Time", type: "number" }, + ...totals_labels, + ...categories_labels, + ]; + const chart_data = [labels]; + + const timeStart = this.selection.timeStart; + const timeEnd = this.selection.timeEnd; + const filter_entries = timeStart > 0 || timeEnd > 0; + + for (const [time, zone_data] of isolate_data.samples) { + if (filter_entries && (time < timeStart || time > timeEnd)) continue; + const active_category_stats = Object.create(null); + if (zone_data.zones !== undefined) { + for (const [zone_name, zone_stats] of zone_data.zones) { + const category = selected_zones.get(zone_name); + if (category === undefined) continue; // Zone was not selected. + + const current_stats = active_category_stats[category]; + if (current_stats === undefined) { + active_category_stats[category] = + { allocated: zone_stats.allocated, + used: zone_stats.used, + freed: zone_stats.freed, + }; + } else { + // Sum stats. 
+ current_stats.allocated += zone_stats.allocated; + current_stats.used += zone_stats.used; + current_stats.freed += zone_stats.freed; + } + } + } + + const data = []; + data.push(time * kMillis2Seconds); + if (show_totals) { + data.push(zone_data.allocated / KB); + data.push(zone_data.used / KB); + data.push(zone_data.freed / KB); + } + + categories.forEach(category => { + const sample = active_category_stats[category]; + let value = null; + if (sample !== undefined) { + if (data_kind == KIND_ALLOCATED_MEMORY) { + value = sample.allocated / KB; + } else if (data_kind == KIND_FREED_MEMORY) { + value = sample.freed / KB; + } else { + // KIND_USED_MEMORY + value = sample.used / KB; + } + } + data.push(value); + }); + chart_data.push(data); + } + return chart_data; + } + + getChartData() { + switch (this.selection.data_view) { + case VIEW_BY_ZONE_NAME: + return this.getZoneData(); + case VIEW_BY_ZONE_CATEGORY: + return this.getCategoryData(); + case VIEW_TOTALS: + default: + return this.getTotalsData(); + } + } + + getChartOptions() { + const options = { + isStacked: true, + interpolateNulls: true, + hAxis: { + format: '###.##s', + title: 'Time [s]', + }, + vAxis: { + format: '#,###KB', + title: 'Memory consumption [KBytes]' + }, + chartArea: {left:100, width: '85%', height: '70%'}, + legend: {position: 'top', maxLines: '1'}, + pointsVisible: true, + pointSize: 3, + explorer: {}, + }; + + // Overlay total allocated/used points on top of the graph. 
+ const series = {} + if (this.selection.data_view == VIEW_TOTALS) { + series[0] = {type: 'line', color: "red"}; + series[1] = {type: 'line', color: "blue"}; + series[2] = {type: 'line', color: "orange"}; + } else if (this.selection.show_totals) { + series[0] = {type: 'line', color: "red", lineDashStyle: [13, 13]}; + series[1] = {type: 'line', color: "blue", lineDashStyle: [13, 13]}; + series[2] = {type: 'line', color: "orange", lineDashStyle: [13, 13]}; + } + return Object.assign(options, {series: series}); + } + + drawChart() { + console.assert(this.data, 'invalid data'); + console.assert(this.selection, 'invalid selection'); + + const chart_data = this.getChartData(); + + const data = google.visualization.arrayToDataTable(chart_data); + const options = this.getChartOptions(); + const chart = new google.visualization.AreaChart(this.$('#chart')); + this.show(); + chart.draw(data, google.charts.Line.convertOptions(options)); + } +}); diff --git a/deps/v8/tools/zone-stats/helper.js b/deps/v8/tools/zone-stats/helper.js new file mode 100644 index 00000000000000..a0d04859d1c175 --- /dev/null +++ b/deps/v8/tools/zone-stats/helper.js @@ -0,0 +1,30 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +const KB = 1024; +const MB = KB * KB; +const GB = MB * KB; +const kMillis2Seconds = 1 / 1000; + +function formatBytes(bytes) { + const units = [' B', ' KB', ' MB', ' GB']; + const divisor = 1024; + let index = 0; + while (index < units.length && bytes >= divisor) { + index++; + bytes /= divisor; + } + return bytes.toFixed(2) + units[index]; +} + +function formatSeconds(millis) { + return (millis * kMillis2Seconds).toFixed(2) + 's'; +} + +function defineCustomElement(name, generator) { + let htmlTemplatePath = name + '-template.html'; + fetch(htmlTemplatePath) + .then(stream => stream.text()) + .then(templateText => customElements.define(name, generator(templateText))); +} diff --git a/deps/v8/tools/zone-stats/index.html b/deps/v8/tools/zone-stats/index.html new file mode 100644 index 00000000000000..a7dfa2ddfd8fe5 --- /dev/null +++ b/deps/v8/tools/zone-stats/index.html @@ -0,0 +1,93 @@ + + + + + + + + V8 Zone Statistics + + + + + + + + + + + + + + + + +

V8 Zone memory usage statistics

+ + + + + +

Visualize zone usage profile and statistics that have been gathered using

+ +

+ Note that the visualizer needs to run on a web server due to HTML imports + requiring CORS. +

+ + + diff --git a/deps/v8/tools/zone-stats/model.js b/deps/v8/tools/zone-stats/model.js new file mode 100644 index 00000000000000..80f45237631837 --- /dev/null +++ b/deps/v8/tools/zone-stats/model.js @@ -0,0 +1,92 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + +export class Isolate { + constructor(address) { + this.address = address; + this.start = null; + this.end = null; + this.peakUsageTime = null; + // Maps zone name to per-zone statistics. + this.zones = new Map(); + // Zone names sorted by memory usage (from low to high). + this.sorted_zone_names = []; + // Maps time to total and per-zone memory usages. + this.samples = new Map(); + + this.peakAllocatedMemory = 0; + + // Maps zone name to their max memory consumption. + this.zonePeakMemory = Object.create(null); + // Peak memory consumed by a single zone. + this.singleZonePeakMemory = 0; + } + + finalize() { + this.samples.forEach(sample => this.finalizeSample(sample)); + this.start = Math.floor(this.start); + this.end = Math.ceil(this.end); + this.sortZoneNamesByPeakMemory(); + } + + getLabel() { + let label = `${this.address}: `; + label += ` peak=${formatBytes(this.peakAllocatedMemory)}`; + label += ` time=[${this.start}, ${this.end}] ms`; + return label; + } + + finalizeSample(sample) { + const time = sample.time; + if (this.start == null) { + this.start = time; + this.end = time; + } else { + this.end = Math.max(this.end, time); + } + + const allocated = sample.allocated; + if (allocated > this.peakAllocatedMemory) { + this.peakUsageTime = time; + this.peakAllocatedMemory = allocated; + } + + const sample_zones = sample.zones; + if (sample_zones !== undefined) { + sample.zones.forEach((zone_sample, zone_name) => { + let zone_stats = this.zones.get(zone_name); + if (zone_stats === undefined) { + zone_stats = {max_allocated: 0, max_used: 0}; + 
this.zones.set(zone_name, zone_stats); + } + + zone_stats.max_allocated = + Math.max(zone_stats.max_allocated, zone_sample.allocated); + zone_stats.max_used = Math.max(zone_stats.max_used, zone_sample.used); + }); + } + } + + sortZoneNamesByPeakMemory() { + let entries = [...this.zones.keys()]; + entries.sort((a, b) => + this.zones.get(a).max_allocated - this.zones.get(b).max_allocated + ); + this.sorted_zone_names = entries; + + let max = 0; + for (let [key, value] of entries) { + this.zonePeakMemory[key] = value; + max = Math.max(max, value); + } + this.singleZonePeakMemory = max; + } + + getInstanceTypePeakMemory(type) { + if (!(type in this.zonePeakMemory)) return 0; + return this.zonePeakMemory[type]; + } +} diff --git a/deps/v8/tools/zone-stats/trace-file-reader-template.html b/deps/v8/tools/zone-stats/trace-file-reader-template.html new file mode 100644 index 00000000000000..ede7ee9a75bb8b --- /dev/null +++ b/deps/v8/tools/zone-stats/trace-file-reader-template.html @@ -0,0 +1,81 @@ + + + +
+
+ + Drag and drop a trace file into this area, or click to choose from disk. + + +
+
+
+
+
diff --git a/deps/v8/tools/zone-stats/trace-file-reader.js b/deps/v8/tools/zone-stats/trace-file-reader.js new file mode 100644 index 00000000000000..7b7cb6c5e46bd2 --- /dev/null +++ b/deps/v8/tools/zone-stats/trace-file-reader.js @@ -0,0 +1,298 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +'use strict'; + +import {Isolate} from './model.js'; + +defineCustomElement('trace-file-reader', (templateText) => + class TraceFileReader extends HTMLElement { + constructor() { + super(); + const shadowRoot = this.attachShadow({mode: 'open'}); + shadowRoot.innerHTML = templateText; + this.addEventListener('click', e => this.handleClick(e)); + this.addEventListener('dragover', e => this.handleDragOver(e)); + this.addEventListener('drop', e => this.handleChange(e)); + this.$('#file').addEventListener('change', e => this.handleChange(e)); + this.$('#fileReader').addEventListener('keydown', e => this.handleKeyEvent(e)); + } + + $(id) { + return this.shadowRoot.querySelector(id); + } + + get section() { + return this.$('#fileReaderSection'); + } + + updateLabel(text) { + this.$('#label').innerText = text; + } + + handleKeyEvent(event) { + if (event.key == "Enter") this.handleClick(event); + } + + handleClick(event) { + this.$('#file').click(); + } + + handleChange(event) { + // Used for drop and file change. + event.preventDefault(); + var host = event.dataTransfer ? 
event.dataTransfer : event.target; + this.readFile(host.files[0]); + } + + handleDragOver(event) { + event.preventDefault(); + } + + connectedCallback() { + this.$('#fileReader').focus(); + } + + readFile(file) { + if (!file) { + this.updateLabel('Failed to load file.'); + return; + } + this.$('#fileReader').blur(); + + this.section.className = 'loading'; + const reader = new FileReader(); + + if (['application/gzip', 'application/x-gzip'].includes(file.type)) { + reader.onload = (e) => { + try { + // Decode data as strings of 64Kb chunks. Bigger chunks may cause + // parsing failures in Oboe.js. + const chunkedInflate = new pako.Inflate( + {to: 'string', chunkSize: 65536} + ); + let processingState = undefined; + chunkedInflate.onData = (chunk) => { + if (processingState === undefined) { + processingState = this.startProcessing(file, chunk); + } else { + processingState.processChunk(chunk); + } + }; + chunkedInflate.onEnd = () => { + if (processingState !== undefined) { + const result_data = processingState.endProcessing(); + this.processLoadedData(file, result_data); + } + }; + console.log("======"); + const textResult = chunkedInflate.push(e.target.result); + + this.section.className = 'success'; + this.$('#fileReader').classList.add('done'); + } catch (err) { + console.error(err); + this.section.className = 'failure'; + } + }; + // Delay the loading a bit to allow for CSS animations to happen. + setTimeout(() => reader.readAsArrayBuffer(file), 0); + } else { + reader.onload = (e) => { + try { + // Process the whole file in at once. + const processingState = this.startProcessing(file, e.target.result); + const dataModel = processingState.endProcessing(); + this.processLoadedData(file, dataModel); + + this.section.className = 'success'; + this.$('#fileReader').classList.add('done'); + } catch (err) { + console.error(err); + this.section.className = 'failure'; + } + }; + // Delay the loading a bit to allow for CSS animations to happen. 
+ setTimeout(() => reader.readAsText(file), 0); + } + } + + processLoadedData(file, dataModel) { + console.log("Trace file parsed successfully."); + this.extendAndSanitizeModel(dataModel); + this.updateLabel('Finished loading \'' + file.name + '\'.'); + this.dispatchEvent(new CustomEvent( + 'change', {bubbles: true, composed: true, detail: dataModel})); + } + + createOrUpdateEntryIfNeeded(data, entry) { + console.assert(entry.isolate, 'entry should have an isolate'); + if (!(entry.isolate in data)) { + data[entry.isolate] = new Isolate(entry.isolate); + } + } + + extendAndSanitizeModel(data) { + const checkNonNegativeProperty = (obj, property) => { + console.assert(obj[property] >= 0, 'negative property', obj, property); + }; + + Object.values(data).forEach(isolate => isolate.finalize()); + } + + processOneZoneStatsEntry(data, entry_stats) { + this.createOrUpdateEntryIfNeeded(data, entry_stats); + const isolate_data = data[entry_stats.isolate]; + let zones = undefined; + const entry_zones = entry_stats.zones; + if (entry_zones !== undefined) { + zones = new Map(); + entry_zones.forEach(zone => { + // There might be multiple occurrences of the same zone in the set, + // combine numbers in this case. + const existing_zone_stats = zones.get(zone.name); + if (existing_zone_stats !== undefined) { + existing_zone_stats.allocated += zone.allocated; + existing_zone_stats.used += zone.used; + existing_zone_stats.freed += zone.freed; + } else { + zones.set(zone.name, { allocated: zone.allocated, + used: zone.used, + freed: zone.freed }); + } + }); + } + const time = entry_stats.time; + const sample = { + time: time, + allocated: entry_stats.allocated, + used: entry_stats.used, + freed: entry_stats.freed, + zones: zones + }; + isolate_data.samples.set(time, sample); + } + + startProcessing(file, chunk) { + const isV8TraceFile = chunk.includes('v8-zone-trace'); + const processingState = + isV8TraceFile ? 
this.startProcessingAsV8TraceFile(file) + : this.startProcessingAsChromeTraceFile(file); + + processingState.processChunk(chunk); + return processingState; + } + + startProcessingAsChromeTraceFile(file) { + console.log(`Processing log as chrome trace file.`); + const data = Object.create(null); // Final data container. + const parseOneZoneEvent = (actual_data) => { + if ('stats' in actual_data) { + try { + const entry_stats = JSON.parse(actual_data.stats); + this.processOneZoneStatsEntry(data, entry_stats); + } catch (e) { + console.error('Unable to parse data set entry', e); + } + } + }; + const zone_events_filter = (event) => { + if (event.name == 'V8.Zone_Stats') { + parseOneZoneEvent(event.args); + } + return oboe.drop; + }; + + const oboe_stream = oboe(); + // Trace files support two formats. + oboe_stream + // 1) {traceEvents: [ data ]} + .node('traceEvents.*', zone_events_filter) + // 2) [ data ] + .node('!.*', zone_events_filter) + .fail((errorReport) => { + throw new Error("Trace data parse failed: " + errorReport.thrown); + }); + + let failed = false; + + const processingState = { + file: file, + + processChunk(chunk) { + if (failed) return false; + try { + oboe_stream.emit('data', chunk); + return true; + } catch (e) { + console.error('Unable to parse chrome trace file.', e); + failed = true; + return false; + } + }, + + endProcessing() { + if (failed) return null; + oboe_stream.emit('end'); + return data; + }, + }; + return processingState; + } + + startProcessingAsV8TraceFile(file) { + console.log('Processing log as V8 trace file.'); + const data = Object.create(null); // Final data container. + + const processOneLine = (line) => { + try { + // Strip away a potentially present adb logcat prefix. 
+ line = line.replace(/^I\/v8\s*\(\d+\):\s+/g, ''); + + const entry = JSON.parse(line); + if (entry === null || entry.type === undefined) return; + if ((entry.type === 'v8-zone-trace') && ('stats' in entry)) { + const entry_stats = entry.stats; + this.processOneZoneStatsEntry(data, entry_stats); + } else { + console.log('Unknown entry type: ' + entry.type); + } + } catch (e) { + console.log('Unable to parse line: \'' + line + '\' (' + e + ')'); + } + }; + + let prev_chunk_leftover = ""; + + const processingState = { + file: file, + + processChunk(chunk) { + const contents = chunk.split('\n'); + const last_line = contents.pop(); + const linesCount = contents.length; + if (linesCount == 0) { + // There was only one line in the chunk, it may still be unfinished. + prev_chunk_leftover += last_line; + } else { + contents[0] = prev_chunk_leftover + contents[0]; + prev_chunk_leftover = last_line; + for (let line of contents) { + processOneLine(line); + } + } + return true; + }, + + endProcessing() { + if (prev_chunk_leftover.length > 0) { + processOneLine(prev_chunk_leftover); + prev_chunk_leftover = ""; + } + return data; + }, + }; + return processingState; + } +}); diff --git a/doc/abi_version_registry.json b/doc/abi_version_registry.json index 4c4c44f6d4d2ad..96d83e608fb81f 100644 --- a/doc/abi_version_registry.json +++ b/doc/abi_version_registry.json @@ -1,5 +1,6 @@ { "NODE_MODULE_VERSION": [ + { "modules": 88, "runtime": "node", "variant": "v8_8.6", "versions": "15.0.0-pre" }, { "modules": 87, "runtime": "electron", "variant": "electron", "versions": "12" }, { "modules": 86, "runtime": "node", "variant": "v8_8.4", "versions": "15.0.0-pre" }, { "modules": 85, "runtime": "electron", "variant": "electron", "versions": "11" }, diff --git a/doc/api/wasi.md b/doc/api/wasi.md index decb9641e161f2..5430ae63d50d83 100644 --- a/doc/api/wasi.md +++ b/doc/api/wasi.md @@ -70,8 +70,8 @@ Use [wabt](https://github.com/WebAssembly/wabt) to compile `.wat` to `.wasm` $ wat2wasm 
demo.wat ``` -The `--experimental-wasi-unstable-preview1` and `--experimental-wasm-bigint` -CLI arguments are needed for this example to run. +The `--experimental-wasi-unstable-preview1` CLI argument is needed for this +example to run. ## Class: `WASI`