From 01e7d1f3492317f7fa6069d389b7863fae9997e3 Mon Sep 17 00:00:00 2001
From: XadillaX
Date: Mon, 6 Jun 2022 17:26:06 +0800
Subject: [PATCH] deps: cherry-pick 4ab70f6 from V8 upstream
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

[Compiler] Remove untrusted code mitigations.

These are no longer enabled, so remove the code mitigation logic from
the codebase.

BUG=chromium:1003890

Change-Id: I536bb1732e8463281c21da446bbba8f47ede8ebe
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3045704
Commit-Queue: Ross McIlroy
Reviewed-by: Jakob Gruber
Reviewed-by: Clemens Backes
Cr-Commit-Position: refs/heads/master@{#76256}
Refs: https://github.com/v8/v8/commit/4ab70f6b218b719d9ba282a6a733c978216943d6

Fixes: https://github.com/nodejs/node/issues/43296
PR-URL: https://github.com/nodejs/node/pull/43328
Reviewed-By: Ben Noordhuis
Reviewed-By: Jiawen Geng
Reviewed-By: Mohammed Keyvanzadeh
Reviewed-By: Tobias Nießen
---
 deps/v8/BUILD.bazel | 4 -
 deps/v8/BUILD.gn | 41 ---
 deps/v8/gni/v8.gni | 10 +-
 deps/v8/src/builtins/arm/builtins-arm.cc | 6 -
 deps/v8/src/builtins/arm64/builtins-arm64.cc | 6 -
 deps/v8/src/builtins/mips/builtins-mips.cc | 6 -
 .../v8/src/builtins/mips64/builtins-mips64.cc | 6 -
 deps/v8/src/builtins/ppc/builtins-ppc.cc | 6 -
 .../src/builtins/riscv64/builtins-riscv64.cc | 6 -
 deps/v8/src/builtins/s390/builtins-s390.cc | 6 -
 .../src/builtins/setup-builtins-internal.cc | 10 +-
 deps/v8/src/builtins/x64/builtins-x64.cc | 6 -
 .../v8/src/codegen/arm/macro-assembler-arm.cc | 4 -
 deps/v8/src/codegen/arm/macro-assembler-arm.h | 2 -
 deps/v8/src/codegen/arm/register-arm.h | 1 -
 .../codegen/arm64/macro-assembler-arm64.cc | 4 -
 .../src/codegen/arm64/macro-assembler-arm64.h | 2 -
 deps/v8/src/codegen/arm64/register-arm64.h | 2 -
 deps/v8/src/codegen/code-stub-assembler.cc | 35 +--
 deps/v8/src/codegen/code-stub-assembler.h | 31 +-
 .../src/codegen/ia32/macro-assembler-ia32.cc | 57 ----
 .../src/codegen/ia32/macro-assembler-ia32.h | 8 -
 deps/v8/src/codegen/ia32/register-ia32.h | 3 -
 .../src/codegen/mips/macro-assembler-mips.cc | 4 -
 .../src/codegen/mips/macro-assembler-mips.h | 2 -
 deps/v8/src/codegen/mips/register-mips.h | 1 -
 .../codegen/mips64/macro-assembler-mips64.cc | 4 -
 .../codegen/mips64/macro-assembler-mips64.h | 2 -
 deps/v8/src/codegen/mips64/register-mips64.h | 1 -
 .../src/codegen/optimized-compilation-info.cc | 25 --
 .../src/codegen/optimized-compilation-info.h | 44 +--
 .../v8/src/codegen/ppc/macro-assembler-ppc.cc | 4 -
 deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 2 -
 deps/v8/src/codegen/ppc/register-ppc.h | 1 -
 deps/v8/src/codegen/register-configuration.cc | 40 ---
 .../riscv64/macro-assembler-riscv64.cc | 4 -
 .../codegen/riscv64/macro-assembler-riscv64.h | 2 -
 .../v8/src/codegen/riscv64/register-riscv64.h | 1 -
 .../src/codegen/s390/macro-assembler-s390.cc | 4 -
 .../src/codegen/s390/macro-assembler-s390.h | 1 -
 deps/v8/src/codegen/s390/register-s390.h | 1 -
 .../v8/src/codegen/x64/macro-assembler-x64.cc | 46 ---
 deps/v8/src/codegen/x64/macro-assembler-x64.h | 7 -
 deps/v8/src/codegen/x64/register-x64.h | 1 -
 deps/v8/src/common/globals.h | 14 -
 deps/v8/src/compiler/access-builder.cc | 84 +++---
 deps/v8/src/compiler/access-builder.h | 9 +-
 .../backend/arm/code-generator-arm.cc | 94 +-----
 .../backend/arm/instruction-selector-arm.cc | 6 -
 .../backend/arm64/code-generator-arm64.cc | 100 +------
 .../arm64/instruction-selector-arm64.cc | 40 +--
 .../v8/src/compiler/backend/code-generator.cc | 102 ++-----
 deps/v8/src/compiler/backend/code-generator.h | 37 +-
 .../backend/ia32/code-generator-ia32.cc | 57 +---
 .../backend/ia32/instruction-selector-ia32.cc | 6 -
 .../src/compiler/backend/instruction-codes.h | 12 +-
 .../compiler/backend/instruction-scheduler.cc | 6 -
 .../compiler/backend/instruction-selector.cc | 133 ++-------
 .../compiler/backend/instruction-selector.h | 40 +--
 deps/v8/src/compiler/backend/instruction.cc | 4 -
 deps/v8/src/compiler/backend/instruction.h | 3 +-
 .../v8/src/compiler/backend/jump-threading.cc | 156 +++++-----
 .../backend/mips/code-generator-mips.cc | 127 ---------
 .../mips/instruction-scheduler-mips.cc | 2 -
 .../backend/mips/instruction-selector-mips.cc | 6 -
 .../backend/mips64/code-generator-mips64.cc | 150 ----------
 .../mips64/instruction-scheduler-mips64.cc | 2 -
 .../mips64/instruction-selector-mips64.cc | 10 +-
 .../backend/ppc/code-generator-ppc.cc | 59 ----
 .../backend/ppc/instruction-selector-ppc.cc | 7 -
 .../backend/riscv64/code-generator-riscv64.cc | 157 ----------
 .../riscv64/instruction-scheduler-riscv64.cc | 2 -
 .../riscv64/instruction-selector-riscv64.cc | 10 +-
 .../backend/s390/code-generator-s390.cc | 58 ----
 .../backend/s390/instruction-selector-s390.cc | 6 -
 .../backend/x64/code-generator-x64.cc | 109 +------
 .../backend/x64/instruction-selector-x64.cc | 8 +-
 deps/v8/src/compiler/branch-elimination.cc | 17 --
 deps/v8/src/compiler/branch-elimination.h | 1 -
 .../v8/src/compiler/bytecode-graph-builder.cc | 15 +-
 deps/v8/src/compiler/code-assembler.cc | 49 +---
 deps/v8/src/compiler/code-assembler.h | 37 +--
 deps/v8/src/compiler/common-operator.cc | 212 ++++----------
 deps/v8/src/compiler/common-operator.h | 62 +---
 .../src/compiler/decompression-optimizer.cc | 7 +-
 .../src/compiler/effect-control-linearizer.cc | 54 +---
 .../src/compiler/effect-control-linearizer.h | 4 +-
 deps/v8/src/compiler/graph-assembler.cc | 41 +--
 deps/v8/src/compiler/graph-assembler.h | 42 +--
 deps/v8/src/compiler/js-call-reducer.cc | 16 +-
 deps/v8/src/compiler/js-create-lowering.cc | 1 -
 .../js-native-context-specialization.cc | 38 +--
 deps/v8/src/compiler/linkage.h | 18 +-
 .../v8/src/compiler/machine-graph-verifier.cc | 20 --
 deps/v8/src/compiler/machine-operator.cc | 55 ----
 deps/v8/src/compiler/machine-operator.h | 7 -
 deps/v8/src/compiler/memory-lowering.cc | 33 +--
 deps/v8/src/compiler/memory-lowering.h | 3 -
 deps/v8/src/compiler/memory-optimizer.cc | 11 +-
 deps/v8/src/compiler/memory-optimizer.h | 1 -
 deps/v8/src/compiler/node-matchers.h | 1 -
 deps/v8/src/compiler/opcodes.h | 5 -
 deps/v8/src/compiler/pipeline.cc | 49 +---
 deps/v8/src/compiler/pipeline.h | 3 +-
 .../src/compiler/property-access-builder.cc | 4 -
 deps/v8/src/compiler/raw-machine-assembler.cc | 23 +-
 deps/v8/src/compiler/raw-machine-assembler.h | 40 +--
 deps/v8/src/compiler/simplified-lowering.cc | 26 +-
 deps/v8/src/compiler/simplified-lowering.h | 3 -
 deps/v8/src/compiler/simplified-operator.cc | 267 ++++++++----------
 deps/v8/src/compiler/simplified-operator.h | 19 +-
 deps/v8/src/compiler/typer.cc | 4 -
 deps/v8/src/compiler/verifier.cc | 8 -
 deps/v8/src/compiler/wasm-compiler.cc | 125 ++------
 deps/v8/src/compiler/wasm-compiler.h | 13 +-
 deps/v8/src/diagnostics/objects-printer.cc | 1 -
 deps/v8/src/execution/isolate.cc | 32 ---
 deps/v8/src/flags/flag-definitions.h | 9 -
 deps/v8/src/init/startup-data-util.cc | 5 -
 .../src/interpreter/interpreter-assembler.cc | 125 ++++----
 .../src/interpreter/interpreter-assembler.h | 53 ++--
 .../src/interpreter/interpreter-generator.cc | 3 -
 .../src/snapshot/embedded/embedded-empty.cc | 12 -
.../baseline/ia32/liftoff-assembler-ia32.h | 12 +- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 61 +--- .../wasm/baseline/x64/liftoff-assembler-x64.h | 12 +- deps/v8/src/wasm/graph-builder-interface.cc | 1 - deps/v8/src/wasm/wasm-external-refs.cc | 14 - deps/v8/src/wasm/wasm-objects-inl.h | 1 - deps/v8/src/wasm/wasm-objects.cc | 8 - deps/v8/src/wasm/wasm-objects.h | 2 - deps/v8/test/cctest/BUILD.gn | 3 - deps/v8/test/cctest/cctest.status | 1 - .../cctest/compiler/code-assembler-tester.h | 7 +- .../cctest/compiler/test-code-generator.cc | 3 - deps/v8/test/common/wasm/wasm-interpreter.cc | 3 +- .../codegen/code-stub-assembler-unittest.cc | 7 +- .../backend/instruction-selector-unittest.cc | 41 ++- .../effect-control-linearizer-unittest.cc | 24 +- .../unittests/compiler/node-test-utils.cc | 12 - .../test/unittests/compiler/node-test-utils.h | 6 - .../compiler/simplified-lowering-unittest.cc | 5 +- .../interpreter-assembler-unittest.cc | 120 +++----- .../interpreter-assembler-unittest.h | 32 +-- deps/v8/tools/clusterfuzz/v8_foozzie.py | 7 - deps/v8/tools/testrunner/local/android.py | 6 - deps/v8/tools/testrunner/local/variants.py | 3 +- .../test-v8-untrusted-code-mitigations.js | 19 -- 148 files changed, 769 insertions(+), 3290 deletions(-) delete mode 100644 test/parallel/test-v8-untrusted-code-mitigations.js diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel index c5b4a94f911a7b..b8040db86f0ee5 100644 --- a/deps/v8/BUILD.bazel +++ b/deps/v8/BUILD.bazel @@ -150,7 +150,6 @@ config_setting( # v8_can_use_fpu_instructions # v8_use_mips_abi_hardfloat # v8_enable_gdbjit -# v8_untrusted_code_mitigations # v8_enable_minor_mc # v8_check_header_includes # v8_enable_shared_ro_heap @@ -305,9 +304,6 @@ v8_config( "V8_HAVE_TARGET_OS", "V8_TARGET_OS_MACOSX", ], - }) + select({ - ":is_android_x86": [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ], - "//conditions:default": [], }) + select({ ":is_v8_enable_pointer_compression": [ "V8_COMPRESS_POINTERS", diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn index 3e48fb11bff9f9..727c4f8da6c97c 100644 --- a/deps/v8/BUILD.gn +++ b/deps/v8/BUILD.gn @@ -228,11 +228,6 @@ declare_args() { (is_linux || is_chromeos || is_mac)) || (v8_current_cpu == "ppc64" && (is_linux || is_chromeos)) - # Enable mitigations for executing untrusted code. - # Disabled by default on ia32 due to conflicting requirements with embedded - # builtins. - v8_untrusted_code_mitigations = false - # Enable minor mark compact. 
v8_enable_minor_mc = true @@ -461,9 +456,6 @@ if (build_with_chromium && v8_current_cpu == "arm64" && assert(!v8_disable_write_barriers || v8_enable_single_generation, "Disabling write barriers works only with single generation") -assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations, - "Untrusted code mitigations are unsupported on ia32") - assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity, "Control-flow integrity is only supported on arm64") @@ -480,9 +472,6 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression, assert(!v8_enable_map_packing || v8_current_cpu == "x64", "Map packing is only supported on x64") -assert(!v8_use_multi_snapshots || !v8_control_flow_integrity, - "Control-flow integrity does not support multisnapshots") - assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression, "V8 Heap Sandbox requires pointer compression") @@ -872,9 +861,6 @@ config("features") { if (v8_enable_lazy_source_positions) { defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ] } - if (v8_use_multi_snapshots) { - defines += [ "V8_MULTI_SNAPSHOTS" ] - } if (v8_use_siphash) { defines += [ "V8_USE_SIPHASH" ] } @@ -1170,10 +1156,6 @@ config("toolchain") { defines += [ "V8_RUNTIME_CALL_STATS" ] } - if (!v8_untrusted_code_mitigations) { - defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ] - } - if (v8_no_inline) { if (is_win) { cflags += [ "/Ob0" ] @@ -1309,8 +1291,6 @@ template("asm_to_inline_asm") { if (is_android && enable_java_templates) { android_assets("v8_external_startup_data_assets") { if (v8_use_external_startup_data) { - # We don't support side-by-side snapshots on Android within Chromium. - assert(!v8_use_multi_snapshots) deps = [ "//v8" ] renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ] if (current_cpu == "arm" || current_cpu == "x86" || @@ -1987,17 +1967,6 @@ if (emit_builtins_as_inline_asm) { args = [] } } -if (v8_use_multi_snapshots) { - run_mksnapshot("trusted") { - args = [ "--no-untrusted-code-mitigations" ] - embedded_variant = "Trusted" - } - if (emit_builtins_as_inline_asm) { - asm_to_inline_asm("trusted") { - args = [] - } - } -} action("v8_dump_build_config") { script = "tools/testrunner/utils/dump_build_config.py" @@ -2086,16 +2055,6 @@ v8_source_set("v8_snapshot") { deps += [ ":v8_base" ] sources += [ "src/snapshot/snapshot-external.cc" ] - - if (v8_use_multi_snapshots) { - public_deps += [ ":run_mksnapshot_trusted" ] - if (emit_builtins_as_inline_asm) { - deps += [ ":asm_to_inline_asm_trusted" ] - sources += [ "$target_gen_dir/embedded_trusted.cc" ] - } else { - sources += [ "$target_gen_dir/embedded_trusted.S" ] - } - } } else { # Also top-level visibility targets can depend on this. visibility += [ "//:gn_visibility" ] diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni index a3346517978b32..0c22a858da0c6c 100644 --- a/deps/v8/gni/v8.gni +++ b/deps/v8/gni/v8.gni @@ -35,7 +35,8 @@ declare_args() { # as an argument to profiler's method `takeHeapSnapshot`. v8_enable_raw_heap_snapshots = false - # Enable several snapshots side-by-side (e.g. default and for trusted code). + # Deprecated flag that no longer does anything. + # TODO(rmcilroy): Remove this gn arg once it's no longer used by the bots. v8_use_multi_snapshots = false # Use external files for startup data blobs: @@ -99,13 +100,6 @@ if (v8_use_external_startup_data == "") { v8_use_external_startup_data = !is_ios } -if (v8_use_multi_snapshots) { - # Silently disable multi snapshots if they're incompatible with the current - # build configuration. 
This allows us to set v8_use_multi_snapshots=true on - # all bots, and e.g. no-snapshot bots will automatically do the right thing. - v8_use_multi_snapshots = v8_use_external_startup_data && !build_with_chromium -} - if (v8_enable_backtrace == "") { v8_enable_backtrace = is_debug && !v8_optimized_debug } diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc index f45c927e67546e..6c0de814872d92 100644 --- a/deps/v8/src/builtins/arm/builtins-arm.cc +++ b/deps/v8/src/builtins/arm/builtins-arm.cc @@ -2777,12 +2777,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ cmp(cp, Operand(0)); __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc index b1f9a63e3c7e9c..db8a198c7a2f9f 100644 --- a/deps/v8/src/builtins/arm64/builtins-arm64.cc +++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc @@ -3250,12 +3250,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ Bind(¬_js_frame); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - { // Clear c_entry_fp, like we do in `LeaveExitFrame`. UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc index 8f4bf4d06bd13f..a07c786c57984d 100644 --- a/deps/v8/src/builtins/mips/builtins-mips.cc +++ b/deps/v8/src/builtins/mips/builtins-mips.cc @@ -2723,12 +2723,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&zero); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc index 45e1c32f82f990..70176b37702e82 100644 --- a/deps/v8/src/builtins/mips64/builtins-mips64.cc +++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc @@ -2814,12 +2814,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&zero); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. 
It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc index 02b76175ec128f..888daf0b73d152 100644 --- a/deps/v8/src/builtins/ppc/builtins-ppc.cc +++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc @@ -2646,12 +2646,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&skip); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc index 764ef97952cf13..4b5ec97448d358 100644 --- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc +++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc @@ -2903,12 +2903,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&zero); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Compute the handler entry address and jump to it. UseScratchRegisterScope temp(masm); Register scratch = temp.Acquire(); diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc index 5129cc6ee31550..879c1ed4059e0b 100644 --- a/deps/v8/src/builtins/s390/builtins-s390.cc +++ b/deps/v8/src/builtins/s390/builtins-s390.cc @@ -2679,12 +2679,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); __ bind(&skip); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. { UseScratchRegisterScope temps(masm); diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc index 2724f9a2001e4e..c51020119f5213 100644 --- a/deps/v8/src/builtins/setup-builtins-internal.cc +++ b/deps/v8/src/builtins/setup-builtins-internal.cc @@ -158,9 +158,8 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin, Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone); const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel) ? 
0 : argc + 1; - compiler::CodeAssemblerState state( - isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name, - PoisoningMitigationLevel::kDontPoison, builtin); + compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv, + CodeKind::BUILTIN, name, builtin); generator(&state); Handle code = compiler::CodeAssembler::GenerateCode( &state, BuiltinAssemblerOptions(isolate, builtin), @@ -183,9 +182,8 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin, CallInterfaceDescriptor descriptor(interface_descriptor); // Ensure descriptor is already initialized. DCHECK_LE(0, descriptor.GetRegisterParameterCount()); - compiler::CodeAssemblerState state( - isolate, &zone, descriptor, CodeKind::BUILTIN, name, - PoisoningMitigationLevel::kDontPoison, builtin); + compiler::CodeAssemblerState state(isolate, &zone, descriptor, + CodeKind::BUILTIN, name, builtin); generator(&state); Handle code = compiler::CodeAssembler::GenerateCode( &state, BuiltinAssemblerOptions(isolate, builtin), diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc index 14186e3be6d1eb..a3fc113b899834 100644 --- a/deps/v8/src/builtins/x64/builtins-x64.cc +++ b/deps/v8/src/builtins/x64/builtins-x64.cc @@ -3691,12 +3691,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size, __ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi); __ bind(&skip); - // Reset the masking register. This is done independent of the underlying - // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work - // with both configurations. It is safe to always do this, because the - // underlying register is caller-saved and can be arbitrarily clobbered. - __ ResetSpeculationPoisonRegister(); - // Clear c_entry_fp, like we do in `LeaveExitFrame`. ExternalReference c_entry_fp_address = ExternalReference::Create( IsolateAddressId::kCEntryFPAddress, masm->isolate()); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 26d16406a6277e..5d6512fc6b71e9 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -2660,10 +2660,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta)); } -void TurboAssembler::ResetSpeculationPoisonRegister() { - mov(kSpeculationPoisonRegister, Operand(-1)); -} - void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index 41bc5ec54432a7..bcecaec42928fc 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -560,8 +560,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. 
This doesn't emit any code for this diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h index 6608ad4edebb49..8cc838945d25bc 100644 --- a/deps/v8/src/codegen/arm/register-arm.h +++ b/deps/v8/src/codegen/arm/register-arm.h @@ -336,7 +336,6 @@ constexpr Register kReturnRegister2 = r2; constexpr Register kJSFunctionRegister = r1; constexpr Register kContextRegister = r7; constexpr Register kAllocateSizeRegister = r1; -constexpr Register kSpeculationPoisonRegister = r9; constexpr Register kInterpreterAccumulatorRegister = r0; constexpr Register kInterpreterBytecodeOffsetRegister = r5; constexpr Register kInterpreterBytecodeArrayRegister = r6; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index ef95b4e8132400..be26ddb7e21290 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -3540,10 +3540,6 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) { adr(rd, -pc_offset()); } -void TurboAssembler::ResetSpeculationPoisonRegister() { - Mov(kSpeculationPoisonRegister, -1); -} - void TurboAssembler::RestoreFPAndLR() { static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize == StandardFrameConstants::kCallerPCOffset, diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 9128ba2c18edb7..b2a0f91d8c76de 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1347,8 +1347,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(const Register& rd); - void ResetSpeculationPoisonRegister(); - // --------------------------------------------------------------------------- // Pointer compression Support diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h index 5b234526a4361a..ae6c4c920037c0 100644 --- a/deps/v8/src/codegen/arm64/register-arm64.h +++ b/deps/v8/src/codegen/arm64/register-arm64.h @@ -701,8 +701,6 @@ constexpr Register kJSFunctionRegister = x1; constexpr Register kContextRegister = cp; constexpr Register kAllocateSizeRegister = x1; -constexpr Register kSpeculationPoisonRegister = x23; - constexpr Register kInterpreterAccumulatorRegister = x0; constexpr Register kInterpreterBytecodeOffsetRegister = x19; constexpr Register kInterpreterBytecodeArrayRegister = x20; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index e25135decee421..7ad013e2fca6a9 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -2193,9 +2193,10 @@ TNode CodeStubAssembler::LoadArrayLength( } template -TNode CodeStubAssembler::LoadArrayElement( - TNode array, int array_header_size, TNode index_node, - int additional_offset, LoadSensitivity needs_poisoning) { +TNode CodeStubAssembler::LoadArrayElement(TNode array, + int array_header_size, + TNode index_node, + int additional_offset) { // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? 
static_assert(std::is_same::value || std::is_same::value || @@ -2210,23 +2211,17 @@ TNode CodeStubAssembler::LoadArrayElement( CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array), array_header_size)); constexpr MachineType machine_type = MachineTypeOf::value; - // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning - if (needs_poisoning == LoadSensitivity::kSafe) { - return UncheckedCast(LoadFromObject(machine_type, array, offset)); - } else { - return UncheckedCast( - Load(machine_type, array, offset, needs_poisoning)); - } + return UncheckedCast(LoadFromObject(machine_type, array, offset)); } template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadArrayElement( - TNode, int, TNode, int, LoadSensitivity); + TNode, int, TNode, int); template TNode CodeStubAssembler::LoadFixedArrayElement( TNode object, TNode index, int additional_offset, - LoadSensitivity needs_poisoning, CheckBounds check_bounds) { + CheckBounds check_bounds) { // TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants? static_assert(std::is_same::value || std::is_same::value || @@ -2238,25 +2233,22 @@ TNode CodeStubAssembler::LoadFixedArrayElement( if (NeedsBoundsCheck(check_bounds)) { FixedArrayBoundsCheck(object, index, additional_offset); } - TNode element = - LoadArrayElement(object, FixedArray::kHeaderSize, index, - additional_offset, needs_poisoning); + TNode element = LoadArrayElement(object, FixedArray::kHeaderSize, + index, additional_offset); return CAST(element); } template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, - int, LoadSensitivity, - CheckBounds); + int, CheckBounds); template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, int, - LoadSensitivity, CheckBounds); template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadFixedArrayElement(TNode, TNode, int, - LoadSensitivity, CheckBounds); + CheckBounds); void CodeStubAssembler::FixedArrayBoundsCheck(TNode array, TNode index, @@ -2291,9 +2283,8 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode array, TNode CodeStubAssembler::LoadPropertyArrayElement( TNode object, TNode index) { int additional_offset = 0; - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe; return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index, - additional_offset, needs_poisoning)); + additional_offset)); } TNode CodeStubAssembler::LoadPropertyArrayLength( @@ -2648,7 +2639,7 @@ TNode CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement( TNode CodeStubAssembler::LoadWeakFixedArrayElement( TNode object, TNode index, int additional_offset) { return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index, - additional_offset, LoadSensitivity::kSafe); + additional_offset); } TNode CodeStubAssembler::LoadFixedDoubleArrayElement( diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index 008af6006f5b63..b3930c0a5231d7 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -1448,40 +1448,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Array is any array-like type that has a fixed header followed by // tagged elements. 
template - TNode LoadArrayElement( - TNode array, int array_header_size, TNode index, - int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + TNode LoadArrayElement(TNode array, int array_header_size, + TNode index, + int additional_offset = 0); template TNode LoadFixedArrayElement( TNode object, TNode index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe, CheckBounds check_bounds = CheckBounds::kAlways); // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. - TNode UnsafeLoadFixedArrayElement( - TNode object, TNode index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + TNode UnsafeLoadFixedArrayElement(TNode object, + TNode index, + int additional_offset = 0) { return LoadFixedArrayElement(object, index, additional_offset, - needs_poisoning, CheckBounds::kDebugOnly); + CheckBounds::kDebugOnly); } - TNode LoadFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + TNode LoadFixedArrayElement(TNode object, int index, + int additional_offset = 0) { return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, needs_poisoning); + additional_offset); } // This doesn't emit a bounds-check. As part of the security-performance // tradeoff, only use it if it is performance critical. - TNode UnsafeLoadFixedArrayElement( - TNode object, int index, int additional_offset = 0, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + TNode UnsafeLoadFixedArrayElement(TNode object, int index, + int additional_offset = 0) { return LoadFixedArrayElement(object, IntPtrConstant(index), - additional_offset, needs_poisoning, - CheckBounds::kDebugOnly); + additional_offset, CheckBounds::kDebugOnly); } TNode LoadPropertyArrayElement(TNode object, diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index c95ea8ad2c6d21..09fb17a6cea109 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -2385,63 +2385,6 @@ void TurboAssembler::Jump(Handle code_object, RelocInfo::Mode rmode) { jmp(code_object, rmode); } -void TurboAssembler::RetpolineCall(Register reg) { - ASM_CODE_COMMENT(this); - Label setup_return, setup_target, inner_indirect_branch, capture_spec; - - jmp(&setup_return); // Jump past the entire retpoline below. - - bind(&inner_indirect_branch); - call(&setup_target); - - bind(&capture_spec); - pause(); - jmp(&capture_spec); - - bind(&setup_target); - mov(Operand(esp, 0), reg); - ret(0); - - bind(&setup_return); - call(&inner_indirect_branch); // Callee will return after this instruction. -} - -void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) { - ASM_CODE_COMMENT(this); - Label setup_return, setup_target, inner_indirect_branch, capture_spec; - - jmp(&setup_return); // Jump past the entire retpoline below. - - bind(&inner_indirect_branch); - call(&setup_target); - - bind(&capture_spec); - pause(); - jmp(&capture_spec); - - bind(&setup_target); - mov(Operand(esp, 0), destination, rmode); - ret(0); - - bind(&setup_return); - call(&inner_indirect_branch); // Callee will return after this instruction. 
-} - -void TurboAssembler::RetpolineJump(Register reg) { - ASM_CODE_COMMENT(this); - Label setup_target, capture_spec; - - call(&setup_target); - - bind(&capture_spec); - pause(); - jmp(&capture_spec); - - bind(&setup_target); - mov(Operand(esp, 0), reg); - ret(0); -} - void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met, Label::Distance condition_met_distance) { diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index 527c3570470626..1d5243f51815d4 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -158,15 +158,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler { JumpMode jump_mode = JumpMode::kJump); void Jump(const ExternalReference& reference); - void RetpolineCall(Register reg); - void RetpolineCall(Address destination, RelocInfo::Mode rmode); - void Jump(Handle code_object, RelocInfo::Mode rmode); void LoadMap(Register destination, Register object); - void RetpolineJump(Register reg); - void Trap(); void DebugBreak(); @@ -480,9 +475,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - // TODO(860429): Remove remaining poisoning infrastructure on ia32. - void ResetSpeculationPoisonRegister() { UNREACHABLE(); } - // Control-flow integrity: // Define a function entrypoint. This doesn't emit any code for this diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h index 5dc035d96690e4..37a5783deda0d9 100644 --- a/deps/v8/src/codegen/ia32/register-ia32.h +++ b/deps/v8/src/codegen/ia32/register-ia32.h @@ -161,9 +161,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi; constexpr Register kRootRegister = ebx; -// TODO(860429): Remove remaining poisoning infrastructure on ia32. -constexpr Register kSpeculationPoisonRegister = no_reg; - constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable. } // namespace internal diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc index 9c1af1cb056c7f..72166769eb6ca2 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc @@ -5519,10 +5519,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { pop(ra); // Restore ra } -void TurboAssembler::ResetSpeculationPoisonRegister() { - li(kSpeculationPoisonRegister, -1); -} - void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h index ffa5f5820d2d8e..365640a617ab7a 100644 --- a/deps/v8/src/codegen/mips/macro-assembler-mips.h +++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h @@ -817,8 +817,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. 
This doesn't emit any code for this diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h index 95164a86c1c3a7..7fd259bf9ba328 100644 --- a/deps/v8/src/codegen/mips/register-mips.h +++ b/deps/v8/src/codegen/mips/register-mips.h @@ -362,7 +362,6 @@ constexpr Register kReturnRegister2 = a0; constexpr Register kJSFunctionRegister = a1; constexpr Register kContextRegister = s7; constexpr Register kAllocateSizeRegister = a0; -constexpr Register kSpeculationPoisonRegister = t3; constexpr Register kInterpreterAccumulatorRegister = v0; constexpr Register kInterpreterBytecodeOffsetRegister = t4; constexpr Register kInterpreterBytecodeArrayRegister = t5; diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc index 708cf4baa6736a..2d684b9087ff4a 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc @@ -6059,10 +6059,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { pop(ra); // Restore ra } -void TurboAssembler::ResetSpeculationPoisonRegister() { - li(kSpeculationPoisonRegister, -1); -} - void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h index a4991bcb1e642d..1ad5f4fdae64df 100644 --- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h +++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h @@ -836,8 +836,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. 
This doesn't emit any code for this diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h index 51b03aba1fa27a..1fbe3ec7ac2c7c 100644 --- a/deps/v8/src/codegen/mips64/register-mips64.h +++ b/deps/v8/src/codegen/mips64/register-mips64.h @@ -373,7 +373,6 @@ constexpr Register kReturnRegister2 = a0; constexpr Register kJSFunctionRegister = a1; constexpr Register kContextRegister = s7; constexpr Register kAllocateSizeRegister = a0; -constexpr Register kSpeculationPoisonRegister = t3; constexpr Register kInterpreterAccumulatorRegister = v0; constexpr Register kInterpreterBytecodeOffsetRegister = t0; constexpr Register kInterpreterBytecodeArrayRegister = t1; diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc index e3ca07a3c9d015..b8eb5e62fe4cf7 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.cc +++ b/deps/v8/src/codegen/optimized-compilation-info.cc @@ -63,31 +63,7 @@ OptimizedCompilationInfo::OptimizedCompilationInfo( ConfigureFlags(); } -#ifdef DEBUG -bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const { - switch (flag) { - case kPoisonRegisterArguments: - return untrusted_code_mitigations(); - default: - return true; - } - UNREACHABLE(); -} - -bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const { - switch (flag) { - case kPoisonRegisterArguments: - if (!GetFlag(kPoisonRegisterArguments)) return true; - return untrusted_code_mitigations() && called_with_code_start_register(); - default: - return true; - } - UNREACHABLE(); -} -#endif // DEBUG - void OptimizedCompilationInfo::ConfigureFlags() { - if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations(); if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls(); if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) { @@ -104,7 +80,6 @@ void OptimizedCompilationInfo::ConfigureFlags() { case CodeKind::TURBOPROP: set_called_with_code_start_register(); set_switch_jump_table(); - if (FLAG_untrusted_code_mitigations) set_poison_register_arguments(); // TODO(yangguo): Disable this in case of debugging for crbug.com/826613 if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness(); break; diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h index b7ed0d29c4f477..d92964c79613d9 100644 --- a/deps/v8/src/codegen/optimized-compilation-info.h +++ b/deps/v8/src/codegen/optimized-compilation-info.h @@ -58,21 +58,19 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { V(SourcePositions, source_positions, 4) \ V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \ V(LoopPeeling, loop_peeling, 6) \ - V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \ - V(SwitchJumpTable, switch_jump_table, 8) \ - V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \ - V(PoisonRegisterArguments, poison_register_arguments, 10) \ - V(AllocationFolding, allocation_folding, 11) \ - V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \ - V(TraceTurboJson, trace_turbo_json, 13) \ - V(TraceTurboGraph, trace_turbo_graph, 14) \ - V(TraceTurboScheduled, trace_turbo_scheduled, 15) \ - V(TraceTurboAllocation, trace_turbo_allocation, 16) \ - V(TraceHeapBroker, trace_heap_broker, 17) \ - V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \ - V(ConcurrentInlining, concurrent_inlining, 19) \ - V(DiscardResultForTesting, discard_result_for_testing, 20) \ - V(InlineJSWasmCalls, 
inline_js_wasm_calls, 21) + V(SwitchJumpTable, switch_jump_table, 7) \ + V(CalledWithCodeStartRegister, called_with_code_start_register, 8) \ + V(AllocationFolding, allocation_folding, 9) \ + V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 10) \ + V(TraceTurboJson, trace_turbo_json, 11) \ + V(TraceTurboGraph, trace_turbo_graph, 12) \ + V(TraceTurboScheduled, trace_turbo_scheduled, 13) \ + V(TraceTurboAllocation, trace_turbo_allocation, 14) \ + V(TraceHeapBroker, trace_heap_broker, 15) \ + V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \ + V(ConcurrentInlining, concurrent_inlining, 17) \ + V(DiscardResultForTesting, discard_result_for_testing, 18) \ + V(InlineJSWasmCalls, inline_js_wasm_calls, 19) enum Flag { #define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit, @@ -82,7 +80,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { #define DEF_GETTER(Camel, Lower, Bit) \ bool Lower() const { \ - DCHECK(FlagGetIsValid(k##Camel)); \ return GetFlag(k##Camel); \ } FLAGS(DEF_GETTER) @@ -90,17 +87,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { #define DEF_SETTER(Camel, Lower, Bit) \ void set_##Lower() { \ - DCHECK(FlagSetIsValid(k##Camel)); \ SetFlag(k##Camel); \ } FLAGS(DEF_SETTER) #undef DEF_SETTER -#ifdef DEBUG - bool FlagGetIsValid(Flag flag) const; - bool FlagSetIsValid(Flag flag) const; -#endif // DEBUG - // Construct a compilation info for optimized compilation. OptimizedCompilationInfo(Zone* zone, Isolate* isolate, Handle shared, @@ -141,13 +132,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { } compiler::NodeObserver* node_observer() const { return node_observer_; } - void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) { - poisoning_level_ = poisoning_level; - } - PoisoningMitigationLevel GetPoisoningMitigationLevel() const { - return poisoning_level_; - } - // Code getters and setters. void SetCode(Handle code); @@ -269,8 +253,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final { // Compilation flags. unsigned flags_ = 0; - PoisoningMitigationLevel poisoning_level_ = - PoisoningMitigationLevel::kDontPoison; const CodeKind code_kind_; Builtin builtin_ = Builtin::kNoBuiltinId; diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc index f243055490cfa5..447cc8e4dbc314 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc @@ -3453,10 +3453,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, addi(sp, sp, Operand(2 * kSimd128Size)); } -void TurboAssembler::ResetSpeculationPoisonRegister() { - mov(kSpeculationPoisonRegister, Operand(-1)); -} - void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) { CmpS64(x, Operand(y), r0); beq(dest); diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h index 035c29b1e5266e..95290af5aa8120 100644 --- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h +++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h @@ -735,8 +735,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // The return address on the stack is used by frame iteration. void StoreReturnAddressAndCall(Register target); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. 
This doesn't emit any code for this diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h index ffeb327055fb55..68adfdb1557bce 100644 --- a/deps/v8/src/codegen/ppc/register-ppc.h +++ b/deps/v8/src/codegen/ppc/register-ppc.h @@ -349,7 +349,6 @@ constexpr Register kReturnRegister2 = r5; constexpr Register kJSFunctionRegister = r4; constexpr Register kContextRegister = r30; constexpr Register kAllocateSizeRegister = r4; -constexpr Register kSpeculationPoisonRegister = r14; constexpr Register kInterpreterAccumulatorRegister = r3; constexpr Register kInterpreterBytecodeOffsetRegister = r15; constexpr Register kInterpreterBytecodeArrayRegister = r16; diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc index aca5295c11929e..17dddcd88278ae 100644 --- a/deps/v8/src/codegen/register-configuration.cc +++ b/deps/v8/src/codegen/register-configuration.cc @@ -102,42 +102,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration { DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration, GetDefaultRegisterConfiguration) -// Allocatable registers with the masking register removed. -class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration { - public: - ArchDefaultPoisoningRegisterConfiguration() - : RegisterConfiguration( - Register::kNumRegisters, DoubleRegister::kNumRegisters, - kMaxAllocatableGeneralRegisterCount - 1, - get_num_allocatable_double_registers(), - InitializeGeneralRegisterCodes(), get_allocatable_double_codes(), - kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) { - } - - private: - static const int* InitializeGeneralRegisterCodes() { - int filtered_index = 0; - for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) { - if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) { - allocatable_general_codes_[filtered_index] = - kAllocatableGeneralCodes[i]; - filtered_index++; - } - } - DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1); - return allocatable_general_codes_; - } - - static int - allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1]; -}; - -int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_ - [kMaxAllocatableGeneralRegisterCount - 1]; - -DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration, - GetDefaultPoisoningRegisterConfiguration) - // RestrictedRegisterConfiguration uses the subset of allocatable general // registers the architecture support, which results into generating assembly // to use less registers. Currently, it's only used by RecordWrite code stub. 
@@ -184,10 +148,6 @@ const RegisterConfiguration* RegisterConfiguration::Default() { return GetDefaultRegisterConfiguration(); } -const RegisterConfiguration* RegisterConfiguration::Poisoning() { - return GetDefaultPoisoningRegisterConfiguration(); -} - const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters( RegList registers) { int num = NumRegs(registers); diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc index 4d231adfb4823e..b49cd669441080 100644 --- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc @@ -4744,10 +4744,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { pop(ra); // Restore ra } -void TurboAssembler::ResetSpeculationPoisonRegister() { - li(kSpeculationPoisonRegister, -1); -} - void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h index 75d99a34059b1b..cd7310e00aa671 100644 --- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h @@ -858,8 +858,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. This doesn't emit any code for this diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h index 69654a4f54d156..af652f0e61a86e 100644 --- a/deps/v8/src/codegen/riscv64/register-riscv64.h +++ b/deps/v8/src/codegen/riscv64/register-riscv64.h @@ -344,7 +344,6 @@ constexpr Register kReturnRegister2 = a2; constexpr Register kJSFunctionRegister = a1; constexpr Register kContextRegister = s7; constexpr Register kAllocateSizeRegister = a1; -constexpr Register kSpeculationPoisonRegister = a7; constexpr Register kInterpreterAccumulatorRegister = a0; constexpr Register kInterpreterBytecodeOffsetRegister = t0; constexpr Register kInterpreterBytecodeArrayRegister = t1; diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc index 4de7f2cf4bb292..481105cdd36b3d 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc @@ -4670,10 +4670,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst, lay(sp, MemOperand(sp, kSimd128Size)); } -void TurboAssembler::ResetSpeculationPoisonRegister() { - mov(kSpeculationPoisonRegister, Operand(-1)); -} - void TurboAssembler::ComputeCodeStartAddress(Register dst) { larl(dst, Operand(-pc_offset() / 2)); } diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h index 51cdb48326380e..1e3567cf9cf222 100644 --- a/deps/v8/src/codegen/s390/macro-assembler-s390.h +++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h @@ -1015,7 +1015,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CheckPageFlag(Register object, Register scratch, int mask, Condition cc, Label* condition_met); - void ResetSpeculationPoisonRegister(); void ComputeCodeStartAddress(Register dst); void LoadPC(Register dst); diff --git a/deps/v8/src/codegen/s390/register-s390.h 
b/deps/v8/src/codegen/s390/register-s390.h index 48accf08c5d0c4..6e3b6a3e2b226a 100644 --- a/deps/v8/src/codegen/s390/register-s390.h +++ b/deps/v8/src/codegen/s390/register-s390.h @@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4; constexpr Register kJSFunctionRegister = r3; constexpr Register kContextRegister = r13; constexpr Register kAllocateSizeRegister = r3; -constexpr Register kSpeculationPoisonRegister = r9; constexpr Register kInterpreterAccumulatorRegister = r2; constexpr Register kInterpreterBytecodeOffsetRegister = r6; constexpr Register kInterpreterBytecodeArrayRegister = r7; diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 5a8dc356b8f45e..0ac3b3697914da 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -1993,47 +1993,6 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) { } } -void TurboAssembler::RetpolineCall(Register reg) { - ASM_CODE_COMMENT(this); - Label setup_return, setup_target, inner_indirect_branch, capture_spec; - - jmp(&setup_return); // Jump past the entire retpoline below. - - bind(&inner_indirect_branch); - call(&setup_target); - - bind(&capture_spec); - pause(); - jmp(&capture_spec); - - bind(&setup_target); - movq(Operand(rsp, 0), reg); - ret(0); - - bind(&setup_return); - call(&inner_indirect_branch); // Callee will return after this instruction. -} - -void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) { - Move(kScratchRegister, destination, rmode); - RetpolineCall(kScratchRegister); -} - -void TurboAssembler::RetpolineJump(Register reg) { - ASM_CODE_COMMENT(this); - Label setup_target, capture_spec; - - call(&setup_target); - - bind(&capture_spec); - pause(); - jmp(&capture_spec); - - bind(&setup_target); - movq(Operand(rsp, 0), reg); - ret(0); -} - void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) { if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope avx_scope(this, AVX); @@ -3523,11 +3482,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) { leaq(dst, Operand(¤t, -pc)); } -void TurboAssembler::ResetSpeculationPoisonRegister() { - // TODO(turbofan): Perhaps, we want to put an lfence here. 
- Move(kSpeculationPoisonRegister, -1); -} - void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit, DeoptimizeKind kind, Label* ret, Label*) { diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index 02b9eb410ec837..25689b01d970a9 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -432,17 +432,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler { void CallCodeTObject(Register code); void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump); - void RetpolineCall(Register reg); - void RetpolineCall(Address destination, RelocInfo::Mode rmode); - void Jump(Address destination, RelocInfo::Mode rmode); void Jump(const ExternalReference& reference); void Jump(Operand op); void Jump(Handle code_object, RelocInfo::Mode rmode, Condition cc = always); - void RetpolineJump(Register reg); - void CallForDeoptimization(Builtin target, int deopt_id, Label* exit, DeoptimizeKind kind, Label* ret, Label* jump_deoptimization_entry_label); @@ -632,8 +627,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler { // This is an alternative to embedding the {CodeObject} handle as a reference. void ComputeCodeStartAddress(Register dst); - void ResetSpeculationPoisonRegister(); - // Control-flow integrity: // Define a function entrypoint. This doesn't emit any code for this diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h index 61e7ccf396a3e1..b8d97e104b0a73 100644 --- a/deps/v8/src/codegen/x64/register-x64.h +++ b/deps/v8/src/codegen/x64/register-x64.h @@ -212,7 +212,6 @@ constexpr Register kReturnRegister2 = r8; constexpr Register kJSFunctionRegister = rdi; constexpr Register kContextRegister = rsi; constexpr Register kAllocateSizeRegister = rdx; -constexpr Register kSpeculationPoisonRegister = r11; constexpr Register kInterpreterAccumulatorRegister = rax; constexpr Register kInterpreterBytecodeOffsetRegister = r9; constexpr Register kInterpreterBytecodeArrayRegister = r12; diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h index 6aee59eb83fd54..a2506ef63c7124 100644 --- a/deps/v8/src/common/globals.h +++ b/deps/v8/src/common/globals.h @@ -1701,20 +1701,6 @@ enum IsolateAddressId { kIsolateAddressCount }; -enum class PoisoningMitigationLevel { - kPoisonAll, - kDontPoison, - kPoisonCriticalOnly -}; - -enum class LoadSensitivity { - kCritical, // Critical loads are poisoned whenever we can run untrusted - // code (i.e., when --untrusted-code-mitigations is on). - kUnsafe, // Unsafe loads are poisoned when full poisoning is on - // (--branch-load-poisoning). - kSafe // Safe loads are never poisoned. -}; - // The reason for a WebAssembly trap. 
#define FOREACH_WASM_TRAPREASON(V) \ V(TrapUnreachable) \ diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc index 675371df57a99b..ea2c35d9537951 100644 --- a/deps/v8/src/compiler/access-builder.cc +++ b/deps/v8/src/compiler/access-builder.cc @@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() { FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset, MaybeHandle(), MaybeHandle(), Type::Any(), MachineType::AnyTagged(), - kFullWriteBarrier, LoadSensitivity::kCritical}; + kFullWriteBarrier}; return access; } // static FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() { - FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset, - MaybeHandle(), MaybeHandle(), - Type::Any(), MachineType::TaggedPointer(), - kPointerWriteBarrier, LoadSensitivity::kCritical}; + FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset, + MaybeHandle(), MaybeHandle(), + Type::Any(), MachineType::TaggedPointer(), + kPointerWriteBarrier}; return access; } // static FieldAccess AccessBuilder::ForJSObjectElements() { - FieldAccess access = {kTaggedBase, JSObject::kElementsOffset, - MaybeHandle(), MaybeHandle(), - Type::Internal(), MachineType::TaggedPointer(), - kPointerWriteBarrier, LoadSensitivity::kCritical}; + FieldAccess access = {kTaggedBase, JSObject::kElementsOffset, + MaybeHandle(), MaybeHandle(), + Type::Internal(), MachineType::TaggedPointer(), + kPointerWriteBarrier}; return access; } @@ -410,7 +410,7 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() { FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset, MaybeHandle(), MaybeHandle(), Type::OtherInternal(), MachineType::AnyTagged(), - kFullWriteBarrier, LoadSensitivity::kCritical}; + kFullWriteBarrier}; return access; } @@ -424,7 +424,6 @@ FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() { : Type::ExternalPointer(), MachineType::Pointer(), kNoWriteBarrier, - LoadSensitivity::kCritical, ConstFieldInfo::None(), false, #ifdef V8_HEAP_SANDBOX @@ -445,7 +444,6 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() { : Type::ExternalPointer(), MachineType::Pointer(), kNoWriteBarrier, - LoadSensitivity::kUnsafe, ConstFieldInfo::None(), false, #ifdef V8_HEAP_SANDBOX @@ -756,7 +754,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() { : Type::ExternalPointer(), MachineType::Pointer(), kNoWriteBarrier, - LoadSensitivity::kUnsafe, ConstFieldInfo::None(), false, #ifdef V8_HEAP_SANDBOX @@ -902,10 +899,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) { } // static FieldAccess AccessBuilder::ForCellValue() { - FieldAccess access = {kTaggedBase, Cell::kValueOffset, - Handle(), MaybeHandle(), - Type::Any(), MachineType::AnyTagged(), - kFullWriteBarrier, LoadSensitivity::kCritical}; + FieldAccess access = {kTaggedBase, Cell::kValueOffset, + Handle(), MaybeHandle(), + Type::Any(), MachineType::AnyTagged(), + kFullWriteBarrier}; return access; } @@ -966,11 +963,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() { } // statics -ElementAccess AccessBuilder::ForFixedArrayElement( - ElementsKind kind, LoadSensitivity load_sensitivity) { - ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, - Type::Any(), MachineType::AnyTagged(), - kFullWriteBarrier, load_sensitivity}; +ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) { + ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), + MachineType::AnyTagged(), kFullWriteBarrier}; 
switch (kind) { case PACKED_SMI_ELEMENTS: access.type = Type::SignedSmall(); @@ -1038,59 +1033,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() { } // static -ElementAccess AccessBuilder::ForTypedArrayElement( - ExternalArrayType type, bool is_external, - LoadSensitivity load_sensitivity) { +ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type, + bool is_external) { BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase; int header_size = is_external ? 0 : ByteArray::kHeaderSize; switch (type) { case kExternalInt8Array: { - ElementAccess access = {taggedness, header_size, - Type::Signed32(), MachineType::Int8(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Signed32(), + MachineType::Int8(), kNoWriteBarrier}; return access; } case kExternalUint8Array: case kExternalUint8ClampedArray: { - ElementAccess access = {taggedness, header_size, - Type::Unsigned32(), MachineType::Uint8(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Unsigned32(), + MachineType::Uint8(), kNoWriteBarrier}; return access; } case kExternalInt16Array: { - ElementAccess access = {taggedness, header_size, - Type::Signed32(), MachineType::Int16(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Signed32(), + MachineType::Int16(), kNoWriteBarrier}; return access; } case kExternalUint16Array: { - ElementAccess access = {taggedness, header_size, - Type::Unsigned32(), MachineType::Uint16(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Unsigned32(), + MachineType::Uint16(), kNoWriteBarrier}; return access; } case kExternalInt32Array: { - ElementAccess access = {taggedness, header_size, - Type::Signed32(), MachineType::Int32(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Signed32(), + MachineType::Int32(), kNoWriteBarrier}; return access; } case kExternalUint32Array: { - ElementAccess access = {taggedness, header_size, - Type::Unsigned32(), MachineType::Uint32(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Unsigned32(), + MachineType::Uint32(), kNoWriteBarrier}; return access; } case kExternalFloat32Array: { - ElementAccess access = {taggedness, header_size, - Type::Number(), MachineType::Float32(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Number(), + MachineType::Float32(), kNoWriteBarrier}; return access; } case kExternalFloat64Array: { - ElementAccess access = {taggedness, header_size, - Type::Number(), MachineType::Float64(), - kNoWriteBarrier, load_sensitivity}; + ElementAccess access = {taggedness, header_size, Type::Number(), + MachineType::Float64(), kNoWriteBarrier}; return access; } case kExternalBigInt64Array: diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h index fa68628cf80007..3aa29a2d55a115 100644 --- a/deps/v8/src/compiler/access-builder.h +++ b/deps/v8/src/compiler/access-builder.h @@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final // Provides access to FixedArray elements. static ElementAccess ForFixedArrayElement(); - static ElementAccess ForFixedArrayElement( - ElementsKind kind, - LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe); + static ElementAccess ForFixedArrayElement(ElementsKind kind); // Provides access to SloppyArgumentsElements elements. 
static ElementAccess ForSloppyArgumentsElementsMappedEntry(); @@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final static FieldAccess ForEnumCacheIndices(); // Provides access to Fixed{type}TypedArray and External{type}Array elements. - static ElementAccess ForTypedArrayElement( - ExternalArrayType type, bool is_external, - LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe); + static ElementAccess ForTypedArrayElement(ExternalArrayType type, + bool is_external); // Provides access to HashTable fields. static FieldAccess ForHashTableBaseNumberOfElements(); diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc index 29c7897ec9ef60..9617003880eed5 100644 --- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc +++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc @@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter { SBit OutputSBit() const { switch (instr_->flags_mode()) { case kFlags_branch: - case kFlags_branch_and_poison: case kFlags_deoptimize: - case kFlags_deoptimize_and_poison: case kFlags_set: case kFlags_trap: case kFlags_select: @@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, - ArmOperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister)); - } -} - -void ComputePoisonedAddressForLoad(CodeGenerator* codegen, - InstructionCode opcode, - ArmOperandConverter const& i, - Register address) { - DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode)); - switch (AddressingModeField::decode(opcode)) { - case kMode_Offset_RI: - codegen->tasm()->mov(address, i.InputImmediate(1)); - codegen->tasm()->add(address, address, i.InputRegister(0)); - break; - case kMode_Offset_RR: - codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1)); - break; - default: - UNREACHABLE(); - } - codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister)); -} - } // namespace #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ @@ -691,25 +660,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - UseScratchRegisterScope temps(tasm()); - Register scratch = temps.Acquire(); - - // Set a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - __ ComputeCodeStartAddress(scratch); - __ cmp(kJavaScriptCallCodeStartRegister, scratch); - __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq); - __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne); - __ csdb(); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ and_(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. 
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -1619,12 +1569,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArmLdrb: __ ldrb(i.OutputRegister(), i.InputOffset()); DCHECK_EQ(LeaveCC, i.OutputSBit()); - EmitWordLoadPoisoningIfNeeded(this, opcode, i); break; case kArmLdrsb: __ ldrsb(i.OutputRegister(), i.InputOffset()); DCHECK_EQ(LeaveCC, i.OutputSBit()); - EmitWordLoadPoisoningIfNeeded(this, opcode, i); break; case kArmStrb: __ strb(i.InputRegister(0), i.InputOffset(1)); @@ -1632,11 +1580,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArmLdrh: __ ldrh(i.OutputRegister(), i.InputOffset()); - EmitWordLoadPoisoningIfNeeded(this, opcode, i); break; case kArmLdrsh: __ ldrsh(i.OutputRegister(), i.InputOffset()); - EmitWordLoadPoisoningIfNeeded(this, opcode, i); break; case kArmStrh: __ strh(i.InputRegister(0), i.InputOffset(1)); @@ -1644,22 +1590,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArmLdr: __ ldr(i.OutputRegister(), i.InputOffset()); - EmitWordLoadPoisoningIfNeeded(this, opcode, i); break; case kArmStr: __ str(i.InputRegister(0), i.InputOffset(1)); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; case kArmVldrF32: { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - UseScratchRegisterScope temps(tasm()); - Register address = temps.Acquire(); - ComputePoisonedAddressForLoad(this, opcode, i, address); - __ vldr(i.OutputFloatRegister(), address, 0); - } else { - __ vldr(i.OutputFloatRegister(), i.InputOffset()); - } + __ vldr(i.OutputFloatRegister(), i.InputOffset()); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } @@ -1688,15 +1625,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } case kArmVldrF64: { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - UseScratchRegisterScope temps(tasm()); - Register address = temps.Acquire(); - ComputePoisonedAddressForLoad(this, opcode, i, address); - __ vldr(i.OutputDoubleRegister(), address, 0); - } else { - __ vldr(i.OutputDoubleRegister(), i.InputOffset()); - } + __ vldr(i.OutputDoubleRegister(), i.InputOffset()); DCHECK_EQ(LeaveCC, i.OutputSBit()); break; } @@ -1832,10 +1761,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ isb(SY); break; } - case kArchWordPoisonOnSpeculation: - __ and_(i.OutputRegister(0), i.InputRegister(0), - Operand(kSpeculationPoisonRegister)); - break; case kArmVmullLow: { auto dt = static_cast(MiscField::decode(instr->opcode())); __ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(), @@ -3597,20 +3522,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ b(flabel); // no fallthru to flabel. } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - condition = NegateFlagsCondition(condition); - __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - Operand(kSpeculationPoisonRegister), SBit::LeaveCC, - FlagsConditionToCondition(condition)); - __ csdb(); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); @@ -3805,7 +3716,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves = call_descriptor->CalleeSavedRegisters(); diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc index 2698d45ae7f4de..a624b1864064f1 100644 --- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc @@ -630,17 +630,11 @@ void InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kNone: UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } InstructionOperand output = g.DefineAsRegister(node); EmitLoad(this, opcode, &output, base, index); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc index c1213834269645..ff88c93cdcf80e 100644 --- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc @@ -460,47 +460,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, } #endif // V8_ENABLE_WEBASSEMBLY -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - Arm64OperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - Register poison = value.Is64Bits() ? kSpeculationPoisonRegister - : kSpeculationPoisonRegister.W(); - codegen->tasm()->And(value, value, Operand(poison)); - } -} - -void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode, - Arm64OperandConverter* i, VRegister output_reg) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - AddressingMode address_mode = AddressingModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) { - UseScratchRegisterScope temps(codegen->tasm()); - Register address = temps.AcquireX(); - switch (address_mode) { - case kMode_MRI: // Fall through. - case kMode_MRR: - codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1)); - break; - case kMode_Operand2_R_LSL_I: - codegen->tasm()->Add(address, i->InputRegister(0), - i->InputOperand2_64(1)); - break; - default: - // Note: we don't need poisoning for kMode_Root loads as those loads - // target a fixed offset from root register which is set once when - // initializing the vm. 
- UNREACHABLE(); - } - codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister)); - codegen->tasm()->Ldr(output_reg, MemOperand(address)); - } else { - codegen->tasm()->Ldr(output_reg, i->MemoryOperand()); - } -} - // Handles unary ops that work for float (scalar), double (scalar), or NEON. template void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr, @@ -714,29 +673,6 @@ void CodeGenerator::BailoutIfDeoptimized() { __ Bind(¬_deoptimized); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - UseScratchRegisterScope temps(tasm()); - Register scratch = temps.AcquireX(); - - // Set a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - __ ComputeCodeStartAddress(scratch); - __ Cmp(kJavaScriptCallCodeStartRegister, scratch); - __ Csetm(kSpeculationPoisonRegister, eq); - __ Csdb(); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - UseScratchRegisterScope temps(tasm()); - Register scratch = temps.AcquireX(); - - __ Mov(scratch, sp); - __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ And(scratch, scratch, kSpeculationPoisonRegister); - __ Mov(sp, scratch); -} - // Assembles an instruction after register allocation, producing machine code. CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -1814,12 +1750,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Ldrb: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldrb(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64Ldrsb: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldrsb(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrsbW: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1832,12 +1766,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Ldrh: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldrh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64Ldrsh: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldrsh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrshW: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1850,12 +1782,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Ldrsw: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldrsw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrW: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldr(i.OutputRegister32(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64StrW: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1864,19 +1794,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kArm64Ldr: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); __ Ldr(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrDecompressTaggedSigned: __ 
DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrDecompressTaggedPointer: __ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64LdrDecompressAnyTagged: __ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kArm64Str: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1887,7 +1813,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArm64LdrS: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); - EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S()); + __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand()); break; case kArm64StrS: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1895,7 +1821,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kArm64LdrD: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); - EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister()); + __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand()); break; case kArm64StrD: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -1916,10 +1842,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ Dsb(FullSystem, BarrierAll); __ Isb(); break; - case kArchWordPoisonOnSpeculation: - __ And(i.OutputRegister(0), i.InputRegister(0), - Operand(kSpeculationPoisonRegister)); - break; case kWord32AtomicLoadInt8: ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32); __ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); @@ -2907,7 +2829,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { ArchOpcode opcode = instr->arch_opcode(); if (opcode == kArm64CompareAndBranch32) { - DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison); switch (condition) { case kEqual: __ Cbz(i.InputRegister32(0), tlabel); @@ -2919,7 +2840,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { UNREACHABLE(); } } else if (opcode == kArm64CompareAndBranch) { - DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison); switch (condition) { case kEqual: __ Cbz(i.InputRegister64(0), tlabel); @@ -2931,7 +2851,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { UNREACHABLE(); } } else if (opcode == kArm64TestAndBranch32) { - DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison); switch (condition) { case kEqual: __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel); @@ -2943,7 +2862,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { UNREACHABLE(); } } else if (opcode == kArm64TestAndBranch) { - DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison); switch (condition) { case kEqual: __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel); @@ -2961,19 +2879,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ B(flabel); // no fallthru to flabel. } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - condition = NegateFlagsCondition(condition); - __ CmovX(kSpeculationPoisonRegister, xzr, - FlagsConditionToCondition(condition)); - __ Csdb(); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); @@ -3143,7 +3048,6 @@ void CodeGenerator::AssembleConstructFrame() { // arguments count was pushed. required_slots -= unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue; - ResetSpeculationPoison(); } #if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc index 6a1a101e35baaa..c8bada6ab20d2e 100644 --- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -845,10 +845,6 @@ void InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kNone: UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } if (node->opcode() == IrOpcode::kProtectedLoad) { opcode |= AccessModeField::encode(kMemoryAccessProtected); } @@ -856,8 +852,6 @@ void InstructionSelector::VisitLoad(Node* node) { EmitLoad(this, node, opcode, immediate_mode, rep); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); } void InstructionSelector::VisitStore(Node* node) { @@ -2324,9 +2318,6 @@ template bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node, typename CbzOrTbzMatchTrait::IntegralType value, Node* user, FlagsCondition cond, FlagsContinuation* cont) { - // Branch poisoning requires flags to be set, so when it's enabled for - // a particular branch, we shouldn't be applying the cbz/tbz optimization. - DCHECK(!cont->IsPoisoned()); // Only handle branches and deoptimisations. 
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false; @@ -2414,7 +2405,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, std::swap(left, right); } - if (opcode == kArm64Cmp && !cont->IsPoisoned()) { + if (opcode == kArm64Cmp) { Int64Matcher m(right); if (m.HasResolvedValue()) { if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node, @@ -2432,19 +2423,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node, FlagsContinuation* cont) { Int32BinopMatcher m(node); FlagsCondition cond = cont->condition(); - if (!cont->IsPoisoned()) { - if (m.right().HasResolvedValue()) { - if (TryEmitCbzOrTbz<32>(selector, m.left().node(), - m.right().ResolvedValue(), node, cond, cont)) { - return; - } - } else if (m.left().HasResolvedValue()) { - FlagsCondition commuted_cond = CommuteFlagsCondition(cond); - if (TryEmitCbzOrTbz<32>(selector, m.right().node(), - m.left().ResolvedValue(), node, commuted_cond, - cont)) { - return; - } + if (m.right().HasResolvedValue()) { + if (TryEmitCbzOrTbz<32>(selector, m.left().node(), + m.right().ResolvedValue(), node, cond, cont)) { + return; + } + } else if (m.left().HasResolvedValue()) { + FlagsCondition commuted_cond = CommuteFlagsCondition(cond); + if (TryEmitCbzOrTbz<32>(selector, m.right().node(), + m.left().ResolvedValue(), node, commuted_cond, + cont)) { + return; } } ArchOpcode opcode = kArm64Cmp32; @@ -2533,8 +2522,7 @@ struct TestAndBranchMatcher { Matcher matcher_; void Initialize() { - if (cont_->IsBranch() && !cont_->IsPoisoned() && - matcher_.right().HasResolvedValue() && + if (cont_->IsBranch() && matcher_.right().HasResolvedValue() && base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) { // If the mask has only one bit set, we can use tbz/tbnz. DCHECK((cont_->condition() == kEqual) || @@ -2842,7 +2830,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value, } // Branch could not be combined with a compare, compare against 0 and branch. 
- if (!cont->IsPoisoned() && cont->IsBranch()) { + if (cont->IsBranch()) { Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(), g.UseRegister(value), g.Label(cont->true_block()), g.Label(cont->false_block())); diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 9e378b84584cdd..ed25a1f1ff72ed 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject { size_t const target_count_; }; -CodeGenerator::CodeGenerator( - Zone* codegen_zone, Frame* frame, Linkage* linkage, - InstructionSequence* instructions, OptimizedCompilationInfo* info, - Isolate* isolate, base::Optional osr_helper, - int start_source_position, JumpOptimizationInfo* jump_opt, - PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, - Builtin builtin, size_t max_unoptimized_frame_height, - size_t max_pushed_argument_count, const char* debug_name) +CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage, + InstructionSequence* instructions, + OptimizedCompilationInfo* info, Isolate* isolate, + base::Optional osr_helper, + int start_source_position, + JumpOptimizationInfo* jump_opt, + const AssemblerOptions& options, Builtin builtin, + size_t max_unoptimized_frame_height, + size_t max_pushed_argument_count, + const char* debug_name) : zone_(codegen_zone), isolate_(isolate), frame_access_state_(nullptr), @@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator( codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS), protected_instructions_(codegen_zone), result_(kSuccess), - poisoning_level_(poisoning_level), block_starts_(codegen_zone), instr_starts_(codegen_zone), debug_name_(debug_name) { @@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() { BailoutIfDeoptimized(); } - offsets_info_.init_poison = tasm()->pc_offset(); - InitializeSpeculationPoison(); - // Define deoptimization literals for all inlined functions. DCHECK_EQ(0u, deoptimization_literals_.size()); for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined : @@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() { tasm()->bind(GetLabel(current_block_)); - TryInsertBranchPoisoning(block); - if (block->must_construct_frame()) { AssembleConstructFrame(); // We need to setup the root register after we assemble the prologue, to @@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() { result_ = kSuccess; } -void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) { - // See if our predecessor was a basic block terminated by a branch_and_poison - // instruction. If yes, then perform the masking based on the flags. - if (block->PredecessorCount() != 1) return; - RpoNumber pred_rpo = (block->predecessors())[0]; - const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo); - if (pred->code_start() == pred->code_end()) return; - Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1); - FlagsMode mode = FlagsModeField::decode(instr->opcode()); - switch (mode) { - case kFlags_branch_and_poison: { - BranchInfo branch; - RpoNumber target = ComputeBranchInfo(&branch, instr); - if (!target.IsValid()) { - // Non-trivial branch, add the masking code. 
- FlagsCondition condition = branch.condition; - if (branch.false_label == GetLabel(block->rpo_number())) { - condition = NegateFlagsCondition(condition); - } - AssembleBranchPoisoning(condition, instr); - } - break; - } - case kFlags_deoptimize_and_poison: { - UNREACHABLE(); - } - default: - break; - } -} - void CodeGenerator::AssembleArchBinarySearchSwitchRange( Register input, RpoNumber def_block, std::pair* begin, std::pair* end) { @@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( FlagsCondition condition = FlagsConditionField::decode(instr->opcode()); switch (mode) { - case kFlags_branch: - case kFlags_branch_and_poison: { + case kFlags_branch: { BranchInfo branch; RpoNumber target = ComputeBranchInfo(&branch, instr); if (target.IsValid()) { @@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( AssembleArchBranch(instr, &branch); break; } - case kFlags_deoptimize: - case kFlags_deoptimize_and_poison: { + case kFlags_deoptimize: { // Assemble a conditional eager deoptimization after this instruction. InstructionOperandConverter i(this, instr); size_t frame_state_offset = @@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( DeoptImmedArgsCountField::decode(instr->opcode()); DeoptimizationExit* const exit = AddDeoptimizationExit( instr, frame_state_offset, immediate_args_count); - Label continue_label; BranchInfo branch; branch.condition = condition; branch.true_label = exit->label(); - branch.false_label = &continue_label; + branch.false_label = exit->continue_label(); branch.fallthru = true; AssembleArchDeoptBranch(instr, &branch); - tasm()->bind(&continue_label); - if (mode == kFlags_deoptimize_and_poison) { - AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr); - } tasm()->bind(exit->continue_label()); break; } @@ -900,11 +858,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction( } } - // TODO(jarin) We should thread the flag through rather than set it. - if (instr->IsCall()) { - ResetSpeculationPoison(); - } - return kSuccess; } @@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) { if (needs_frame_state) { MarkLazyDeoptSite(); - // If the frame state is present, it starts at argument 2 - after - // the code address and the poison-alias index. - size_t frame_state_offset = 2; + // If the frame state is present, it starts at argument 1 - after + // the code address. + size_t frame_state_offset = 1; FrameStateDescriptor* descriptor = GetDeoptimizationEntry(instr, frame_state_offset).descriptor(); int pc_offset = tasm()->pc_offset_for_safepoint(); @@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit( OutputFrameStateCombine::Ignore()); } -void CodeGenerator::InitializeSpeculationPoison() { - if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return; - - // Initialize {kSpeculationPoisonRegister} either by comparing the expected - // with the actual call target, or by unconditionally using {-1} initially. - // Masking register arguments with it only makes sense in the first case. 
- if (info()->called_with_code_start_register()) { - tasm()->RecordComment("-- Prologue: generate speculation poison --"); - GenerateSpeculationPoisonFromCodeStartRegister(); - if (info()->poison_register_arguments()) { - AssembleRegisterArgumentPoisoning(); - } - } else { - ResetSpeculationPoison(); - } -} - -void CodeGenerator::ResetSpeculationPoison() { - if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { - tasm()->ResetSpeculationPoisonRegister(); - } -} - OutOfLineCode::OutOfLineCode(CodeGenerator* gen) : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) { gen->ools_ = this; diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h index 7ccb09d5ac3ed2..18de20f92c8e4b 100644 --- a/deps/v8/src/compiler/backend/code-generator.h +++ b/deps/v8/src/compiler/backend/code-generator.h @@ -103,7 +103,6 @@ class DeoptimizationLiteral { struct TurbolizerCodeOffsetsInfo { int code_start_register_check = -1; int deopt_check = -1; - int init_poison = -1; int blocks_start = -1; int out_of_line_code = -1; int deoptimization_exits = -1; @@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo { // Generates native code for a sequence of instructions. class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { public: - explicit CodeGenerator( - Zone* codegen_zone, Frame* frame, Linkage* linkage, - InstructionSequence* instructions, OptimizedCompilationInfo* info, - Isolate* isolate, base::Optional osr_helper, - int start_source_position, JumpOptimizationInfo* jump_opt, - PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, - Builtin builtin, size_t max_unoptimized_frame_height, - size_t max_pushed_argument_count, const char* debug_name = nullptr); + explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage, + InstructionSequence* instructions, + OptimizedCompilationInfo* info, Isolate* isolate, + base::Optional osr_helper, + int start_source_position, + JumpOptimizationInfo* jump_opt, + const AssemblerOptions& options, Builtin builtin, + size_t max_unoptimized_frame_height, + size_t max_pushed_argument_count, + const char* debug_name = nullptr); // Generate native code. After calling AssembleCode, call FinalizeCode to // produce the actual code object. If an error occurs during either phase, @@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { // Assemble instructions for the specified block. CodeGenResult AssembleBlock(const InstructionBlock* block); - // Inserts mask update at the beginning of an instruction block if the - // predecessor blocks ends with a masking branch. - void TryInsertBranchPoisoning(const InstructionBlock* block); - - // Initializes the masking register in the prologue of a function. - void InitializeSpeculationPoison(); - // Reset the masking register during execution of a function. - void ResetSpeculationPoison(); - // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}. - void GenerateSpeculationPoisonFromCodeStartRegister(); - // Assemble code for the specified instruction. CodeGenResult AssembleInstruction(int instruction_index, const InstructionBlock* block); @@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { // contains the expected pointer to the start of the instruction stream. 
void AssembleCodeStartRegisterCheck(); - void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr); - // When entering a code that is marked for deoptimization, rather continuing // with its execution, we jump to a lazy compiled code. We need to do this // because this code has already been deoptimized and needs to be unlinked // from the JS functions referring it. void BailoutIfDeoptimized(); - // Generates code to poison the stack pointer and implicit register arguments - // like the context register and the function register. - void AssembleRegisterArgumentPoisoning(); - // Generates an architecture-specific, descriptor-specific prologue // to set up a stack frame. void AssembleConstructFrame(); @@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { SourcePositionTableBuilder source_position_table_builder_; ZoneVector protected_instructions_; CodeGenResult result_; - PoisoningMitigationLevel poisoning_level_; ZoneVector block_starts_; TurbolizerCodeOffsetsInfo offsets_info_; ZoneVector instr_starts_; diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index 5db3f20fa4f4e3..c8180d3e5bfb44 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -684,16 +684,6 @@ void CodeGenerator::BailoutIfDeoptimized() { __ bind(&skip); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - // TODO(860429): Remove remaining poisoning infrastructure on ia32. - UNREACHABLE(); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - // TODO(860429): Remove remaining poisoning infrastructure on ia32. - UNREACHABLE(); -} - // Assembles an instruction after register allocation, producing machine code. CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -712,11 +702,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); __ LoadCodeObjectEntry(reg, reg); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(reg); - } else { - __ call(reg); - } + __ call(reg); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -738,19 +724,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) { __ wasm_call(wasm_code, constant.rmode()); } else { - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(wasm_code, constant.rmode()); - } else { - __ call(wasm_code, constant.rmode()); - } + __ call(wasm_code, constant.rmode()); } } else { - Register reg = i.InputRegister(0); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(reg); - } else { - __ call(reg); - } + __ call(i.InputRegister(0)); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -762,12 +739,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Address wasm_code = static_cast
(constant.ToInt32()); __ jmp(wasm_code, constant.rmode()); } else { - Register reg = i.InputRegister(0); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(i.InputRegister(0)); } frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); @@ -784,11 +756,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); __ LoadCodeObjectEntry(reg, reg); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(reg); } frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); @@ -800,11 +768,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(reg); frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); break; @@ -1278,9 +1242,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( case kIA32Bswap: __ bswap(i.OutputRegister()); break; - case kArchWordPoisonOnSpeculation: - // TODO(860429): Remove remaining poisoning infrastructure on ia32. - UNREACHABLE(); case kIA32MFence: __ mfence(); break; @@ -4183,12 +4144,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ jmp(flabel); } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(860429): Remove remaining poisoning infrastructure on ia32. 
- UNREACHABLE(); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc index f36fdb293564ed..b4714408e83f2d 100644 --- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -564,15 +564,9 @@ void InstructionSelector::VisitLoad(Node* node) { AddressingMode mode = g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); InstructionCode code = opcode | AddressingModeField::encode(mode); - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - code |= AccessModeField::encode(kMemoryAccessPoisoned); - } Emit(code, 1, outputs, input_count, inputs); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h index 31d669813e2862..448c9e7148a4ad 100644 --- a/deps/v8/src/compiler/backend/instruction-codes.h +++ b/deps/v8/src/compiler/backend/instruction-codes.h @@ -100,7 +100,6 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode( V(ArchTruncateDoubleToI) \ V(ArchStoreWithWriteBarrier) \ V(ArchStackSlot) \ - V(ArchWordPoisonOnSpeculation) \ V(ArchStackPointerGreaterThan) \ V(ArchStackCheckOffset) \ V(Word32AtomicLoadInt8) \ @@ -208,12 +207,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, - kFlags_branch_and_poison = 2, - kFlags_deoptimize = 3, - kFlags_deoptimize_and_poison = 4, - kFlags_set = 5, - kFlags_trap = 6, - kFlags_select = 7, + kFlags_deoptimize = 2, + kFlags_set = 3, + kFlags_trap = 4, + kFlags_select = 5, }; V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, @@ -262,7 +259,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, enum MemoryAccessMode { kMemoryAccessDirect = 0, kMemoryAccessProtected = 1, - kMemoryAccessPoisoned = 2 }; // The InstructionCode is an opaque, target-specific integer that encodes diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc index c46d263bae26c8..291b4def9279b0 100644 --- a/deps/v8/src/compiler/backend/instruction-scheduler.cc +++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc @@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) { // We should not have branches in the middle of a block. DCHECK_NE(instr->flags_mode(), kFlags_branch); - DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison); if (IsFixedRegisterParameter(instr)) { if (last_live_in_reg_marker_ != nullptr) { @@ -298,11 +297,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const { // effects. return kIsLoadOperation; - case kArchWordPoisonOnSpeculation: - // While poisoning operations have no side effect, they must not be - // reordered relative to branches. 
- return kHasSideEffect; - case kArchPrepareCallCFunction: case kArchPrepareTailCall: case kArchTailCallCodeObject: diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc index f279ea15900976..7bcb5f806948a4 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.cc +++ b/deps/v8/src/compiler/backend/instruction-selector.cc @@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector( size_t* max_pushed_argument_count, SourcePositionMode source_position_mode, Features features, EnableScheduling enable_scheduling, EnableRootsRelativeAddressing enable_roots_relative_addressing, - PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo) + EnableTraceTurboJson trace_turbo) : zone_(zone), linkage_(linkage), sequence_(sequence), @@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector( enable_roots_relative_addressing_(enable_roots_relative_addressing), enable_switch_jump_table_(enable_switch_jump_table), state_values_cache_(zone), - poisoning_level_(poisoning_level), frame_(frame), instruction_selection_failed_(false), instr_origins_(sequence->zone()), @@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, } DCHECK_EQ(1u, buffer->instruction_args.size()); - // Argument 1 is used for poison-alias index (encoded in a word-sized - // immediate. This an index of the operand that aliases with poison register - // or -1 if there is no aliasing. - buffer->instruction_args.push_back(g.TempImmediate(-1)); - const size_t poison_alias_index = 1; - DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index); - // If the call needs a frame state, we insert the state information as // follows (n is the number of value inputs to the frame state): - // arg 2 : deoptimization id. - // arg 3 - arg (n + 2) : value inputs to the frame state. + // arg 1 : deoptimization id. + // arg 2 - arg (n + 2) : value inputs to the frame state. size_t frame_state_entries = 0; USE(frame_state_entries); // frame_state_entries is only used for debug. if (buffer->frame_state_descriptor != nullptr) { @@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, &buffer->instruction_args, FrameStateInputKind::kStackSlot, instruction_zone()); - DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size()); + DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size()); } size_t input_count = static_cast(buffer->input_count()); @@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer, buffer->pushed_nodes[stack_index] = param; pushed_count++; } else { - // If we do load poisoning and the linkage uses the poisoning register, - // then we request the input in memory location, and during code - // generation, we move the input to the register. 
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison && - unallocated.HasFixedRegisterPolicy()) { - int reg = unallocated.fixed_register_index(); - if (Register::from_code(reg) == kSpeculationPoisonRegister) { - buffer->instruction_args[poison_alias_index] = g.TempImmediate( - static_cast(buffer->instruction_args.size())); - op = g.UseRegisterOrSlotOrConstant(*iter); - } - } buffer->instruction_args.push_back(op); } } DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count - - frame_state_entries - 1); + frame_state_entries); if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call && stack_param_delta != 0) { // For tail calls that change the size of their parameter list and keep @@ -1509,11 +1489,6 @@ void InstructionSelector::VisitNode(Node* node) { MarkAsRepresentation(MachineRepresentation::kSimd128, node); return VisitLoadLane(node); } - case IrOpcode::kPoisonedLoad: { - LoadRepresentation type = LoadRepresentationOf(node->op()); - MarkAsRepresentation(type.representation(), node); - return VisitPoisonedLoad(node); - } case IrOpcode::kStore: return VisitStore(node); case IrOpcode::kProtectedStore: @@ -1850,12 +1825,6 @@ void InstructionSelector::VisitNode(Node* node) { return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node); case IrOpcode::kFloat64InsertHighWord32: return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node); - case IrOpcode::kTaggedPoisonOnSpeculation: - return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node); - case IrOpcode::kWord32PoisonOnSpeculation: - return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node); - case IrOpcode::kWord64PoisonOnSpeculation: - return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node); case IrOpcode::kStackSlot: return VisitStackSlot(node); case IrOpcode::kStackPointerGreaterThan: @@ -2389,30 +2358,6 @@ void InstructionSelector::VisitNode(Node* node) { } } -void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) { - if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { - OperandGenerator g(this); - Node* input_node = NodeProperties::GetValueInput(node, 0); - InstructionOperand input = g.UseRegister(input_node); - InstructionOperand output = g.DefineSameAsFirst(node); - Emit(kArchWordPoisonOnSpeculation, output, input); - } else { - EmitIdentity(node); - } -} - -void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) { - EmitWordPoisonOnSpeculation(node); -} - -void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) { - EmitWordPoisonOnSpeculation(node); -} - -void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) { - EmitWordPoisonOnSpeculation(node); -} - void InstructionSelector::VisitStackPointerGreaterThan(Node* node) { FlagsContinuation cont = FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node); @@ -3104,45 +3049,24 @@ void InstructionSelector::VisitReturn(Node* ret) { void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch, BasicBlock* fbranch) { - if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) { - FlagsContinuation cont = - FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch); - VisitWordCompareZero(branch, branch->InputAt(0), &cont); - } else { - FlagsContinuation cont = - FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch); - VisitWordCompareZero(branch, branch->InputAt(0), &cont); - } + FlagsContinuation cont = + FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch); + VisitWordCompareZero(branch, branch->InputAt(0), &cont); } void 
InstructionSelector::VisitDeoptimizeIf(Node* node) { DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); - if (NeedsPoisoning(p.is_safety_check())) { - FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(), - node->InputAt(1)); - VisitWordCompareZero(node, node->InputAt(0), &cont); - } else { - FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(), - node->InputAt(1)); - VisitWordCompareZero(node, node->InputAt(0), &cont); - } + FlagsContinuation cont = FlagsContinuation::ForDeoptimize( + kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(), + node->InputAt(1)); + VisitWordCompareZero(node, node->InputAt(0), &cont); } void InstructionSelector::VisitDeoptimizeUnless(Node* node) { DeoptimizeParameters p = DeoptimizeParametersOf(node->op()); - if (NeedsPoisoning(p.is_safety_check())) { - FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kEqual, p.kind(), p.reason(), node->id(), p.feedback(), - node->InputAt(1)); - VisitWordCompareZero(node, node->InputAt(0), &cont); - } else { - FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kEqual, p.kind(), p.reason(), node->id(), p.feedback(), - node->InputAt(1)); - VisitWordCompareZero(node, node->InputAt(0), &cont); - } + FlagsContinuation cont = FlagsContinuation::ForDeoptimize( + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1)); + VisitWordCompareZero(node, node->InputAt(0), &cont); } void InstructionSelector::VisitSelect(Node* node) { @@ -3186,17 +3110,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) { g.UseImmediate(n.slot()), g.UseImmediate(n.handler())}); } - if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) { - FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison( - kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(), - dynamic_check_args.data(), static_cast(dynamic_check_args.size())); - VisitWordCompareZero(node, n.condition(), &cont); - } else { - FlagsContinuation cont = FlagsContinuation::ForDeoptimize( - kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(), - dynamic_check_args.data(), static_cast(dynamic_check_args.size())); - VisitWordCompareZero(node, n.condition(), &cont); - } + FlagsContinuation cont = FlagsContinuation::ForDeoptimize( + kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(), + dynamic_check_args.data(), static_cast(dynamic_check_args.size())); + VisitWordCompareZero(node, n.condition(), &cont); } void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) { @@ -3409,18 +3326,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) { } #endif // V8_ENABLE_WEBASSEMBLY -// static -bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const { - switch (poisoning_level_) { - case PoisoningMitigationLevel::kDontPoison: - return false; - case PoisoningMitigationLevel::kPoisonAll: - return safety_check != IsSafetyCheck::kNoSafetyCheck; - case PoisoningMitigationLevel::kPoisonCriticalOnly: - return safety_check == IsSafetyCheck::kCriticalSafetyCheck; - } - UNREACHABLE(); -} } // namespace compiler } // namespace internal } // namespace v8 diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h index 11a329d1d6e410..b33de8e8569ebc 100644 --- a/deps/v8/src/compiler/backend/instruction-selector.h +++ 
b/deps/v8/src/compiler/backend/instruction-selector.h @@ -54,13 +54,6 @@ class FlagsContinuation final { return FlagsContinuation(kFlags_branch, condition, true_block, false_block); } - static FlagsContinuation ForBranchAndPoison(FlagsCondition condition, - BasicBlock* true_block, - BasicBlock* false_block) { - return FlagsContinuation(kFlags_branch_and_poison, condition, true_block, - false_block); - } - // Creates a new flags continuation for an eager deoptimization exit. static FlagsContinuation ForDeoptimize( FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, @@ -71,16 +64,6 @@ class FlagsContinuation final { extra_args_count); } - // Creates a new flags continuation for an eager deoptimization exit. - static FlagsContinuation ForDeoptimizeAndPoison( - FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason, - NodeId node_id, FeedbackSource const& feedback, Node* frame_state, - InstructionOperand* extra_args = nullptr, int extra_args_count = 0) { - return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind, - reason, node_id, feedback, frame_state, extra_args, - extra_args_count); - } - // Creates a new flags continuation for a boolean value. static FlagsContinuation ForSet(FlagsCondition condition, Node* result) { return FlagsContinuation(condition, result); @@ -98,16 +81,8 @@ class FlagsContinuation final { } bool IsNone() const { return mode_ == kFlags_none; } - bool IsBranch() const { - return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison; - } - bool IsDeoptimize() const { - return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison; - } - bool IsPoisoned() const { - return mode_ == kFlags_branch_and_poison || - mode_ == kFlags_deoptimize_and_poison; - } + bool IsBranch() const { return mode_ == kFlags_branch; } + bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; } bool IsSet() const { return mode_ == kFlags_set; } bool IsTrap() const { return mode_ == kFlags_trap; } bool IsSelect() const { return mode_ == kFlags_select; } @@ -226,7 +201,7 @@ class FlagsContinuation final { condition_(condition), true_block_(true_block), false_block_(false_block) { - DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison); + DCHECK(mode == kFlags_branch); DCHECK_NOT_NULL(true_block); DCHECK_NOT_NULL(false_block); } @@ -245,7 +220,7 @@ class FlagsContinuation final { frame_state_or_result_(frame_state), extra_args_(extra_args), extra_args_count_(extra_args_count) { - DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison); + DCHECK(mode == kFlags_deoptimize); DCHECK_NOT_NULL(frame_state); } @@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { : kDisableScheduling, EnableRootsRelativeAddressing enable_roots_relative_addressing = kDisableRootsRelativeAddressing, - PoisoningMitigationLevel poisoning_level = - PoisoningMitigationLevel::kDontPoison, EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson); // Visit code for the entire graph with the included schedule. @@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements(); - bool NeedsPoisoning(IsSafetyCheck safety_check) const; - // =========================================================================== // ============ Architecture-independent graph covering methods. 
============= // =========================================================================== @@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont); - void EmitWordPoisonOnSpeculation(Node* node); - void EmitPrepareArguments(ZoneVector* arguments, const CallDescriptor* call_descriptor, Node* node); void EmitPrepareResults(ZoneVector* results, @@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final { FrameStateInput::Equal> state_values_cache_; - PoisoningMitigationLevel poisoning_level_; Frame* frame_; bool instruction_selection_failed_; ZoneVector> instr_origins_; diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc index 63ca78e06000c4..0da8e054ae33ca 100644 --- a/deps/v8/src/compiler/backend/instruction.cc +++ b/deps/v8/src/compiler/backend/instruction.cc @@ -410,12 +410,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) { return os; case kFlags_branch: return os << "branch"; - case kFlags_branch_and_poison: - return os << "branch_and_poison"; case kFlags_deoptimize: return os << "deoptimize"; - case kFlags_deoptimize_and_poison: - return os << "deoptimize_and_poison"; case kFlags_set: return os << "set"; case kFlags_trap: diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h index 204683c973567c..8698ed8a98b617 100644 --- a/deps/v8/src/compiler/backend/instruction.h +++ b/deps/v8/src/compiler/backend/instruction.h @@ -935,8 +935,7 @@ class V8_EXPORT_PRIVATE Instruction final { bool IsDeoptimizeCall() const { return arch_opcode() == ArchOpcode::kArchDeoptimize || - FlagsModeField::decode(opcode()) == kFlags_deoptimize || - FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison; + FlagsModeField::decode(opcode()) == kFlags_deoptimize; } bool IsTrap() const { diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc index e91b7e17d2b730..258d05955e5bec 100644 --- a/deps/v8/src/compiler/backend/jump-threading.cc +++ b/deps/v8/src/compiler/backend/jump-threading.cc @@ -55,17 +55,6 @@ struct JumpThreadingState { RpoNumber onstack() { return RpoNumber::FromInt(-2); } }; -bool IsBlockWithBranchPoisoning(InstructionSequence* code, - InstructionBlock* block) { - if (block->PredecessorCount() != 1) return false; - RpoNumber pred_rpo = (block->predecessors())[0]; - const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo); - if (pred->code_start() == pred->code_end()) return false; - Instruction* instr = code->InstructionAt(pred->code_end() - 1); - FlagsMode mode = FlagsModeField::decode(instr->opcode()); - return mode == kFlags_branch_and_poison; -} - } // namespace bool JumpThreading::ComputeForwarding(Zone* local_zone, @@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone, TRACE("jt [%d] B%d\n", static_cast(stack.size()), block->rpo_number().ToInt()); RpoNumber fw = block->rpo_number(); - if (!IsBlockWithBranchPoisoning(code, block)) { - bool fallthru = true; - for (int i = block->code_start(); i < block->code_end(); ++i) { - Instruction* instr = code->InstructionAt(i); - if (!instr->AreMovesRedundant()) { - // can't skip instructions with non redundant moves. - TRACE(" parallel move\n"); - fallthru = false; - } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) { - // can't skip instructions with flags continuations. 
- TRACE(" flags\n"); - fallthru = false; - } else if (instr->IsNop()) { - // skip nops. - TRACE(" nop\n"); - continue; - } else if (instr->arch_opcode() == kArchJmp) { - // try to forward the jump instruction. - TRACE(" jmp\n"); - // if this block deconstructs the frame, we can't forward it. - // TODO(mtrofin): we can still forward if we end up building - // the frame at start. So we should move the decision of whether - // to build a frame or not in the register allocator, and trickle it - // here and to the code generator. - if (frame_at_start || !(block->must_deconstruct_frame() || - block->must_construct_frame())) { - fw = code->InputRpo(instr, 0); - } - fallthru = false; - } else if (instr->IsRet()) { - TRACE(" ret\n"); - if (fallthru) { - CHECK_IMPLIES(block->must_construct_frame(), - block->must_deconstruct_frame()); - // Only handle returns with immediate/constant operands, since - // they must always be the same for all returns in a function. - // Dynamic return values might use different registers at - // different return sites and therefore cannot be shared. - if (instr->InputAt(0)->IsImmediate()) { - int32_t return_size = ImmediateOperand::cast(instr->InputAt(0)) - ->inline_int32_value(); - // Instructions can be shared only for blocks that share - // the same |must_deconstruct_frame| attribute. - if (block->must_deconstruct_frame()) { - if (empty_deconstruct_frame_return_block == - RpoNumber::Invalid()) { - empty_deconstruct_frame_return_block = block->rpo_number(); - empty_deconstruct_frame_return_size = return_size; - } else if (empty_deconstruct_frame_return_size == - return_size) { - fw = empty_deconstruct_frame_return_block; - block->clear_must_deconstruct_frame(); - } - } else { - if (empty_no_deconstruct_frame_return_block == - RpoNumber::Invalid()) { - empty_no_deconstruct_frame_return_block = - block->rpo_number(); - empty_no_deconstruct_frame_return_size = return_size; - } else if (empty_no_deconstruct_frame_return_size == - return_size) { - fw = empty_no_deconstruct_frame_return_block; - } + bool fallthru = true; + for (int i = block->code_start(); i < block->code_end(); ++i) { + Instruction* instr = code->InstructionAt(i); + if (!instr->AreMovesRedundant()) { + // can't skip instructions with non redundant moves. + TRACE(" parallel move\n"); + fallthru = false; + } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) { + // can't skip instructions with flags continuations. + TRACE(" flags\n"); + fallthru = false; + } else if (instr->IsNop()) { + // skip nops. + TRACE(" nop\n"); + continue; + } else if (instr->arch_opcode() == kArchJmp) { + // try to forward the jump instruction. + TRACE(" jmp\n"); + // if this block deconstructs the frame, we can't forward it. + // TODO(mtrofin): we can still forward if we end up building + // the frame at start. So we should move the decision of whether + // to build a frame or not in the register allocator, and trickle it + // here and to the code generator. + if (frame_at_start || !(block->must_deconstruct_frame() || + block->must_construct_frame())) { + fw = code->InputRpo(instr, 0); + } + fallthru = false; + } else if (instr->IsRet()) { + TRACE(" ret\n"); + if (fallthru) { + CHECK_IMPLIES(block->must_construct_frame(), + block->must_deconstruct_frame()); + // Only handle returns with immediate/constant operands, since + // they must always be the same for all returns in a function. + // Dynamic return values might use different registers at + // different return sites and therefore cannot be shared. 
+ if (instr->InputAt(0)->IsImmediate()) { + int32_t return_size = ImmediateOperand::cast(instr->InputAt(0)) + ->inline_int32_value(); + // Instructions can be shared only for blocks that share + // the same |must_deconstruct_frame| attribute. + if (block->must_deconstruct_frame()) { + if (empty_deconstruct_frame_return_block == + RpoNumber::Invalid()) { + empty_deconstruct_frame_return_block = block->rpo_number(); + empty_deconstruct_frame_return_size = return_size; + } else if (empty_deconstruct_frame_return_size == return_size) { + fw = empty_deconstruct_frame_return_block; + block->clear_must_deconstruct_frame(); + } + } else { + if (empty_no_deconstruct_frame_return_block == + RpoNumber::Invalid()) { + empty_no_deconstruct_frame_return_block = block->rpo_number(); + empty_no_deconstruct_frame_return_size = return_size; + } else if (empty_no_deconstruct_frame_return_size == + return_size) { + fw = empty_no_deconstruct_frame_return_block; } } } - fallthru = false; - } else { - // can't skip other instructions. - TRACE(" other\n"); - fallthru = false; } - break; - } - if (fallthru) { - int next = 1 + block->rpo_number().ToInt(); - if (next < code->InstructionBlockCount()) - fw = RpoNumber::FromInt(next); + fallthru = false; + } else { + // can't skip other instructions. + TRACE(" other\n"); + fallthru = false; } + break; + } + if (fallthru) { + int next = 1 + block->rpo_number().ToInt(); + if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next); } state.Forward(fw); } @@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone, for (int i = block->code_start(); i < block->code_end(); ++i) { Instruction* instr = code->InstructionAt(i); FlagsMode mode = FlagsModeField::decode(instr->opcode()); - if (mode == kFlags_branch || mode == kFlags_branch_and_poison) { + if (mode == kFlags_branch) { fallthru = false; // branches don't fall through to the next block. } else if (instr->arch_opcode() == kArchJmp || instr->arch_opcode() == kArchRet) { diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc index 2b8197e7e64a06..e7ba9ab933811e 100644 --- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc +++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc @@ -313,16 +313,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, << "\""; \ UNIMPLEMENTED(); -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->And(value, value, kSpeculationPoisonRegister); - } -} - } // namespace #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ @@ -614,31 +604,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - // Calculate a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. 
- // difference = (current - expected) | (expected - current) - // poison = ~(difference >> (kBitsPerSystemPointer - 1)) - __ ComputeCodeStartAddress(kScratchReg); - __ Move(kSpeculationPoisonRegister, kScratchReg); - __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, - kScratchReg); - __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kBitsPerSystemPointer - 1); - __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kSpeculationPoisonRegister); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ And(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -938,10 +903,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } - case kArchWordPoisonOnSpeculation: - __ And(i.OutputRegister(), i.InputRegister(0), - kSpeculationPoisonRegister); - break; case kIeee754Float64Acos: ASSEMBLE_IEEE754_UNOP(acos); break; @@ -1541,30 +1502,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kMipsLbu: __ lbu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsLb: __ lb(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsSb: __ sb(i.InputOrZeroRegister(2), i.MemoryOperand()); break; case kMipsLhu: __ lhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsUlhu: __ Ulhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsLh: __ lh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsUlh: __ Ulh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsSh: __ sh(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -1574,11 +1529,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kMipsLw: __ lw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsUlw: __ Ulw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMipsSw: __ sw(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -3727,85 +3680,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { branch->fallthru); } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). 
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - MipsOperandConverter i(this, instr); - condition = NegateFlagsCondition(condition); - - switch (instr->arch_opcode()) { - case kMipsCmp: { - __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0), - i.InputOperand(1), - FlagsConditionToConditionCmp(condition)); - } - return; - case kMipsTst: { - switch (condition) { - case kEqual: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - case kNotEqual: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - default: - UNREACHABLE(); - } - } - return; - case kMipsAddOvf: - case kMipsSubOvf: { - // Overflow occurs if overflow register is negative - __ Slt(kScratchReg2, kScratchReg, zero_reg); - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg2); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kMipsMulOvf: { - // Overflow occurs if overflow register is not zero - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kMipsCmpS: - case kMipsCmpD: { - bool predicate; - FlagsConditionToConditionCmpFPU(&predicate, condition); - if (predicate) { - __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); - } else { - __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister); - } - } - return; - default: - UNREACHABLE(); - } -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); @@ -4130,7 +4004,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves = call_descriptor->CalleeSavedRegisters(); diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc index 48635c9c15b519..6d56892f30ca2f 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc @@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 + SubuLatency() + AdduLatency(); } - case kArchWordPoisonOnSpeculation: - return AndLatency(); case kIeee754Float64Acos: case kIeee754Float64Acosh: case kIeee754Float64Asin: diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc index c8236122461d65..f22278e240b0ae 100644 --- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc +++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc @@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kNone: UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } if 
(g.CanBeImmediate(index, opcode)) { Emit(opcode | AddressingModeField::encode(kMode_MRI), @@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) { } } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc index 6fce103d247f2e..cd5365b1eefebc 100644 --- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc @@ -321,16 +321,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - MipsOperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->And(value, value, kSpeculationPoisonRegister); - } -} - } // namespace #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ @@ -577,31 +567,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - // Calculate a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - // difference = (current - expected) | (expected - current) - // poison = ~(difference >> (kBitsPerSystemPointer - 1)) - __ ComputeCodeStartAddress(kScratchReg); - __ Move(kSpeculationPoisonRegister, kScratchReg); - __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, - kScratchReg); - __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kBitsPerSystemPointer - 1); - __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kSpeculationPoisonRegister); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ And(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. 
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -900,10 +865,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; } - case kArchWordPoisonOnSpeculation: - __ And(i.OutputRegister(), i.InputRegister(0), - kSpeculationPoisonRegister); - break; case kIeee754Float64Acos: ASSEMBLE_IEEE754_UNOP(acos); break; @@ -1646,30 +1607,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kMips64Lbu: __ Lbu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Lb: __ Lb(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Sb: __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand()); break; case kMips64Lhu: __ Lhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Ulhu: __ Ulhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Lh: __ Lh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Ulh: __ Ulh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Sh: __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -1679,27 +1634,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kMips64Lw: __ Lw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Ulw: __ Ulw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Lwu: __ Lwu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Ulwu: __ Ulwu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Ld: __ Ld(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Uld: __ Uld(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kMips64Sw: __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -3904,104 +3853,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { branch->fallthru); } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). - if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - MipsOperandConverter i(this, instr); - condition = NegateFlagsCondition(condition); - - switch (instr->arch_opcode()) { - case kMips64Cmp: { - __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0), - i.InputOperand(1), - FlagsConditionToConditionCmp(condition)); - } - return; - case kMips64Tst: { - switch (condition) { - case kEqual: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - case kNotEqual: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - default: - UNREACHABLE(); - } - } - return; - case kMips64Dadd: - case kMips64Dsub: { - // Check for overflow creates 1 or 0 for result. 
- __ dsrl32(kScratchReg, i.OutputRegister(), 31); - __ srl(kScratchReg2, i.OutputRegister(), 31); - __ xor_(kScratchReg2, kScratchReg, kScratchReg2); - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg2); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kMips64DaddOvf: - case kMips64DsubOvf: { - // Overflow occurs if overflow register is negative - __ Slt(kScratchReg2, kScratchReg, zero_reg); - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg2); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kMips64MulOvf: { - // Overflow occurs if overflow register is not zero - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kMips64CmpS: - case kMips64CmpD: { - bool predicate; - FlagsConditionToConditionCmpFPU(&predicate, condition); - if (predicate) { - __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister); - } else { - __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister); - } - } - return; - default: - UNREACHABLE(); - } -} - #undef UNSUPPORTED_COND void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, @@ -4340,7 +4191,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves = call_descriptor->CalleeSavedRegisters(); diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc index c63e0aa3d36d55..d6016f2b97d559 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc @@ -1352,8 +1352,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return DadduLatency(false) + AndLatency(false) + AssertLatency() + DadduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 + DsubuLatency() + DadduLatency(); - case kArchWordPoisonOnSpeculation: - return AndLatency(); case kIeee754Float64Acos: case kIeee754Float64Acosh: case kIeee754Float64Asin: diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc index bec7bbefdcbce6..942d918dce73c6 100644 --- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc +++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc @@ -515,16 +515,10 @@ void InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kNone: UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } EmitLoad(this, node, opcode); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void 
InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); @@ -2041,8 +2035,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, bool IsNodeUnsigned(Node* n) { NodeMatcher m(n); - if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || - m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { + if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad() || + m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { LoadRepresentation load_rep = LoadRepresentationOf(n->op()); return load_rep.IsUnsigned(); } else { diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc index cf324353f2c43c..838ef4b4c07ce4 100644 --- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc @@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter { RCBit OutputRCBit() const { switch (instr_->flags_mode()) { case kFlags_branch: - case kFlags_branch_and_poison: case kFlags_deoptimize: - case kFlags_deoptimize_and_poison: case kFlags_set: case kFlags_trap: case kFlags_select: @@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) { UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, - PPCOperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode()); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->and_(value, value, kSpeculationPoisonRegister); - } -} - } // namespace #define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \ @@ -777,25 +766,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne, cr0); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - Register scratch = kScratchReg; - - __ ComputeCodeStartAddress(scratch); - - // Calculate a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - __ CmpS64(kJavaScriptCallCodeStartRegister, scratch); - __ li(scratch, Operand::Zero()); - __ notx(kSpeculationPoisonRegister, scratch); - __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ and_(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. 
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -1164,10 +1134,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Operand(offset.offset()), r0); break; } - case kArchWordPoisonOnSpeculation: - __ and_(i.OutputRegister(), i.InputRegister(0), - kSpeculationPoisonRegister); - break; case kPPC_Peek: { int reverse_slot = i.InputInt32(0); int offset = @@ -1968,33 +1934,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #endif case kPPC_LoadWordU8: ASSEMBLE_LOAD_INTEGER(lbz, lbzx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kPPC_LoadWordS8: ASSEMBLE_LOAD_INTEGER(lbz, lbzx); __ extsb(i.OutputRegister(), i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kPPC_LoadWordU16: ASSEMBLE_LOAD_INTEGER(lhz, lhzx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kPPC_LoadWordS16: ASSEMBLE_LOAD_INTEGER(lha, lhax); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kPPC_LoadWordU32: ASSEMBLE_LOAD_INTEGER(lwz, lwzx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kPPC_LoadWordS32: ASSEMBLE_LOAD_INTEGER(lwa, lwax); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; #if V8_TARGET_ARCH_PPC64 case kPPC_LoadWord64: ASSEMBLE_LOAD_INTEGER(ld, ldx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; #endif case kPPC_LoadFloat32: @@ -2143,7 +2102,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kPPC_LoadByteRev32: { ASSEMBLE_LOAD_INTEGER_RR(lwbrx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; } case kPPC_StoreByteRev32: { @@ -2169,7 +2127,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kPPC_LoadByteRev64: { ASSEMBLE_LOAD_INTEGER_RR(ldbrx); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; } case kPPC_StoreByteRev64: { @@ -3799,21 +3756,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ b(flabel); // no fallthru to flabel. } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(John) Handle float comparisons (kUnordered[Not]Equal). 
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual || - condition == kOverflow || condition == kNotOverflow) { - return; - } - - ArchOpcode op = instr->arch_opcode(); - condition = NegateFlagsCondition(condition); - __ li(kScratchReg, Operand::Zero()); - __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister, - kScratchReg, kSpeculationPoisonRegister, cr0); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); @@ -4079,7 +4021,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters(); diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc index c74211aa3895f1..ab2b0a3338f7d9 100644 --- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc +++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc @@ -229,11 +229,6 @@ void InstructionSelector::VisitLoad(Node* node) { UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad && - poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } - bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad || node->opcode() == IrOpcode::kWord64AtomicLoad); @@ -252,8 +247,6 @@ void InstructionSelector::VisitLoad(Node* node) { } } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc index 2d92ae1567e1e8..3bd288ae1d9b04 100644 --- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc @@ -307,17 +307,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate, UNREACHABLE(); } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - RiscvOperandConverter const& i) { - const MemoryAccessMode access_mode = - static_cast(MiscField::decode(opcode)); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->And(value, value, kSpeculationPoisonRegister); - } -} - } // namespace #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \ @@ -570,31 +559,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg)); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - // Calculate a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. 
- // difference = (current - expected) | (expected - current) - // poison = ~(difference >> (kBitsPerSystemPointer - 1)) - __ ComputeCodeStartAddress(kScratchReg); - __ Move(kSpeculationPoisonRegister, kScratchReg); - __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister, - kScratchReg); - __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kJavaScriptCallCodeStartRegister); - __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kBitsPerSystemPointer - 1); - __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister, - kSpeculationPoisonRegister); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ And(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -887,10 +851,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; } - case kArchWordPoisonOnSpeculation: - __ And(i.OutputRegister(), i.InputRegister(0), - kSpeculationPoisonRegister); - break; case kIeee754Float64Acos: ASSEMBLE_IEEE754_UNOP(acos); break; @@ -1553,30 +1513,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kRiscvLbu: __ Lbu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvLb: __ Lb(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvSb: __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand()); break; case kRiscvLhu: __ Lhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvUlhu: __ Ulhu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvLh: __ Lh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvUlh: __ Ulh(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvSh: __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -1586,27 +1540,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kRiscvLw: __ Lw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvUlw: __ Ulw(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvLwu: __ Lwu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvUlwu: __ Ulwu(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvLd: __ Ld(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvUld: __ Uld(i.OutputRegister(), i.MemoryOperand()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kRiscvSw: __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand()); @@ -2011,110 +1959,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { branch->fallthru); } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition 
condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). - if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - RiscvOperandConverter i(this, instr); - condition = NegateFlagsCondition(condition); - - switch (instr->arch_opcode()) { - case kRiscvCmp: { - __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1), - FlagsConditionToConditionCmp(condition)); - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg); - } - return; - case kRiscvCmpZero: { - __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg), - FlagsConditionToConditionCmp(condition)); - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg); - } - return; - case kRiscvTst: { - switch (condition) { - case kEqual: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - case kNotEqual: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - default: - UNREACHABLE(); - } - } - return; - case kRiscvAdd64: - case kRiscvSub64: { - // Check for overflow creates 1 or 0 for result. - __ Srl64(kScratchReg, i.OutputRegister(), 63); - __ Srl32(kScratchReg2, i.OutputRegister(), 31); - __ Xor(kScratchReg2, kScratchReg, kScratchReg2); - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg2); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kRiscvAddOvf64: - case kRiscvSubOvf64: { - // Overflow occurs if overflow register is negative - __ Slt(kScratchReg2, kScratchReg, zero_reg); - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg2); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kRiscvMulOvf32: { - // Overflow occurs if overflow register is not zero - switch (condition) { - case kOverflow: - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, - kScratchReg); - break; - case kNotOverflow: - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - break; - default: - UNSUPPORTED_COND(instr->arch_opcode(), condition); - } - } - return; - case kRiscvCmpS: - case kRiscvCmpD: { - bool predicate; - FlagsConditionToConditionCmpFPU(&predicate, condition); - if (predicate) { - __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg); - } else { - __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg); - } - } - return; - default: - UNREACHABLE(); - } -} - #undef UNSUPPORTED_COND void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, @@ -2489,7 +2333,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves = call_descriptor->CalleeSavedRegisters(); diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc index 157b11c9308a6f..91ceae622bb037 100644 --- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc @@ -1169,8 +1169,6 @@ int 
InstructionScheduler::GetInstructionLatency(const Instruction* instr) { return Add64Latency(false) + AndLatency(false) + AssertLatency() + Add64Latency(false) + AndLatency(false) + BranchShortLatency() + 1 + Sub64Latency() + Add64Latency(); - case kArchWordPoisonOnSpeculation: - return AndLatency(); case kIeee754Float64Acos: case kIeee754Float64Acosh: case kIeee754Float64Asin: diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc index 72706201e2a78b..dea4ed9fe6fc24 100644 --- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc +++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc @@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) { case MachineRepresentation::kNone: UNREACHABLE(); } - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= MiscField::encode(kMemoryAccessPoisoned); - } EmitLoad(this, node, opcode); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); @@ -1827,8 +1821,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node, bool IsNodeUnsigned(Node* n) { NodeMatcher m(n); - if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() || - m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { + if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad() || + m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) { LoadRepresentation load_rep = LoadRepresentationOf(n->op()); return load_rep.IsUnsigned(); } else { diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc index 685293169d7fd7..2cd3244a1fb7c7 100644 --- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc +++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc @@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall( } } -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr, - S390OperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode()); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->AndP(value, kSpeculationPoisonRegister); - } -} - } // namespace void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr, @@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, ne); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - Register scratch = r1; - - __ ComputeCodeStartAddress(scratch); - - // Calculate a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - __ mov(kSpeculationPoisonRegister, Operand::Zero()); - __ mov(r0, Operand(-1)); - __ CmpS64(kJavaScriptCallCodeStartRegister, scratch); - __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister); - __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister); - __ AndP(sp, sp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. 
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -1395,10 +1367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Operand(offset.offset())); break; } - case kArchWordPoisonOnSpeculation: - DCHECK_EQ(i.OutputRegister(), i.InputRegister(0)); - __ AndP(i.InputRegister(0), kSpeculationPoisonRegister); - break; case kS390_Peek: { int reverse_slot = i.InputInt32(0); int offset = @@ -2155,7 +2123,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( break; case kS390_LoadWordS8: ASSEMBLE_LOAD_INTEGER(LoadS8); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_BitcastFloat32ToInt32: ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr); @@ -2173,35 +2140,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( #endif case kS390_LoadWordU8: ASSEMBLE_LOAD_INTEGER(LoadU8); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadWordU16: ASSEMBLE_LOAD_INTEGER(LoadU16); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadWordS16: ASSEMBLE_LOAD_INTEGER(LoadS16); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadWordU32: ASSEMBLE_LOAD_INTEGER(LoadU32); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadWordS32: ASSEMBLE_LOAD_INTEGER(LoadS32); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadReverse16: ASSEMBLE_LOAD_INTEGER(lrvh); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadReverse32: ASSEMBLE_LOAD_INTEGER(lrv); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadReverse64: ASSEMBLE_LOAD_INTEGER(lrvg); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadReverse16RR: __ lrvr(i.OutputRegister(), i.InputRegister(0)); @@ -2238,7 +2197,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } case kS390_LoadWord64: ASSEMBLE_LOAD_INTEGER(lg); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; case kS390_LoadAndTestWord32: { ASSEMBLE_LOADANDTEST32(ltr, lt_z); @@ -2258,7 +2216,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( AddressingMode mode = kMode_None; MemOperand operand = i.MemoryOperand(&mode); __ vl(i.OutputSimd128Register(), operand, Condition(0)); - EmitWordLoadPoisoningIfNeeded(this, instr, i); break; } case kS390_StoreWord8: @@ -3541,20 +3498,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ b(flabel); // no fallthru to flabel. } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(John) Handle float comparisons (kUnordered[Not]Equal). 
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual || - condition == kOverflow || condition == kNotOverflow) { - return; - } - - condition = NegateFlagsCondition(condition); - __ mov(r0, Operand::Zero()); - __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop), - kSpeculationPoisonRegister, r0); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { AssembleArchBranch(instr, branch); @@ -3781,7 +3724,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= osr_helper()->UnoptimizedFrameSlots(); - ResetSpeculationPoison(); } const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters(); diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc index bcf5a8dfff8ff0..27a0218b8513ba 100644 --- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc +++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc @@ -704,15 +704,9 @@ void InstructionSelector::VisitLoad(Node* node) { AddressingMode mode = g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count); opcode |= AddressingModeField::encode(mode); - if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - opcode |= AccessModeField::encode(kMemoryAccessPoisoned); - } Emit(opcode, 1, outputs, input_count, inputs); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { // TODO(eholk) UNIMPLEMENTED(); diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index 60a40fb4893bea..bb0bc293610013 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -569,16 +569,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen, #endif // V8_ENABLE_WEBASSEMBLY -void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, - InstructionCode opcode, Instruction* instr, - X64OperandConverter const& i) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - Register value = i.OutputRegister(); - codegen->tasm()->andq(value, kSpeculationPoisonRegister); - } -} - } // namespace #define ASSEMBLE_UNOP(asm_instr) \ @@ -1019,22 +1009,6 @@ void CodeGenerator::BailoutIfDeoptimized() { RelocInfo::CODE_TARGET, not_zero); } -void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() { - // Set a mask which has all bits set in the normal case, but has all - // bits cleared if we are speculatively executing the wrong PC. - __ ComputeCodeStartAddress(rbx); - __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister); - __ cmpq(kJavaScriptCallCodeStartRegister, rbx); - __ Move(rbx, -1); - __ cmovq(equal, kSpeculationPoisonRegister, rbx); -} - -void CodeGenerator::AssembleRegisterArgumentPoisoning() { - __ andq(kJSFunctionRegister, kSpeculationPoisonRegister); - __ andq(kContextRegister, kSpeculationPoisonRegister); - __ andq(rsp, kSpeculationPoisonRegister); -} - // Assembles an instruction after register allocation, producing machine code. 
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( Instruction* instr) { @@ -1052,11 +1026,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); __ LoadCodeObjectEntry(reg, reg); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(reg); - } else { - __ call(reg); - } + __ call(reg); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -1078,19 +1048,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) { __ near_call(wasm_code, constant.rmode()); } else { - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(wasm_code, constant.rmode()); - } else { - __ Call(wasm_code, constant.rmode()); - } + __ Call(wasm_code, constant.rmode()); } } else { - Register reg = i.InputRegister(0); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineCall(reg); - } else { - __ call(reg); - } + __ call(i.InputRegister(0)); } RecordCallPosition(instr); frame_access_state()->ClearSPDelta(); @@ -1107,12 +1068,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ jmp(kScratchRegister); } } else { - Register reg = i.InputRegister(0); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(i.InputRegister(0)); } unwinding_info_writer_.MarkBlockWillExit(); frame_access_state()->ClearSPDelta(); @@ -1130,11 +1086,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); __ LoadCodeObjectEntry(reg, reg); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(reg); } unwinding_info_writer_.MarkBlockWillExit(); frame_access_state()->ClearSPDelta(); @@ -1147,11 +1099,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DCHECK_IMPLIES( instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister), reg == kJavaScriptCallCodeStartRegister); - if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) { - __ RetpolineJump(reg); - } else { - __ jmp(reg); - } + __ jmp(reg); unwinding_info_writer_.MarkBlockWillExit(); frame_access_state()->ClearSPDelta(); frame_access_state()->SetFrameAccessToDefault(); @@ -1368,10 +1316,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DetermineStubCallMode(), kTaggedSize); break; } - case kArchWordPoisonOnSpeculation: - DCHECK_EQ(i.OutputRegister(), i.InputRegister(0)); - __ andq(i.InputRegister(0), kSpeculationPoisonRegister); - break; case kX64MFence: __ mfence(); break; @@ -2180,24 +2124,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbl); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxbl: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbl); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxbq: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxbq); - EmitWordLoadPoisoningIfNeeded(this, 
opcode, instr, i); break; case kX64Movzxbq: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxbq); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movb: { EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -2214,20 +2154,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), kInt8Size); } - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64Movsxwl: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxwl); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movzxwl: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwl); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxwq: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -2237,7 +2174,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movzxwq); __ AssertZeroExtended(i.OutputRegister()); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movw: { EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -2254,7 +2190,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), kInt16Size); } - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64Movl: @@ -2288,12 +2223,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DetermineStubCallMode(), kInt32Size); } } - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movsxlq: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); ASSEMBLE_MOVX(movsxlq); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64MovqDecompressTaggedSigned: { CHECK(instr->HasOutput()); @@ -2301,7 +2234,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ DecompressTaggedSigned(i.OutputRegister(), address); EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i, DetermineStubCallMode(), kTaggedSize); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqDecompressTaggedPointer: { @@ -2310,7 +2242,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ DecompressTaggedPointer(i.OutputRegister(), address); EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i, DetermineStubCallMode(), kTaggedSize); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqDecompressAnyTagged: { @@ -2319,7 +2250,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( __ DecompressAnyTagged(i.OutputRegister(), address); EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i, DetermineStubCallMode(), kTaggedSize); - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; } case kX64MovqCompressTagged: { @@ -2361,7 +2291,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( DetermineStubCallMode(), kInt64Size); } } - EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i); break; case kX64Movss: EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); @@ -2376,17 +2305,7 @@ CodeGenerator::CodeGenResult 
CodeGenerator::AssembleArchInstruction( case kX64Movsd: { EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); if (instr->HasOutput()) { - const MemoryAccessMode access_mode = AccessModeField::decode(opcode); - if (access_mode == kMemoryAccessPoisoned) { - // If we have to poison the loaded value, we load into a general - // purpose register first, mask it with the poison, and move the - // value from the general purpose register into the double register. - __ movq(kScratchRegister, i.MemoryOperand()); - __ andq(kScratchRegister, kSpeculationPoisonRegister); - __ Movq(i.OutputDoubleRegister(), kScratchRegister); - } else { - __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand()); - } + __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand()); } else { size_t index = 0; Operand operand = i.MemoryOperand(&index); @@ -4462,19 +4381,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { if (!branch->fallthru) __ jmp(flabel, flabel_distance); } -void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition, - Instruction* instr) { - // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal). - if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) { - return; - } - - condition = NegateFlagsCondition(condition); - __ Move(kScratchRegister, 0); - __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister, - kScratchRegister); -} - void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr, BranchInfo* branch) { Label::Distance flabel_distance = @@ -4716,7 +4622,6 @@ void CodeGenerator::AssembleConstructFrame() { __ RecordComment("-- OSR entrypoint --"); osr_pc_offset_ = __ pc_offset(); required_slots -= static_cast(osr_helper()->UnoptimizedFrameSlots()); - ResetSpeculationPoison(); } const RegList saves = call_descriptor->CalleeSavedRegisters(); diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index 53ee75064bb97c..5a5acb746e5c3d 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -471,9 +471,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value, InstructionCode code = opcode | AddressingModeField::encode(mode); if (node->opcode() == IrOpcode::kProtectedLoad) { code |= AccessModeField::encode(kMemoryAccessProtected); - } else if (node->opcode() == IrOpcode::kPoisonedLoad) { - CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison); - code |= AccessModeField::encode(kMemoryAccessPoisoned); } Emit(code, 1, outputs, input_count, inputs, temp_count, temps); } @@ -484,8 +481,6 @@ void InstructionSelector::VisitLoad(Node* node) { VisitLoad(node, node, GetLoadOpcode(load_rep)); } -void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); } - void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); } void InstructionSelector::VisitStore(Node* node) { @@ -1502,8 +1497,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) { } case IrOpcode::kLoad: case IrOpcode::kLoadImmutable: - case IrOpcode::kProtectedLoad: - case IrOpcode::kPoisonedLoad: { + case IrOpcode::kProtectedLoad: { // The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly // zero-extend to 64-bit on x64, so the zero-extension is a no-op. 
LoadRepresentation load_rep = LoadRepresentationOf(node->op()); diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc index a864012a7a662a..151534050391ec 100644 --- a/deps/v8/src/compiler/branch-elimination.cc +++ b/deps/v8/src/compiler/branch-elimination.cc @@ -135,7 +135,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) { bool condition_value; // If we know the condition we can discard the branch. if (from_input.LookupCondition(condition, &branch, &condition_value)) { - MarkAsSafetyCheckIfNeeded(branch, node); for (Node* const use : node->uses()) { switch (use->opcode()) { case IrOpcode::kIfTrue: @@ -215,7 +214,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) { Node* branch; // If we know the condition we can discard the branch. if (conditions.LookupCondition(condition, &branch, &condition_value)) { - MarkAsSafetyCheckIfNeeded(branch, node); if (condition_is_true == condition_value) { // We don't update the conditions here, because we're replacing {node} // with the {control} node that already contains the right information. @@ -410,21 +408,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() { } #endif -void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) { - // Check if {branch} is dead because we might have a stale side-table entry. - if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead && - branch->opcode() != IrOpcode::kTrapIf && - branch->opcode() != IrOpcode::kTrapUnless) { - IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op()); - IsSafetyCheck combined_safety = - CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op())); - if (branch_safety != combined_safety) { - NodeProperties::ChangeOp( - branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety)); - } - } -} - Graph* BranchElimination::graph() const { return jsgraph()->graph(); } Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); } diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h index 9078c3903814cb..93bacbff7b30cd 100644 --- a/deps/v8/src/compiler/branch-elimination.h +++ b/deps/v8/src/compiler/branch-elimination.h @@ -114,7 +114,6 @@ class V8_EXPORT_PRIVATE BranchElimination final Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions, Node* current_condition, Node* current_branch, bool is_true_branch, bool in_new_block); - void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node); Node* dead() const { return dead_; } Graph* graph() const; diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc index 985a256c57dd99..3a28b33b52b86e 100644 --- a/deps/v8/src/compiler/bytecode-graph-builder.cc +++ b/deps/v8/src/compiler/bytecode-graph-builder.cc @@ -141,9 +141,8 @@ class BytecodeGraphBuilder { Node* NewIfDefault() { return NewNode(common()->IfDefault()); } Node* NewMerge() { return NewNode(common()->Merge(1), true); } Node* NewLoop() { return NewNode(common()->Loop(1), true); } - Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) { - return NewNode(common()->Branch(hint, is_safety_check), condition); + Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) { + return NewNode(common()->Branch(hint), condition); } Node* NewSwitch(Node* condition, int control_output_count) { return NewNode(common()->Switch(control_output_count), 
condition); @@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() { } void BytecodeGraphBuilder::BuildJumpIf(Node* condition) { - NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck); + NewBranch(condition, BranchHint::kNone); { SubEnvironment sub_environment(this); NewIfTrue(); @@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) { } void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) { - NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck); + NewBranch(condition, BranchHint::kNone); { SubEnvironment sub_environment(this); NewIfFalse(); @@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) { } void BytecodeGraphBuilder::BuildJumpIfFalse() { - NewBranch(environment()->LookupAccumulator(), BranchHint::kNone, - IsSafetyCheck::kNoSafetyCheck); + NewBranch(environment()->LookupAccumulator(), BranchHint::kNone); { SubEnvironment sub_environment(this); NewIfFalse(); @@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() { } void BytecodeGraphBuilder::BuildJumpIfTrue() { - NewBranch(environment()->LookupAccumulator(), BranchHint::kNone, - IsSafetyCheck::kNoSafetyCheck); + NewBranch(environment()->LookupAccumulator(), BranchHint::kNone); { SubEnvironment sub_environment(this); NewIfTrue(); diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc index 2cbcce236fa1e0..63154ea6a064b9 100644 --- a/deps/v8/src/compiler/code-assembler.cc +++ b/deps/v8/src/compiler/code-assembler.cc @@ -48,8 +48,7 @@ static_assert( CodeAssemblerState::CodeAssemblerState( Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor, - CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level, - Builtin builtin) + CodeKind kind, const char* name, Builtin builtin) // TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for // bytecode handlers? 
: CodeAssemblerState( @@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState( Linkage::GetStubCallDescriptor( zone, descriptor, descriptor.GetStackParameterCount(), CallDescriptor::kNoFlags, Operator::kNoProperties), - kind, name, poisoning_level, builtin) {} + kind, name, builtin) {} CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count, CodeKind kind, - const char* name, - PoisoningMitigationLevel poisoning_level, - Builtin builtin) + const char* name, Builtin builtin) : CodeAssemblerState( isolate, zone, Linkage::GetJSCallDescriptor(zone, false, parameter_count, CallDescriptor::kCanUseRoots), - kind, name, poisoning_level, builtin) {} + kind, name, builtin) {} CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor, CodeKind kind, const char* name, - PoisoningMitigationLevel poisoning_level, Builtin builtin) : raw_assembler_(new RawMachineAssembler( isolate, zone->New(zone), call_descriptor, MachineType::PointerRepresentation(), InstructionSelector::SupportedMachineOperatorFlags(), - InstructionSelector::AlignmentRequirements(), poisoning_level)), + InstructionSelector::AlignmentRequirements())), kind_(kind), name_(name), builtin_(builtin), @@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const { return raw_assembler()->machine()->Word32ShiftIsSafe(); } -PoisoningMitigationLevel CodeAssembler::poisoning_level() const { - return raw_assembler()->poisoning_level(); -} - // static Handle CodeAssembler::GenerateCode( CodeAssemblerState* state, const AssemblerOptions& options, @@ -187,7 +179,7 @@ Handle CodeAssembler::GenerateCode( code = Pipeline::GenerateCodeForCodeStub( rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_, rasm->source_positions(), state->kind_, state->name_, - state->builtin_, rasm->poisoning_level(), options, profile_data) + state->builtin_, options, profile_data) .ToHandleChecked(); state->code_generated_ = true; @@ -565,15 +557,6 @@ TNode CodeAssembler::LoadParentFramePointer() { return UncheckedCast(raw_assembler()->LoadParentFramePointer()); } -TNode CodeAssembler::TaggedPoisonOnSpeculation(TNode value) { - return UncheckedCast( - raw_assembler()->TaggedPoisonOnSpeculation(value)); -} - -TNode CodeAssembler::WordPoisonOnSpeculation(TNode value) { - return UncheckedCast(raw_assembler()->WordPoisonOnSpeculation(value)); -} - #define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \ TNode CodeAssembler::name(TNode a, TNode b) { \ return UncheckedCast(raw_assembler()->name(a, b)); \ @@ -677,27 +660,23 @@ TNode CodeAssembler::TruncateFloat32ToInt32(TNode value) { CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP) #undef DEFINE_CODE_ASSEMBLER_UNARY_OP -Node* CodeAssembler::Load(MachineType type, Node* base, - LoadSensitivity needs_poisoning) { - return raw_assembler()->Load(type, base, needs_poisoning); +Node* CodeAssembler::Load(MachineType type, Node* base) { + return raw_assembler()->Load(type, base); } -Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset, - LoadSensitivity needs_poisoning) { - return raw_assembler()->Load(type, base, offset, needs_poisoning); +Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) { + return raw_assembler()->Load(type, base, offset); } -TNode CodeAssembler::LoadFullTagged(Node* base, - LoadSensitivity needs_poisoning) { - return BitcastWordToTagged(Load(base, needs_poisoning)); +TNode CodeAssembler::LoadFullTagged(Node* base) { + return BitcastWordToTagged(Load(base)); 
} -TNode CodeAssembler::LoadFullTagged(Node* base, TNode offset, - LoadSensitivity needs_poisoning) { +TNode CodeAssembler::LoadFullTagged(Node* base, TNode offset) { // Please use LoadFromObject(MachineType::MapInHeader(), object, // IntPtrConstant(-kHeapObjectTag)) instead. DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset)); - return BitcastWordToTagged(Load(base, offset, needs_poisoning)); + return BitcastWordToTagged(Load(base, offset)); } Node* CodeAssembler::AtomicLoad(MachineType type, TNode base, diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h index 0e6872aa66e22f..35d860db22c82d 100644 --- a/deps/v8/src/compiler/code-assembler.h +++ b/deps/v8/src/compiler/code-assembler.h @@ -725,32 +725,22 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode LoadFramePointer(); TNode LoadParentFramePointer(); - // Poison |value| on speculative paths. - TNode TaggedPoisonOnSpeculation(TNode value); - TNode WordPoisonOnSpeculation(TNode value); - // Load raw memory location. - Node* Load(MachineType type, Node* base, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + Node* Load(MachineType type, Node* base); template TNode Load(MachineType type, TNode> base) { DCHECK( IsSubtype(type.representation(), MachineRepresentationOf::value)); return UncheckedCast(Load(type, static_cast(base))); } - Node* Load(MachineType type, Node* base, Node* offset, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + Node* Load(MachineType type, Node* base, Node* offset); template - TNode Load(Node* base, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return UncheckedCast( - Load(MachineTypeOf::value, base, needs_poisoning)); + TNode Load(Node* base) { + return UncheckedCast(Load(MachineTypeOf::value, base)); } template - TNode Load(Node* base, TNode offset, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return UncheckedCast( - Load(MachineTypeOf::value, base, offset, needs_poisoning)); + TNode Load(Node* base, TNode offset) { + return UncheckedCast(Load(MachineTypeOf::value, base, offset)); } template TNode AtomicLoad(TNode base, TNode offset) { @@ -761,11 +751,8 @@ class V8_EXPORT_PRIVATE CodeAssembler { TNode AtomicLoad64(TNode base, TNode offset); // Load uncompressed tagged value from (most likely off JS heap) memory // location. - TNode LoadFullTagged( - Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); - TNode LoadFullTagged( - Node* base, TNode offset, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe); + TNode LoadFullTagged(Node* base); + TNode LoadFullTagged(Node* base, TNode offset); Node* LoadFromObject(MachineType type, TNode object, TNode offset); @@ -1312,7 +1299,6 @@ class V8_EXPORT_PRIVATE CodeAssembler { void UnregisterCallGenerationCallbacks(); bool Word32ShiftIsSafe() const; - PoisoningMitigationLevel poisoning_level() const; bool IsJSFunctionCall() const; @@ -1595,13 +1581,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState { // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor. CodeAssemblerState(Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor, CodeKind kind, - const char* name, PoisoningMitigationLevel poisoning_level, - Builtin builtin = Builtin::kNoBuiltinId); + const char* name, Builtin builtin = Builtin::kNoBuiltinId); // Create with JSCall linkage. 
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count, CodeKind kind, const char* name, - PoisoningMitigationLevel poisoning_level, Builtin builtin = Builtin::kNoBuiltinId); ~CodeAssemblerState(); @@ -1628,8 +1612,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState { CodeAssemblerState(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor, CodeKind kind, - const char* name, PoisoningMitigationLevel poisoning_level, - Builtin builtin); + const char* name, Builtin builtin); void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label); void PopExceptionHandler(); diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc index b370a673b96850..329ccc7e86ddae 100644 --- a/deps/v8/src/compiler/common-operator.cc +++ b/deps/v8/src/compiler/common-operator.cc @@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) { UNREACHABLE(); } -std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) { - switch (is_safety_check) { - case IsSafetyCheck::kCriticalSafetyCheck: - return os << "CriticalSafetyCheck"; - case IsSafetyCheck::kSafetyCheck: - return os << "SafetyCheck"; - case IsSafetyCheck::kNoSafetyCheck: - return os << "NoSafetyCheck"; - } - UNREACHABLE(); -} - std::ostream& operator<<(std::ostream& os, TrapId trap_id) { switch (trap_id) { #define TRAP_CASE(Name) \ @@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) { return OpParameter(op); } -std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) { - return os << info.hint << ", " << info.is_safety_check; -} - -const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) { - DCHECK_EQ(IrOpcode::kBranch, op->opcode()); - return OpParameter(op); -} - BranchHint BranchHintOf(const Operator* const op) { switch (op->opcode()) { - case IrOpcode::kBranch: - return BranchOperatorInfoOf(op).hint; case IrOpcode::kIfValue: return IfValueParametersOf(op).hint(); case IrOpcode::kIfDefault: + case IrOpcode::kBranch: return OpParameter(op); default: UNREACHABLE(); @@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) { bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) { return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() && - lhs.feedback() == rhs.feedback() && - lhs.is_safety_check() == rhs.is_safety_check(); + lhs.feedback() == rhs.feedback(); } bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) { @@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) { size_t hash_value(DeoptimizeParameters p) { FeedbackSource::Hash feebdack_hash; - return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback()), - p.is_safety_check()); + return base::hash_combine(p.kind(), p.reason(), feebdack_hash(p.feedback())); } std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) { - return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check() - << ", " << p.feedback(); + return os << p.kind() << ", " << p.reason() << ", " << p.feedback(); } DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) { @@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) { return OpParameter(op); } -IsSafetyCheck IsSafetyCheckOf(const Operator* op) { - if (op->opcode() == IrOpcode::kBranch) { - return BranchOperatorInfoOf(op).is_safety_check; - } - return DeoptimizeParametersOf(op).is_safety_check(); -} - -const Operator* CommonOperatorBuilder::MarkAsSafetyCheck( - const 
Operator* op, IsSafetyCheck safety_check) { - if (op->opcode() == IrOpcode::kBranch) { - BranchOperatorInfo info = BranchOperatorInfoOf(op); - if (info.is_safety_check == safety_check) return op; - return Branch(info.hint, safety_check); - } - DeoptimizeParameters p = DeoptimizeParametersOf(op); - if (p.is_safety_check() == safety_check) return op; - switch (op->opcode()) { - case IrOpcode::kDeoptimizeIf: - return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check); - case IrOpcode::kDeoptimizeUnless: - return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check); - default: - UNREACHABLE(); - } -} - const Operator* CommonOperatorBuilder::DelayedStringConstant( const StringConstantBase* str) { return zone()->New>( @@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) { #define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged) -#define CACHED_BRANCH_LIST(V) \ - V(None, CriticalSafetyCheck) \ - V(True, CriticalSafetyCheck) \ - V(False, CriticalSafetyCheck) \ - V(None, SafetyCheck) \ - V(True, SafetyCheck) \ - V(False, SafetyCheck) \ - V(None, NoSafetyCheck) \ - V(True, NoSafetyCheck) \ - V(False, NoSafetyCheck) +#define CACHED_BRANCH_LIST(V) \ + V(None) \ + V(True) \ + V(False) #define CACHED_RETURN_LIST(V) \ V(1) \ @@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) { V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \ V(Soft, InsufficientTypeFeedbackForGenericNamedAccess) -#define CACHED_DEOPTIMIZE_IF_LIST(V) \ - V(Eager, DivisionByZero, NoSafetyCheck) \ - V(Eager, DivisionByZero, SafetyCheck) \ - V(Eager, Hole, NoSafetyCheck) \ - V(Eager, Hole, SafetyCheck) \ - V(Eager, MinusZero, NoSafetyCheck) \ - V(Eager, MinusZero, SafetyCheck) \ - V(Eager, Overflow, NoSafetyCheck) \ - V(Eager, Overflow, SafetyCheck) \ - V(Eager, Smi, SafetyCheck) - -#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \ - V(Eager, LostPrecision, NoSafetyCheck) \ - V(Eager, LostPrecision, SafetyCheck) \ - V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \ - V(Eager, LostPrecisionOrNaN, SafetyCheck) \ - V(Eager, NotAHeapNumber, SafetyCheck) \ - V(Eager, NotANumberOrOddball, SafetyCheck) \ - V(Eager, NotASmi, SafetyCheck) \ - V(Eager, OutOfBounds, SafetyCheck) \ - V(Eager, WrongInstanceType, SafetyCheck) \ - V(Eager, WrongMap, SafetyCheck) +#define CACHED_DEOPTIMIZE_IF_LIST(V) \ + V(Eager, DivisionByZero) \ + V(Eager, Hole) \ + V(Eager, MinusZero) \ + V(Eager, Overflow) \ + V(Eager, Smi) + +#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \ + V(Eager, LostPrecision) \ + V(Eager, LostPrecisionOrNaN) \ + V(Eager, NotAHeapNumber) \ + V(Eager, NotANumberOrOddball) \ + V(Eager, NotASmi) \ + V(Eager, OutOfBounds) \ + V(Eager, WrongInstanceType) \ + V(Eager, WrongMap) #define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \ V(DynamicCheckMaps) \ @@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final { CACHED_RETURN_LIST(CACHED_RETURN) #undef CACHED_RETURN - template - struct BranchOperator final : public Operator1 { + template + struct BranchOperator final : public Operator1 { BranchOperator() - : Operator1( // -- - IrOpcode::kBranch, Operator::kKontrol, // opcode - "Branch", // name - 1, 0, 1, 0, 0, 2, // counts - BranchOperatorInfo{hint, is_safety_check}) {} // parameter + : Operator1( // -- + IrOpcode::kBranch, Operator::kKontrol, // opcode + "Branch", // name + 1, 0, 1, 0, 0, 2, // counts + hint) {} // parameter }; -#define CACHED_BRANCH(Hint, IsCheck) \ - BranchOperator \ - kBranch##Hint##IsCheck##Operator; +#define CACHED_BRANCH(Hint) \ + BranchOperator 
kBranch##Hint##Operator; CACHED_BRANCH_LIST(CACHED_BRANCH) #undef CACHED_BRANCH @@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "Deoptimize", // name 1, 1, 1, 0, 0, 1, // counts - DeoptimizeParameters(kKind, kReason, FeedbackSource(), - IsSafetyCheck::kNoSafetyCheck)) {} + DeoptimizeParameters(kKind, kReason, FeedbackSource())) {} }; #define CACHED_DEOPTIMIZE(Kind, Reason) \ DeoptimizeOperator \ @@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final { CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE) #undef CACHED_DEOPTIMIZE - template + template struct DeoptimizeIfOperator final : public Operator1 { DeoptimizeIfOperator() : Operator1( // -- @@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "DeoptimizeIf", // name 2, 1, 1, 0, 1, 1, // counts - DeoptimizeParameters(kKind, kReason, FeedbackSource(), - is_safety_check)) {} + DeoptimizeParameters(kKind, kReason, FeedbackSource())) {} }; -#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \ - DeoptimizeIfOperator \ - kDeoptimizeIf##Kind##Reason##IsCheck##Operator; +#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \ + DeoptimizeIfOperator \ + kDeoptimizeIf##Kind##Reason##Operator; CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF) #undef CACHED_DEOPTIMIZE_IF - template + template struct DeoptimizeUnlessOperator final : public Operator1 { DeoptimizeUnlessOperator() @@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final { Operator::kFoldable | Operator::kNoThrow, // properties "DeoptimizeUnless", // name 2, 1, 1, 0, 1, 1, // counts - DeoptimizeParameters(kKind, kReason, FeedbackSource(), - is_safety_check)) {} + DeoptimizeParameters(kKind, kReason, FeedbackSource())) {} }; -#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \ +#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \ DeoptimizeUnlessOperator \ - kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; + DeoptimizeReason::k##Reason> \ + kDeoptimizeUnless##Kind##Reason##Operator; CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS) #undef CACHED_DEOPTIMIZE_UNLESS @@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final { "DynamicCheckMapsWithDeoptUnless", // name 6, 1, 1, 0, 1, 1, // counts DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason, - FeedbackSource(), - IsSafetyCheck::kCriticalSafetyCheck)) {} + FeedbackSource())) {} }; #define CACHED_DYNAMIC_CHECK_MAPS(Reason) \ DynamicMapCheckOperator k##Reason##Operator; @@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) { 1, 0, source); } -const Operator* CommonOperatorBuilder::Branch(BranchHint hint, - IsSafetyCheck is_safety_check) { -#define CACHED_BRANCH(Hint, IsCheck) \ - if (hint == BranchHint::k##Hint && \ - is_safety_check == IsSafetyCheck::k##IsCheck) { \ - return &cache_.kBranch##Hint##IsCheck##Operator; \ +const Operator* CommonOperatorBuilder::Branch(BranchHint hint) { +#define CACHED_BRANCH(Hint) \ + if (hint == BranchHint::k##Hint) { \ + return &cache_.kBranch##Hint##Operator; \ } CACHED_BRANCH_LIST(CACHED_BRANCH) #undef CACHED_BRANCH @@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize( CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE) #undef CACHED_DEOPTIMIZE // Uncached - DeoptimizeParameters parameter(kind, reason, feedback, - IsSafetyCheck::kNoSafetyCheck); + DeoptimizeParameters parameter(kind, reason, feedback); return zone()->New>( // -- IrOpcode::kDeoptimize, // opcodes Operator::kFoldable | Operator::kNoThrow, // 
properties @@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize( const Operator* CommonOperatorBuilder::DeoptimizeIf( DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, IsSafetyCheck is_safety_check) { -#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \ - if (kind == DeoptimizeKind::k##Kind && \ - reason == DeoptimizeReason::k##Reason && \ - is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \ - return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \ + FeedbackSource const& feedback) { +#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \ + if (kind == DeoptimizeKind::k##Kind && \ + reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \ + return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \ } CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF) #undef CACHED_DEOPTIMIZE_IF // Uncached - DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check); + DeoptimizeParameters parameter(kind, reason, feedback); return zone()->New>( // -- IrOpcode::kDeoptimizeIf, // opcode Operator::kFoldable | Operator::kNoThrow, // properties @@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf( const Operator* CommonOperatorBuilder::DeoptimizeUnless( DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, IsSafetyCheck is_safety_check) { -#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \ - if (kind == DeoptimizeKind::k##Kind && \ - reason == DeoptimizeReason::k##Reason && \ - is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \ - return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \ + FeedbackSource const& feedback) { +#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \ + if (kind == DeoptimizeKind::k##Kind && \ + reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \ + return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \ } CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS) #undef CACHED_DEOPTIMIZE_UNLESS // Uncached - DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check); + DeoptimizeParameters parameter(kind, reason, feedback); return zone()->New>( // -- IrOpcode::kDeoptimizeUnless, // opcode Operator::kFoldable | Operator::kNoThrow, // properties @@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) { return OpParameter(op); } -IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) { - if (a == IsSafetyCheck::kCriticalSafetyCheck || - b == IsSafetyCheck::kCriticalSafetyCheck) { - return IsSafetyCheck::kCriticalSafetyCheck; - } - if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) { - return IsSafetyCheck::kSafetyCheck; - } - return IsSafetyCheck::kNoSafetyCheck; -} - #undef COMMON_CACHED_OP_LIST #undef CACHED_BRANCH_LIST #undef CACHED_RETURN_LIST diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h index fa49d3b9920157..f691c1fbf46921 100644 --- a/deps/v8/src/compiler/common-operator.h +++ b/deps/v8/src/compiler/common-operator.h @@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast(hint); } V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint); -enum class IsSafetyCheck : uint8_t { - kCriticalSafetyCheck, - kSafetyCheck, - kNoSafetyCheck -}; - -// Get the more critical safety check of the two arguments. 
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck); - -V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck); -inline size_t hash_value(IsSafetyCheck is_safety_check) { - return static_cast(is_safety_check); -} - enum class TrapId : uint32_t { #define DEF_ENUM(Name, ...) k##Name, FOREACH_WASM_TRAPREASON(DEF_ENUM) @@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id); TrapId TrapIdOf(const Operator* const op); -struct BranchOperatorInfo { - BranchHint hint; - IsSafetyCheck is_safety_check; -}; - -inline size_t hash_value(const BranchOperatorInfo& info) { - return base::hash_combine(info.hint, info.is_safety_check); -} - -V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo); - -inline bool operator==(const BranchOperatorInfo& a, - const BranchOperatorInfo& b) { - return a.hint == b.hint && a.is_safety_check == b.is_safety_check; -} - -V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf( - const Operator* const) V8_WARN_UNUSED_RESULT; V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const) V8_WARN_UNUSED_RESULT; @@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op); class DeoptimizeParameters final { public: DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, - IsSafetyCheck is_safety_check) - : kind_(kind), - reason_(reason), - feedback_(feedback), - is_safety_check_(is_safety_check) {} + FeedbackSource const& feedback) + : kind_(kind), reason_(reason), feedback_(feedback) {} DeoptimizeKind kind() const { return kind_; } DeoptimizeReason reason() const { return reason_; } const FeedbackSource& feedback() const { return feedback_; } - IsSafetyCheck is_safety_check() const { return is_safety_check_; } private: DeoptimizeKind const kind_; DeoptimizeReason const reason_; FeedbackSource const feedback_; - IsSafetyCheck is_safety_check_; }; bool operator==(DeoptimizeParameters, DeoptimizeParameters); @@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p); DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const) V8_WARN_UNUSED_RESULT; -IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT; - class SelectParameters final { public: explicit SelectParameters(MachineRepresentation representation, @@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const Operator* Unreachable(); const Operator* StaticAssert(const char* source); const Operator* End(size_t control_input_count); - const Operator* Branch(BranchHint = BranchHint::kNone, - IsSafetyCheck = IsSafetyCheck::kSafetyCheck); + const Operator* Branch(BranchHint = BranchHint::kNone); const Operator* IfTrue(); const Operator* IfFalse(); const Operator* IfSuccess(); @@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const Operator* Throw(); const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason, FeedbackSource const& feedback); - const Operator* DeoptimizeIf( - DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); - const Operator* DeoptimizeUnless( - DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); + const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason, + FeedbackSource const& feedback); + const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason 
reason, + FeedbackSource const& feedback); // DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if // the condition is false, which may then either deoptimize or resume // execution. @@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final const wasm::FunctionSig* signature); #endif // V8_ENABLE_WEBASSEMBLY - const Operator* MarkAsSafetyCheck(const Operator* op, - IsSafetyCheck safety_check); - const Operator* DelayedStringConstant(const StringConstantBase* str); private: diff --git a/deps/v8/src/compiler/decompression-optimizer.cc b/deps/v8/src/compiler/decompression-optimizer.cc index 79e77fcee65e73..c0068489f75fc1 100644 --- a/deps/v8/src/compiler/decompression-optimizer.cc +++ b/deps/v8/src/compiler/decompression-optimizer.cc @@ -15,8 +15,7 @@ namespace { bool IsMachineLoad(Node* const node) { const IrOpcode::Value opcode = node->opcode(); - return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad || - opcode == IrOpcode::kProtectedLoad || + return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad || opcode == IrOpcode::kUnalignedLoad || opcode == IrOpcode::kLoadImmutable; } @@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) { NodeProperties::ChangeOp(node, machine()->LoadImmutable(compressed_load_rep)); break; - case IrOpcode::kPoisonedLoad: - NodeProperties::ChangeOp(node, - machine()->PoisonedLoad(compressed_load_rep)); - break; case IrOpcode::kProtectedLoad: NodeProperties::ChangeOp(node, machine()->ProtectedLoad(compressed_load_rep)); diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc index d7a0ca62dd2110..a6bb7074b033f8 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.cc +++ b/deps/v8/src/compiler/effect-control-linearizer.cc @@ -36,7 +36,6 @@ namespace internal { namespace compiler { enum class MaintainSchedule { kMaintain, kDiscard }; -enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex }; class EffectControlLinearizer { public: @@ -44,13 +43,11 @@ class EffectControlLinearizer { JSGraphAssembler* graph_assembler, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - MaskArrayIndexEnable mask_array_index, MaintainSchedule maintain_schedule, JSHeapBroker* broker) : js_graph_(js_graph), schedule_(schedule), temp_zone_(temp_zone), - mask_array_index_(mask_array_index), maintain_schedule_(maintain_schedule), source_positions_(source_positions), node_origins_(node_origins), @@ -80,7 +77,6 @@ class EffectControlLinearizer { Node* LowerChangeTaggedToUint32(Node* node); Node* LowerChangeTaggedToInt64(Node* node); Node* LowerChangeTaggedToTaggedSigned(Node* node); - Node* LowerPoisonIndex(Node* node); Node* LowerCheckInternalizedString(Node* node, Node* frame_state); void LowerCheckMaps(Node* node, Node* frame_state); void LowerDynamicCheckMaps(Node* node, Node* frame_state); @@ -338,7 +334,6 @@ class EffectControlLinearizer { JSGraph* js_graph_; Schedule* schedule_; Zone* temp_zone_; - MaskArrayIndexEnable mask_array_index_; MaintainSchedule maintain_schedule_; RegionObservability region_observability_ = RegionObservability::kObservable; SourcePositionTable* source_positions_; @@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node, case IrOpcode::kTruncateTaggedToFloat64: result = LowerTruncateTaggedToFloat64(node); break; - case IrOpcode::kPoisonIndex: - result = LowerPoisonIndex(node); - break; case IrOpcode::kCheckClosure: result = 
LowerCheckClosure(node, frame_state); break; @@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) { return done.PhiAt(0); } -Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) { - Node* index = node->InputAt(0); - if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) { - index = __ Word32PoisonOnSpeculation(index); - } - return index; -} - Node* EffectControlLinearizer::LowerCheckClosure(Node* node, Node* frame_state) { Handle feedback_cell = FeedbackCellOf(node->op()); @@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt( __ Word32And(bitfield3, __ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)), __ Int32Constant(0)); - __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state, - IsSafetyCheck::kCriticalSafetyCheck); + __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state); Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow; Runtime::FunctionId id = Runtime::kTryMigrateInstance; auto call_descriptor = Linkage::GetRuntimeCallDescriptor( @@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt( __ Int32Constant(1), __ NoContextConstant()); Node* check = ObjectIsSmi(result); __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source, - check, frame_state, IsSafetyCheck::kCriticalSafetyCheck); + check, frame_state); } void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { @@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { Node* check = __ TaggedEqual(value_map, map); if (i == map_count - 1) { __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check, - frame_state, IsSafetyCheck::kCriticalSafetyCheck); + frame_state); } else { auto next_map = __ MakeLabel(); __ BranchWithCriticalSafetyCheck(check, &done, &next_map); @@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) { if (i == map_count - 1) { __ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check, - frame_state, IsSafetyCheck::kCriticalSafetyCheck); + frame_state); } else { auto next_map = __ MakeLabel(); __ BranchWithCriticalSafetyCheck(check, &done, &next_map); @@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node, Node* check = __ Uint32LessThan(index, limit); if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) { __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, - params.check_parameters().feedback(), check, frame_state, - IsSafetyCheck::kCriticalSafetyCheck); + params.check_parameters().feedback(), check, + frame_state); } else { auto if_abort = __ MakeDeferredLabel(); auto done = __ MakeLabel(); @@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node, Node* check = __ Uint64LessThan(index, limit); if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) { __ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds, - params.check_parameters().feedback(), check, frame_state, - IsSafetyCheck::kCriticalSafetyCheck); + params.check_parameters().feedback(), check, + frame_state); } else { auto if_abort = __ MakeDeferredLabel(); auto done = __ MakeLabel(); @@ -5776,8 +5759,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) { Node* data_ptr = BuildTypedArrayDataPointer(base, external); // Perform the actual typed element access. 
- return __ LoadElement(AccessBuilder::ForTypedArrayElement( - array_type, true, LoadSensitivity::kCritical), + return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true), data_ptr, index); } @@ -6796,26 +6778,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) { #undef __ -namespace { - -MaskArrayIndexEnable MaskArrayForPoisonLevel( - PoisoningMitigationLevel poison_level) { - return (poison_level != PoisoningMitigationLevel::kDontPoison) - ? MaskArrayIndexEnable::kMaskArrayIndex - : MaskArrayIndexEnable::kDoNotMaskArrayIndex; -} - -} // namespace - void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poison_level, JSHeapBroker* broker) { JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr); EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_, temp_zone, source_positions, node_origins, - MaskArrayForPoisonLevel(poison_level), MaintainSchedule::kDiscard, broker); linearizer.Run(); } @@ -6824,16 +6793,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poison_level, JSHeapBroker* broker) { JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt, schedule); EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler, temp_zone, source_positions, node_origins, - MaskArrayForPoisonLevel(poison_level), MaintainSchedule::kMaintain, broker); - MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler, - poison_level); + MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler); SelectLowering select_lowering(&graph_assembler, js_graph->graph()); graph_assembler.AddInlineReducer(&memory_lowering); graph_assembler.AddInlineReducer(&select_lowering); diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h index fca4899263cd43..97467391e2af71 100644 --- a/deps/v8/src/compiler/effect-control-linearizer.h +++ b/deps/v8/src/compiler/effect-control-linearizer.h @@ -26,7 +26,7 @@ class JSHeapBroker; V8_EXPORT_PRIVATE void LinearizeEffectControl( JSGraph* graph, Schedule* schedule, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poison_level, JSHeapBroker* broker); + JSHeapBroker* broker); // Performs effect control linearization lowering in addition to machine // lowering, producing a scheduled graph that is ready for instruction @@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl( V8_EXPORT_PRIVATE void LowerToMachineSchedule( JSGraph* graph, Schedule* schedule, Zone* temp_zone, SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poison_level, JSHeapBroker* broker); + JSHeapBroker* broker); } // namespace compiler } // namespace internal diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc index 26ae88362d8067..6bfd6f8c22356c 100644 --- a/deps/v8/src/compiler/graph-assembler.cc +++ b/deps/v8/src/compiler/graph-assembler.cc @@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) { effect(), control())); } -Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) { - return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value, - effect(), control())); 
-} - Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback, - Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check) { - return AddNode( - graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, - feedback, is_safety_check), - condition, frame_state, effect(), control())); + Node* condition, Node* frame_state) { + return AddNode(graph()->NewNode( + common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback), + condition, frame_state, effect(), control())); } Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason, FeedbackSource const& feedback, - Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check) { - return AddNode(graph()->NewNode( - common()->DeoptimizeIf(kind, reason, feedback, is_safety_check), - condition, frame_state, effect(), control())); + Node* condition, Node* frame_state) { + return AddNode( + graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback), + condition, frame_state, effect(), control())); } Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason, FeedbackSource const& feedback, - Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check) { - return AddNode(graph()->NewNode( - common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check), - condition, frame_state, effect(), control())); + Node* condition, Node* frame_state) { + return AddNode( + graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback), + condition, frame_state, effect(), control())); } Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback, - Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check) { + Node* condition, Node* frame_state) { return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition, - frame_state, is_safety_check); + frame_state); } Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition, @@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck( hint = if_false->IsDeferred() ? 
BranchHint::kTrue : BranchHint::kFalse; } - BranchImpl(condition, if_true, if_false, hint, - IsSafetyCheck::kCriticalSafetyCheck); + BranchImpl(condition, if_true, if_false, hint); } void GraphAssembler::RecordBranchInBlockUpdater(Node* branch, diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h index 5efe6dd9c3c3dd..c9ddd63e719254 100644 --- a/deps/v8/src/compiler/graph-assembler.h +++ b/deps/v8/src/compiler/graph-assembler.h @@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler { Node* Retain(Node* buffer); Node* UnsafePointerAdd(Node* base, Node* external); - Node* Word32PoisonOnSpeculation(Node* value); - - Node* DeoptimizeIf( - DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition, - Node* frame_state, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); - Node* DeoptimizeIf( - DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); - Node* DeoptimizeIfNot( - DeoptimizeKind kind, DeoptimizeReason reason, - FeedbackSource const& feedback, Node* condition, Node* frame_state, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); - Node* DeoptimizeIfNot( - DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition, - Node* frame_state, - IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck); + Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback, + Node* condition, Node* frame_state); + Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason, + FeedbackSource const& feedback, Node* condition, + Node* frame_state); + Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason, + FeedbackSource const& feedback, Node* condition, + Node* frame_state); + Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback, + Node* condition, Node* frame_state); Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index, Node* map, Node* handler, Node* feedback_vector, @@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler { void BranchImpl(Node* condition, GraphAssemblerLabel* if_true, GraphAssemblerLabel* if_false, - BranchHint hint, IsSafetyCheck is_safety_check, Vars...); + BranchHint hint, Vars...); void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control, Node* if_false_control, BasicBlock* if_true_block, @@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition, hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse; } - BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck, - vars...); + BranchImpl(condition, if_true, if_false, hint, vars...); } template @@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint( Node* condition, GraphAssemblerLabel* if_true, GraphAssemblerLabel* if_false, BranchHint hint, Vars... vars) { - BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck, - vars...); + BranchImpl(condition, if_true, if_false, hint, vars...); } template void GraphAssembler::BranchImpl(Node* condition, GraphAssemblerLabel* if_true, GraphAssemblerLabel* if_false, - BranchHint hint, IsSafetyCheck is_safety_check, - Vars... vars) { + BranchHint hint, Vars... 
vars) { DCHECK_NOT_NULL(control()); - Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check), - condition, control()); + Node* branch = graph()->NewNode(common()->Branch(hint), condition, control()); Node* if_true_control = control_ = graph()->NewNode(common()->IfTrue(), branch); diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc index 3dcdc6a33ee2ed..32bdebad493a38 100644 --- a/deps/v8/src/compiler/js-call-reducer.cc +++ b/deps/v8/src/compiler/js-call-reducer.cc @@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler { TNode elements = LoadField(AccessBuilder::ForJSObjectElements(), o); TNode value = LoadElement( - AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical), - elements, index); + AccessBuilder::ForFixedArrayElement(kind), elements, index); return std::make_pair(index, value); } @@ -6373,9 +6372,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt( index, receiver_length, effect, control); // Return the character from the {receiver} as single character string. - Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index); Node* value = effect = graph()->NewNode(string_access_operator, receiver, - masked_index, effect, control); + index, effect, control); ReplaceWithValue(node, value, effect, control); return Replace(value); @@ -6433,11 +6431,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) { Node* etrue = effect; Node* vtrue; { - Node* masked_position = graph()->NewNode( - simplified()->PoisonIndex(), unsigned_position); Node* string_first = etrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, - masked_position, etrue, if_true); + unsigned_position, etrue, if_true); Node* search_first = jsgraph()->Constant(str.GetFirstChar().value()); @@ -6488,10 +6484,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) { index, receiver_length, effect, control); // Return the character from the {receiver} as single character string. - Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index); - Node* value = effect = - graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index, - effect, control); + Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(), + receiver, index, effect, control); value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value); ReplaceWithValue(node, value, effect, control); diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc index 414977eb7db985..2d69b8dfca3f2d 100644 --- a/deps/v8/src/compiler/js-create-lowering.cc +++ b/deps/v8/src/compiler/js-create-lowering.cc @@ -1713,7 +1713,6 @@ base::Optional JSCreateLowering::TryAllocateFastLiteral( Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier, - LoadSensitivity::kUnsafe, const_field_info}; // Note: the use of RawInobjectPropertyAt (vs. 
the higher-level diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc index e03e0d41a31437..49edcc783a221d 100644 --- a/deps/v8/src/compiler/js-native-context-specialization.cc +++ b/deps/v8/src/compiler/js-native-context-specialization.cc @@ -2449,7 +2449,6 @@ JSNativeContextSpecialization::BuildPropertyStore( field_type, MachineType::TypeForRepresentation(field_representation), kFullWriteBarrier, - LoadSensitivity::kUnsafe, access_info.GetConstFieldInfo(), access_mode == AccessMode::kStoreInLiteral}; @@ -2483,7 +2482,6 @@ JSNativeContextSpecialization::BuildPropertyStore( Type::OtherInternal(), MachineType::TaggedPointer(), kPointerWriteBarrier, - LoadSensitivity::kUnsafe, access_info.GetConstFieldInfo(), access_mode == AccessMode::kStoreInLiteral}; storage = effect = @@ -2789,10 +2787,8 @@ JSNativeContextSpecialization::BuildElementAccess( if (situation == kHandleOOB_SmiCheckDone) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); - Node* branch = graph()->NewNode( - common()->Branch(BranchHint::kTrue, - IsSafetyCheck::kCriticalSafetyCheck), - check, control); + Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), + check, control); Node* if_true = graph()->NewNode(common()->IfTrue(), branch); Node* etrue = effect; @@ -2980,10 +2976,9 @@ JSNativeContextSpecialization::BuildElementAccess( element_type = Type::SignedSmall(); element_machine_type = MachineType::TaggedSigned(); } - ElementAccess element_access = { - kTaggedBase, FixedArray::kHeaderSize, - element_type, element_machine_type, - kFullWriteBarrier, LoadSensitivity::kCritical}; + ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize, + element_type, element_machine_type, + kFullWriteBarrier}; // Access the actual element. if (keyed_mode.access_mode() == AccessMode::kLoad) { @@ -3003,10 +2998,8 @@ JSNativeContextSpecialization::BuildElementAccess( CanTreatHoleAsUndefined(receiver_maps)) { Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); - Node* branch = graph()->NewNode( - common()->Branch(BranchHint::kTrue, - IsSafetyCheck::kCriticalSafetyCheck), - check, control); + Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), + check, control); Node* if_true = graph()->NewNode(common()->IfTrue(), branch); Node* etrue = effect; @@ -3289,9 +3282,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad( Node* check = graph()->NewNode(simplified()->NumberLessThan(), index, length); Node* branch = - graph()->NewNode(common()->Branch(BranchHint::kTrue, - IsSafetyCheck::kCriticalSafetyCheck), - check, *control); + graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control); Node* if_true = graph()->NewNode(common()->IfTrue(), branch); // Do a real bounds check against {length}. 
This is in order to protect @@ -3302,10 +3293,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad( CheckBoundsFlag::kConvertStringAndMinusZero | CheckBoundsFlag::kAbortOnOutOfBounds), index, length, *effect, if_true); - Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index); - Node* vtrue = etrue = - graph()->NewNode(simplified()->StringCharCodeAt(), receiver, - masked_index, etrue, if_true); + Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(), + receiver, index, etrue, if_true); vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue); Node* if_false = graph()->NewNode(common()->IfFalse(), branch); @@ -3323,12 +3312,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad( CheckBoundsFlag::kConvertStringAndMinusZero), index, length, *effect, *control); - Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index); - // Return the character from the {receiver} as single character string. - Node* value = *effect = - graph()->NewNode(simplified()->StringCharCodeAt(), receiver, - masked_index, *effect, *control); + Node* value = *effect = graph()->NewNode( + simplified()->StringCharCodeAt(), receiver, index, *effect, *control); value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value); return value; } diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h index 8b33444b294bf2..707c7d98ab8b04 100644 --- a/deps/v8/src/compiler/linkage.h +++ b/deps/v8/src/compiler/linkage.h @@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final kInitializeRootRegister = 1u << 3, // Does not ever try to allocate space on our heap. kNoAllocate = 1u << 4, - // Use retpoline for this call if indirect. - kRetpoline = 1u << 5, // Use the kJavaScriptCallCodeStartRegister (fixed) register for the // indirect target address when calling. - kFixedTargetRegister = 1u << 6, - kCallerSavedRegisters = 1u << 7, + kFixedTargetRegister = 1u << 5, + kCallerSavedRegisters = 1u << 6, // The kCallerSavedFPRegisters only matters (and set) when the more general // flag for kCallerSavedRegisters above is also set. - kCallerSavedFPRegisters = 1u << 8, + kCallerSavedFPRegisters = 1u << 7, // Tail calls for tier up are special (in fact they are different enough // from normal tail calls to warrant a dedicated opcode; but they also have // enough similar aspects that reusing the TailCall opcode is pragmatic). @@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final // // In other words, behavior is identical to a jmp instruction prior caller // frame construction. - kIsTailCallForTierUp = 1u << 9, + kIsTailCallForTierUp = 1u << 8, + + // AIX has a function descriptor by default but it can be disabled for a + // certain CFunction call (only used for Kind::kCallAddress). + kNoFunctionDescriptor = 1u << 9, // Flags past here are *not* encoded in InstructionCode and are thus not // accessible from the code generator. See also // kFlagsBitsEncodedInInstructionCode. - - // AIX has a function descriptor by default but it can be disabled for a - // certain CFunction call (only used for Kind::kCallAddress). 
- kNoFunctionDescriptor = 1u << 10, }; using Flags = base::Flags; diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc index 88679283d94877..eb4918cfd762ea 100644 --- a/deps/v8/src/compiler/machine-graph-verifier.cc +++ b/deps/v8/src/compiler/machine-graph-verifier.cc @@ -124,7 +124,6 @@ class MachineRepresentationInferrer { case IrOpcode::kLoad: case IrOpcode::kLoadImmutable: case IrOpcode::kProtectedLoad: - case IrOpcode::kPoisonedLoad: representation_vector_[node->id()] = PromoteRepresentation( LoadRepresentationOf(node->op()).representation()); break; @@ -206,15 +205,8 @@ class MachineRepresentationInferrer { case IrOpcode::kChangeInt32ToTagged: case IrOpcode::kChangeUint32ToTagged: case IrOpcode::kBitcastWordToTagged: - case IrOpcode::kTaggedPoisonOnSpeculation: representation_vector_[node->id()] = MachineRepresentation::kTagged; break; - case IrOpcode::kWord32PoisonOnSpeculation: - representation_vector_[node->id()] = MachineRepresentation::kWord32; - break; - case IrOpcode::kWord64PoisonOnSpeculation: - representation_vector_[node->id()] = MachineRepresentation::kWord64; - break; case IrOpcode::kCompressedHeapConstant: representation_vector_[node->id()] = MachineRepresentation::kCompressedPointer; @@ -394,14 +386,6 @@ class MachineRepresentationChecker { CheckValueInputRepresentationIs( node, 0, MachineType::PointerRepresentation()); break; - case IrOpcode::kWord32PoisonOnSpeculation: - CheckValueInputRepresentationIs(node, 0, - MachineRepresentation::kWord32); - break; - case IrOpcode::kWord64PoisonOnSpeculation: - CheckValueInputRepresentationIs(node, 0, - MachineRepresentation::kWord64); - break; case IrOpcode::kBitcastTaggedToWord: case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits: if (COMPRESS_POINTERS_BOOL) { @@ -410,9 +394,6 @@ class MachineRepresentationChecker { CheckValueInputIsTagged(node, 0); } break; - case IrOpcode::kTaggedPoisonOnSpeculation: - CheckValueInputIsTagged(node, 0); - break; case IrOpcode::kTruncateFloat64ToWord32: case IrOpcode::kTruncateFloat64ToUint32: case IrOpcode::kTruncateFloat64ToFloat32: @@ -566,7 +547,6 @@ class MachineRepresentationChecker { case IrOpcode::kWord32AtomicLoad: case IrOpcode::kWord32AtomicPairLoad: case IrOpcode::kWord64AtomicLoad: - case IrOpcode::kPoisonedLoad: CheckValueInputIsTaggedOrPointer(node, 0); CheckValueInputRepresentationIs( node, 1, MachineType::PointerRepresentation()); diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc index 411c6d4cb32d35..3dd695339015e6 100644 --- a/deps/v8/src/compiler/machine-operator.cc +++ b/deps/v8/src/compiler/machine-operator.cc @@ -124,7 +124,6 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) { IrOpcode::kWord32AtomicLoad == op->opcode() || IrOpcode::kWord64AtomicLoad == op->opcode() || IrOpcode::kWord32AtomicPairLoad == op->opcode() || - IrOpcode::kPoisonedLoad == op->opcode() || IrOpcode::kUnalignedLoad == op->opcode() || IrOpcode::kLoadImmutable == op->opcode()); return OpParameter(op); @@ -831,13 +830,6 @@ struct MachineOperatorGlobalCache { Operator::kEliminatable, "Load", 2, 1, \ 1, 1, 1, 0, MachineType::Type()) {} \ }; \ - struct PoisonedLoad##Type##Operator final \ - : public Operator1 { \ - PoisonedLoad##Type##Operator() \ - : Operator1( \ - IrOpcode::kPoisonedLoad, Operator::kEliminatable, \ - "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \ - }; \ struct UnalignedLoad##Type##Operator final \ : public Operator1 { \ UnalignedLoad##Type##Operator() \ @@ 
-861,7 +853,6 @@ struct MachineOperatorGlobalCache { 0, 0, 1, 0, 0, MachineType::Type()) {} \ }; \ Load##Type##Operator kLoad##Type; \ - PoisonedLoad##Type##Operator kPoisonedLoad##Type; \ UnalignedLoad##Type##Operator kUnalignedLoad##Type; \ ProtectedLoad##Type##Operator kProtectedLoad##Type; \ LoadImmutable##Type##Operator kLoadImmutable##Type; @@ -1157,30 +1148,6 @@ struct MachineOperatorGlobalCache { }; BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord; - struct TaggedPoisonOnSpeculation : public Operator { - TaggedPoisonOnSpeculation() - : Operator(IrOpcode::kTaggedPoisonOnSpeculation, - Operator::kEliminatable | Operator::kNoWrite, - "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {} - }; - TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation; - - struct Word32PoisonOnSpeculation : public Operator { - Word32PoisonOnSpeculation() - : Operator(IrOpcode::kWord32PoisonOnSpeculation, - Operator::kEliminatable | Operator::kNoWrite, - "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {} - }; - Word32PoisonOnSpeculation kWord32PoisonOnSpeculation; - - struct Word64PoisonOnSpeculation : public Operator { - Word64PoisonOnSpeculation() - : Operator(IrOpcode::kWord64PoisonOnSpeculation, - Operator::kEliminatable | Operator::kNoWrite, - "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {} - }; - Word64PoisonOnSpeculation kWord64PoisonOnSpeculation; - struct AbortCSAAssertOperator : public Operator { AbortCSAAssertOperator() : Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow, @@ -1366,16 +1333,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) { UNREACHABLE(); } -const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) { -#define LOAD(Type) \ - if (rep == MachineType::Type()) { \ - return &cache_.kPoisonedLoad##Type; \ - } - MACHINE_TYPE_LIST(LOAD) -#undef LOAD - UNREACHABLE(); -} - const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) { #define LOAD(Type) \ if (rep == MachineType::Type()) { \ @@ -1813,18 +1770,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() { return &cache_.kWord32AtomicPairCompareExchange; } -const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() { - return &cache_.kTaggedPoisonOnSpeculation; -} - -const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() { - return &cache_.kWord32PoisonOnSpeculation; -} - -const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() { - return &cache_.kWord64PoisonOnSpeculation; -} - #define EXTRACT_LANE_OP(Type, Sign, lane_count) \ const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \ int32_t lane_index) { \ diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h index 0ee3649ad0cd78..80a5b991a5b838 100644 --- a/deps/v8/src/compiler/machine-operator.h +++ b/deps/v8/src/compiler/machine-operator.h @@ -852,7 +852,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final // load [base + index] const Operator* Load(LoadRepresentation rep); const Operator* LoadImmutable(LoadRepresentation rep); - const Operator* PoisonedLoad(LoadRepresentation rep); const Operator* ProtectedLoad(LoadRepresentation rep); const Operator* LoadTransform(MemoryAccessKind kind, @@ -879,11 +878,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* StackSlot(int size, int alignment = 0); const Operator* StackSlot(MachineRepresentation rep, int alignment = 0); - // Destroy value by masking when misspeculating. 
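The removed comment just above ("Destroy value by masking when misspeculating") summarizes what PoisonedLoad and the Tagged/Word32/Word64 PoisonOnSpeculation operators compiled to: AND the produced value with the speculation poison, so a value obtained only under mis-speculation becomes zero and cannot act as the address of a second, cache-revealing load. A rough standalone illustration, with ComputePoison standing in for the dedicated poison register that the code generator actually maintained:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // All ones on the architecturally valid path, zero when the guarding check
    // failed; branchless, so a speculating CPU computes the same mask.
    uint64_t ComputePoison(bool check_passed) {
      return 0u - static_cast<uint64_t>(check_passed);
    }

    // A "poisoned" load: the result is only meaningful when poison is all ones.
    uint64_t PoisonedLoad(const uint64_t* base, size_t index, uint64_t poison) {
      return base[index] & poison;
    }

    int main() {
      uint64_t heap[4] = {11, 22, 33, 44};
      size_t index = 2;
      uint64_t poison = ComputePoison(index < 4);
      uint64_t value = PoisonedLoad(heap, index, poison);
      std::printf("%llu\n", static_cast<unsigned long long>(value));
      return 0;
    }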
- const Operator* TaggedPoisonOnSpeculation(); - const Operator* Word32PoisonOnSpeculation(); - const Operator* Word64PoisonOnSpeculation(); - // Access to the machine stack. const Operator* LoadFramePointer(); const Operator* LoadParentFramePointer(); @@ -980,7 +974,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final V(Word, Ror) \ V(Word, Clz) \ V(Word, Equal) \ - V(Word, PoisonOnSpeculation) \ V(Int, Add) \ V(Int, Sub) \ V(Int, Mul) \ diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc index 9673a51844ecc1..27ad71c07a62b2 100644 --- a/deps/v8/src/compiler/memory-lowering.cc +++ b/deps/v8/src/compiler/memory-lowering.cc @@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject { MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler, - PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding, WriteBarrierAssertFailedCallback callback, const char* function_debug_name) @@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone, machine_(jsgraph->machine()), graph_assembler_(graph_assembler), allocation_folding_(allocation_folding), - poisoning_level_(poisoning_level), write_barrier_assert_failed_(callback), function_debug_name_(function_debug_name) {} @@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) { node->ReplaceInput(1, ComputeIndex(access, index)); MachineType type = access.machine_type; DCHECK(!type.IsMapWord()); - if (NeedsPoisoning(access.load_sensitivity)) { - NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); - } else { - NodeProperties::ChangeOp(node, machine()->Load(type)); - } + NodeProperties::ChangeOp(node, machine()->Load(type)); return Changed(node); } @@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer( Node* node, ExternalPointerTag external_pointer_tag) { #ifdef V8_HEAP_SANDBOX DCHECK(V8_HEAP_SANDBOX_BOOL); - DCHECK(node->opcode() == IrOpcode::kLoad || - node->opcode() == IrOpcode::kPoisonedLoad); + DCHECK(node->opcode() == IrOpcode::kLoad); Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); __ InitializeEffectControl(effect, control); @@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) { } if (type.IsMapWord()) { - DCHECK(!NeedsPoisoning(access.load_sensitivity)); DCHECK(!access.type.Is(Type::SandboxedExternalPointer())); return ReduceLoadMap(node); } - if (NeedsPoisoning(access.load_sensitivity)) { - NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type)); - } else { - NodeProperties::ChangeOp(node, machine()->Load(type)); - } + NodeProperties::ChangeOp(node, machine()->Load(type)); if (V8_HEAP_SANDBOX_BOOL && access.type.Is(Type::SandboxedExternalPointer())) { @@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind( return write_barrier_kind; } -bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const { - // Safe loads do not need poisoning. 
- if (load_sensitivity == LoadSensitivity::kSafe) return false; - - switch (poisoning_level_) { - case PoisoningMitigationLevel::kDontPoison: - return false; - case PoisoningMitigationLevel::kPoisonAll: - return true; - case PoisoningMitigationLevel::kPoisonCriticalOnly: - return load_sensitivity == LoadSensitivity::kCritical; - } - UNREACHABLE(); -} - MemoryLowering::AllocationGroup::AllocationGroup(Node* node, AllocationType allocation, Zone* zone) diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h index 1fbe18abff329f..9edb880e6fd3ff 100644 --- a/deps/v8/src/compiler/memory-lowering.h +++ b/deps/v8/src/compiler/memory-lowering.h @@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer { MemoryLowering( JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler, - PoisoningMitigationLevel poisoning_level, AllocationFolding allocation_folding = AllocationFolding::kDontAllocationFolding, WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*, @@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer { Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag); Reduction ReduceLoadMap(Node* encoded_pointer); Node* ComputeIndex(ElementAccess const& access, Node* node); - bool NeedsPoisoning(LoadSensitivity load_sensitivity) const; void EnsureAllocateOperator(); Node* GetWasmInstanceNode(); @@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer { MachineOperatorBuilder* machine_; JSGraphAssembler* graph_assembler_; AllocationFolding allocation_folding_; - PoisoningMitigationLevel poisoning_level_; WriteBarrierAssertFailedCallback write_barrier_assert_failed_; const char* function_debug_name_; diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc index 860ea1fae18ce9..ba4a5c1f675e2a 100644 --- a/deps/v8/src/compiler/memory-optimizer.cc +++ b/deps/v8/src/compiler/memory-optimizer.cc @@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) { case IrOpcode::kLoadLane: case IrOpcode::kLoadTransform: case IrOpcode::kMemoryBarrier: - case IrOpcode::kPoisonedLoad: case IrOpcode::kProtectedLoad: case IrOpcode::kProtectedStore: case IrOpcode::kRetain: @@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) { case IrOpcode::kStoreField: case IrOpcode::kStoreLane: case IrOpcode::kStoreToObject: - case IrOpcode::kTaggedPoisonOnSpeculation: case IrOpcode::kUnalignedLoad: case IrOpcode::kUnalignedStore: case IrOpcode::kUnreachable: @@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) { case IrOpcode::kWord32AtomicStore: case IrOpcode::kWord32AtomicSub: case IrOpcode::kWord32AtomicXor: - case IrOpcode::kWord32PoisonOnSpeculation: case IrOpcode::kWord64AtomicAdd: case IrOpcode::kWord64AtomicAnd: case IrOpcode::kWord64AtomicCompareExchange: @@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) { case IrOpcode::kWord64AtomicStore: case IrOpcode::kWord64AtomicSub: case IrOpcode::kWord64AtomicXor: - case IrOpcode::kWord64PoisonOnSpeculation: return false; case IrOpcode::kCall: @@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name, } // namespace MemoryOptimizer::MemoryOptimizer( - JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level, + JSGraph* jsgraph, Zone* zone, MemoryLowering::AllocationFolding allocation_folding, const char* function_debug_name, TickCounter* tick_counter) : graph_assembler_(jsgraph, zone), - memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level, - allocation_folding, 
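For reference, the decision that the deleted MemoryLowering::NeedsPoisoning helper encoded, restated as a self-contained snippet (the enums and the switch mirror the removed code; only the main function is added for illustration): safe loads were never poisoned, and at kPoisonCriticalOnly, the level the optimizing pipeline selected when FLAG_untrusted_code_mitigations was on (see the pipeline.cc hunk below), poisoning applied only to loads marked critical.

    #include <cstdio>

    enum class LoadSensitivity { kSafe, kUnsafe, kCritical };
    enum class PoisoningMitigationLevel { kDontPoison, kPoisonCriticalOnly, kPoisonAll };

    // Restatement of the deleted helper: safe loads never need poisoning,
    // everything else depends on the configured mitigation level.
    bool NeedsPoisoning(PoisoningMitigationLevel level, LoadSensitivity sensitivity) {
      if (sensitivity == LoadSensitivity::kSafe) return false;
      switch (level) {
        case PoisoningMitigationLevel::kDontPoison:
          return false;
        case PoisoningMitigationLevel::kPoisonAll:
          return true;
        case PoisoningMitigationLevel::kPoisonCriticalOnly:
          return sensitivity == LoadSensitivity::kCritical;
      }
      return false;
    }

    int main() {
      bool poison = NeedsPoisoning(PoisoningMitigationLevel::kPoisonCriticalOnly,
                                   LoadSensitivity::kCritical);
      std::printf("%d\n", poison);  // prints 1
      return 0;
    }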
WriteBarrierAssertFailed, - function_debug_name), + memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding, + WriteBarrierAssertFailed, function_debug_name), jsgraph_(jsgraph), empty_state_(AllocationState::Empty(zone)), pending_(zone), diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h index 3845304fdd6b98..7d8bca44d455e0 100644 --- a/deps/v8/src/compiler/memory-optimizer.h +++ b/deps/v8/src/compiler/memory-optimizer.h @@ -30,7 +30,6 @@ using NodeId = uint32_t; class MemoryOptimizer final { public: MemoryOptimizer(JSGraph* jsgraph, Zone* zone, - PoisoningMitigationLevel poisoning_level, MemoryLowering::AllocationFolding allocation_folding, const char* function_debug_name, TickCounter* tick_counter); ~MemoryOptimizer() = default; diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h index 1ce402342431d2..52dc476dc45328 100644 --- a/deps/v8/src/compiler/node-matchers.h +++ b/deps/v8/src/compiler/node-matchers.h @@ -743,7 +743,6 @@ struct BaseWithIndexAndDisplacementMatcher { switch (from->opcode()) { case IrOpcode::kLoad: case IrOpcode::kLoadImmutable: - case IrOpcode::kPoisonedLoad: case IrOpcode::kProtectedLoad: case IrOpcode::kInt32Add: case IrOpcode::kInt64Add: diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h index 912bd7b5cecc71..b956f148cc0852 100644 --- a/deps/v8/src/compiler/opcodes.h +++ b/deps/v8/src/compiler/opcodes.h @@ -463,7 +463,6 @@ V(PlainPrimitiveToFloat64) \ V(PlainPrimitiveToNumber) \ V(PlainPrimitiveToWord32) \ - V(PoisonIndex) \ V(RestLength) \ V(RuntimeAbort) \ V(StoreDataViewElement) \ @@ -686,7 +685,6 @@ V(DebugBreak) \ V(Comment) \ V(Load) \ - V(PoisonedLoad) \ V(LoadImmutable) \ V(Store) \ V(StackSlot) \ @@ -746,9 +744,6 @@ V(Word64Select) \ V(Float32Select) \ V(Float64Select) \ - V(TaggedPoisonOnSpeculation) \ - V(Word32PoisonOnSpeculation) \ - V(Word64PoisonOnSpeculation) \ V(LoadStackCheckOffset) \ V(LoadFramePointer) \ V(LoadParentFramePointer) \ diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc index e802cd72682345..09fb15fb17229f 100644 --- a/deps/v8/src/compiler/pipeline.cc +++ b/deps/v8/src/compiler/pipeline.cc @@ -547,8 +547,7 @@ class PipelineData { code_generator_ = new CodeGenerator( codegen_zone(), frame(), linkage, sequence(), info(), isolate(), osr_helper_, start_source_position_, jump_optimization_info_, - info()->GetPoisoningMitigationLevel(), assembler_options(), - info_->builtin(), max_unoptimized_frame_height(), + assembler_options(), info_->builtin(), max_unoptimized_frame_height(), max_pushed_argument_count(), FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr); } @@ -1161,18 +1160,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl( if (FLAG_turbo_inlining) { compilation_info()->set_inlining(); } - - // This is the bottleneck for computing and setting poisoning level in the - // optimizing compiler. - PoisoningMitigationLevel load_poisoning = - PoisoningMitigationLevel::kDontPoison; - if (FLAG_untrusted_code_mitigations) { - // For full mitigations, this can be changed to - // PoisoningMitigationLevel::kPoisonAll. 
- load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly; - } - compilation_info()->SetPoisoningMitigationLevel(load_poisoning); - if (FLAG_turbo_allocation_folding) { compilation_info()->set_allocation_folding(); } @@ -1629,10 +1616,10 @@ struct SimplifiedLoweringPhase { DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering) void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) { - SimplifiedLowering lowering( - data->jsgraph(), data->broker(), temp_zone, data->source_positions(), - data->node_origins(), data->info()->GetPoisoningMitigationLevel(), - &data->info()->tick_counter(), linkage, data->observe_node_manager()); + SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone, + data->source_positions(), data->node_origins(), + &data->info()->tick_counter(), linkage, + data->observe_node_manager()); // RepresentationChanger accesses the heap. UnparkedScopeIfNeeded scope(data->broker()); @@ -1797,7 +1784,6 @@ struct EffectControlLinearizationPhase { // - introduce effect phis and rewire effects to get SSA again. LinearizeEffectControl(data->jsgraph(), schedule, temp_zone, data->source_positions(), data->node_origins(), - data->info()->GetPoisoningMitigationLevel(), data->broker()); } { @@ -1899,7 +1885,7 @@ struct MemoryOptimizationPhase { // Optimize allocations and load/store operations. MemoryOptimizer optimizer( - data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(), + data->jsgraph(), temp_zone, data->info()->allocation_folding() ? MemoryLowering::AllocationFolding::kDoAllocationFolding : MemoryLowering::AllocationFolding::kDontAllocationFolding, @@ -1989,7 +1975,6 @@ struct ScheduledEffectControlLinearizationPhase { // - lower simplified memory and select nodes to machine level nodes. LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone, data->source_positions(), data->node_origins(), - data->info()->GetPoisoningMitigationLevel(), data->broker()); // TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time. @@ -2205,7 +2190,6 @@ struct InstructionSelectionPhase { data->assembler_options().enable_root_relative_access ? InstructionSelector::kEnableRootsRelativeAddressing : InstructionSelector::kDisableRootsRelativeAddressing, - data->info()->GetPoisoningMitigationLevel(), data->info()->trace_turbo_json() ? InstructionSelector::kEnableTraceTurboJson : InstructionSelector::kDisableTraceTurboJson); @@ -2969,17 +2953,12 @@ int HashGraphForPGO(Graph* graph) { MaybeHandle Pipeline::GenerateCodeForCodeStub( Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind, - const char* debug_name, Builtin builtin, - PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, + const char* debug_name, Builtin builtin, const AssemblerOptions& options, const ProfileDataFromFile* profile_data) { OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(), kind); info.set_builtin(builtin); - if (poisoning_level != PoisoningMitigationLevel::kDontPoison) { - info.SetPoisoningMitigationLevel(poisoning_level); - } - // Construct a pipeline for scheduling and code generation. 
ZoneStats zone_stats(isolate->allocator()); NodeOriginTable node_origins(graph); @@ -3546,18 +3525,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) { config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers)); AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier); } else { - const RegisterConfiguration* config; - if (data->info()->GetPoisoningMitigationLevel() != - PoisoningMitigationLevel::kDontPoison) { -#ifdef V8_TARGET_ARCH_IA32 - FATAL("Poisoning is not supported on ia32."); -#else - config = RegisterConfiguration::Poisoning(); -#endif // V8_TARGET_ARCH_IA32 - } else { - config = RegisterConfiguration::Default(); - } - + const RegisterConfiguration* config = RegisterConfiguration::Default(); if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) { AllocateRegistersForMidTier(config, call_descriptor, run_verifier); } else { @@ -3643,7 +3611,6 @@ std::ostream& operator<<(std::ostream& out, out << "\"codeStartRegisterCheck\": " << s.offsets_info->code_start_register_check << ", "; out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", "; - out << "\"initPoison\": " << s.offsets_info->init_poison << ", "; out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", "; out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", "; out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h index ea67b31e06cbf5..cd4c6b9e16c8c3 100644 --- a/deps/v8/src/compiler/pipeline.h +++ b/deps/v8/src/compiler/pipeline.h @@ -78,8 +78,7 @@ class Pipeline : public AllStatic { static MaybeHandle GenerateCodeForCodeStub( Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph, JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind, - const char* debug_name, Builtin builtin, - PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options, + const char* debug_name, Builtin builtin, const AssemblerOptions& options, const ProfileDataFromFile* profile_data); // --------------------------------------------------------------------------- diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc index a64521d6f66dcb..9abaf45b261e12 100644 --- a/deps/v8/src/compiler/property-access-builder.cc +++ b/deps/v8/src/compiler/property-access-builder.cc @@ -235,7 +235,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name, Type::Any(), MachineType::AnyTagged(), kPointerWriteBarrier, - LoadSensitivity::kCritical, field_access.const_field_info}; storage = *effect = graph()->NewNode( simplified()->LoadField(storage_access), storage, *effect, *control); @@ -263,7 +262,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name, Type::OtherInternal(), MachineType::TaggedPointer(), kPointerWriteBarrier, - LoadSensitivity::kCritical, field_access.const_field_info}; storage = *effect = graph()->NewNode( simplified()->LoadField(storage_access), storage, *effect, *control); @@ -291,7 +289,6 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField( access_info.field_type(), MachineType::TypeForRepresentation(field_representation), kFullWriteBarrier, - LoadSensitivity::kCritical, ConstFieldInfo::None()}; return BuildLoadDataField(name, lookup_start_object, field_access, access_info.is_inobject(), effect, control); @@ -319,7 +316,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField( access_info.field_type(), 
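One practical cost of the mitigation shows up in the SelectInstructions hunk above: with poisoning enabled the register allocator had to use a special RegisterConfiguration::Poisoning(), presumably because one general-purpose register is set aside to hold the poison value, and ia32 simply had too few registers for that (hence the FATAL). A toy sketch of what "reserving a register" means for an allocator; the register count, index and names below are hypothetical, not taken from the patch.

    #include <cstdint>
    #include <cstdio>

    // A register configuration as a bitmask of allocatable general registers.
    constexpr uint32_t kAllGeneralRegisters = (1u << 16) - 1;  // hypothetical: r0..r15
    constexpr int kPoisonRegister = 9;                         // hypothetical choice

    constexpr uint32_t DefaultConfig() { return kAllGeneralRegisters; }

    // The "poisoning" configuration: identical, minus the reserved register.
    constexpr uint32_t PoisoningConfig() {
      return kAllGeneralRegisters & ~(1u << kPoisonRegister);
    }

    int main() {
      std::printf("default %#x, poisoning %#x\n", DefaultConfig(), PoisoningConfig());
      return 0;
    }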
MachineType::TypeForRepresentation(field_representation), kFullWriteBarrier, - LoadSensitivity::kCritical, access_info.GetConstFieldInfo()}; if (field_representation == MachineRepresentation::kTaggedPointer || field_representation == MachineRepresentation::kCompressedPointer) { diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc index 7ed217d4e36dd9..383d63dd69fdc5 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.cc +++ b/deps/v8/src/compiler/raw-machine-assembler.cc @@ -18,8 +18,7 @@ namespace compiler { RawMachineAssembler::RawMachineAssembler( Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor, MachineRepresentation word, MachineOperatorBuilder::Flags flags, - MachineOperatorBuilder::AlignmentRequirements alignment_requirements, - PoisoningMitigationLevel poisoning_level) + MachineOperatorBuilder::AlignmentRequirements alignment_requirements) : isolate_(isolate), graph_(graph), schedule_(zone()->New(zone())), @@ -30,8 +29,7 @@ RawMachineAssembler::RawMachineAssembler( call_descriptor_(call_descriptor), target_parameter_(nullptr), parameters_(parameter_count(), zone()), - current_block_(schedule()->start()), - poisoning_level_(poisoning_level) { + current_block_(schedule()->start()) { int param_count = static_cast(parameter_count()); // Add an extra input for the JSFunction parameter to the start node. graph->SetStart(graph->NewNode(common_.Start(param_count + 1))); @@ -472,7 +470,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) { return; case IrOpcode::kIfTrue: { Node* branch = NodeProperties::GetControlInput(control_node); - BranchHint hint = BranchOperatorInfoOf(branch->op()).hint; + BranchHint hint = BranchHintOf(branch->op()); if (hint == BranchHint::kTrue) { // The other possibility is also deferred, so the responsible branch // has to be before. @@ -485,7 +483,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) { } case IrOpcode::kIfFalse: { Node* branch = NodeProperties::GetControlInput(control_node); - BranchHint hint = BranchOperatorInfoOf(branch->op()).hint; + BranchHint hint = BranchHintOf(branch->op()); if (hint == BranchHint::kFalse) { // The other possibility is also deferred, so the responsible branch // has to be before. 
@@ -516,11 +514,10 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) { } } - BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op()); - if (info.hint == new_branch_hint) return; - NodeProperties::ChangeOp( - responsible_branch, - common()->Branch(new_branch_hint, info.is_safety_check)); + BranchHint hint = BranchHintOf(responsible_branch->op()); + if (hint == new_branch_hint) return; + NodeProperties::ChangeOp(responsible_branch, + common()->Branch(new_branch_hint)); } Node* RawMachineAssembler::TargetParameter() { @@ -544,9 +541,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) { void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val, RawMachineLabel* false_val) { DCHECK(current_block_ != schedule()->end()); - Node* branch = MakeNode( - common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1, - &condition); + Node* branch = MakeNode(common()->Branch(BranchHint::kNone), 1, &condition); BasicBlock* true_block = schedule()->NewBasicBlock(); BasicBlock* false_block = schedule()->NewBasicBlock(); schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block); diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h index a811fa7bf9c9bb..bff7bda0a37a87 100644 --- a/deps/v8/src/compiler/raw-machine-assembler.h +++ b/deps/v8/src/compiler/raw-machine-assembler.h @@ -52,9 +52,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { MachineOperatorBuilder::Flag::kNoFlags, MachineOperatorBuilder::AlignmentRequirements alignment_requirements = MachineOperatorBuilder::AlignmentRequirements:: - FullUnalignedAccessSupport(), - PoisoningMitigationLevel poisoning_level = - PoisoningMitigationLevel::kPoisonCriticalOnly); + FullUnalignedAccessSupport()); ~RawMachineAssembler() = default; RawMachineAssembler(const RawMachineAssembler&) = delete; @@ -67,7 +65,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { CommonOperatorBuilder* common() { return &common_; } SimplifiedOperatorBuilder* simplified() { return &simplified_; } CallDescriptor* call_descriptor() const { return call_descriptor_; } - PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; } // Only used for tests: Finalizes the schedule and exports it to be used for // code generation. Note that this RawMachineAssembler becomes invalid after @@ -132,19 +129,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { } // Memory Operations. 
- Node* Load(MachineType type, Node* base, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - return Load(type, base, IntPtrConstant(0), needs_poisoning); + Node* Load(MachineType type, Node* base) { + return Load(type, base, IntPtrConstant(0)); } - Node* Load(MachineType type, Node* base, Node* index, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { + Node* Load(MachineType type, Node* base, Node* index) { const Operator* op = machine()->Load(type); - CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_); - if (needs_poisoning == LoadSensitivity::kCritical && - poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) { - op = machine()->PoisonedLoad(type); - } - Node* load = AddNode(op, base, index); return load; } @@ -174,10 +163,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { bool IsMapOffsetConstantMinusTag(int offset) { return offset == HeapObject::kMapOffset - kHeapObjectTag; } - Node* LoadFromObject( - MachineType type, Node* base, Node* offset, - LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) { - CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe); + Node* LoadFromObject(MachineType type, Node* base, Node* offset) { DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset), type == MachineType::MapInHeader()); ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier}; @@ -959,20 +945,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { return HeapConstant(isolate()->factory()->InternalizeUtf8String(string)); } - Node* TaggedPoisonOnSpeculation(Node* value) { - if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { - return AddNode(machine()->TaggedPoisonOnSpeculation(), value); - } - return value; - } - - Node* WordPoisonOnSpeculation(Node* value) { - if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) { - return AddNode(machine()->WordPoisonOnSpeculation(), value); - } - return value; - } - // Call a given call descriptor and the given arguments. // The call target is passed as part of the {inputs} array. Node* CallN(CallDescriptor* call_descriptor, int input_count, @@ -1136,6 +1108,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { CommonOperatorBuilder* common); Isolate* isolate_; + Graph* graph_; Schedule* schedule_; SourcePositionTable* source_positions_; @@ -1146,7 +1119,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { Node* target_parameter_; NodeVector parameters_; BasicBlock* current_block_; - PoisoningMitigationLevel poisoning_level_; }; class V8_EXPORT_PRIVATE RawMachineLabel final { diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index 1c07a23dded32a..0a8332b775720d 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -1735,11 +1735,9 @@ class RepresentationSelector { VisitBinop(node, UseInfo::TruncatingWord32(), MachineRepresentation::kWord32); if (lower()) { - if (lowering->poisoning_level_ == - PoisoningMitigationLevel::kDontPoison && - (index_type.IsNone() || length_type.IsNone() || - (index_type.Min() >= 0.0 && - index_type.Max() < length_type.Min()))) { + if (index_type.IsNone() || length_type.IsNone() || + (index_type.Min() >= 0.0 && + index_type.Max() < length_type.Min())) { // The bounds check is redundant if we already know that // the index is within the bounds of [0.0, length[. // TODO(neis): Move this into TypedOptimization? 
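The surviving condition in the simplified-lowering.cc hunk above is worth spelling out: once loads are no longer poisoned, a CheckBounds node can be dropped purely from type information, namely when the index range is statically known to lie inside [0, length). A self-contained sketch of that test (real Typer types can also be None or contain NaN, which the code above handles separately):

    #include <cstdio>

    // A value type approximated as a closed numeric range, the way the
    // representation selector reasons about the index and length inputs.
    struct Range {
      double min;
      double max;
    };

    // The bounds check "index < length" is redundant when the largest possible
    // index is still smaller than the smallest possible length.
    bool BoundsCheckIsRedundant(Range index_type, Range length_type) {
      return index_type.min >= 0.0 && index_type.max < length_type.min;
    }

    int main() {
      Range index{0.0, 7.0};
      Range length{8.0, 16.0};
      std::printf("redundant: %d\n", BoundsCheckIsRedundant(index, length));  // prints 1
      return 0;
    }

Previously this shortcut was taken only with poisoning disabled, presumably because with poisoning enabled the comparison also fed the poison mask and therefore could not simply be elided.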
@@ -3181,11 +3179,6 @@ class RepresentationSelector { } case IrOpcode::kCheckBounds: return VisitCheckBounds(node, lowering); - case IrOpcode::kPoisonIndex: { - VisitUnop(node, UseInfo::TruncatingWord32(), - MachineRepresentation::kWord32); - return; - } case IrOpcode::kCheckHeapObject: { if (InputCannotBe(node, Type::SignedSmall())) { VisitUnop(node, UseInfo::AnyTagged(), @@ -4225,18 +4218,19 @@ void RepresentationSelector::InsertUnreachableIfNecessary(Node* node) { } } -SimplifiedLowering::SimplifiedLowering( - JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, - SourcePositionTable* source_positions, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter, - Linkage* linkage, ObserveNodeManager* observe_node_manager) +SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, + Zone* zone, + SourcePositionTable* source_positions, + NodeOriginTable* node_origins, + TickCounter* tick_counter, + Linkage* linkage, + ObserveNodeManager* observe_node_manager) : jsgraph_(jsgraph), broker_(broker), zone_(zone), type_cache_(TypeCache::Get()), source_positions_(source_positions), node_origins_(node_origins), - poisoning_level_(poisoning_level), tick_counter_(tick_counter), linkage_(linkage), observe_node_manager_(observe_node_manager) {} diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h index 54017b34f7a936..f60bc1a7e3ef89 100644 --- a/deps/v8/src/compiler/simplified-lowering.h +++ b/deps/v8/src/compiler/simplified-lowering.h @@ -31,7 +31,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone, SourcePositionTable* source_position, NodeOriginTable* node_origins, - PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter, Linkage* linkage, ObserveNodeManager* observe_node_manager = nullptr); ~SimplifiedLowering() = default; @@ -83,8 +82,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final { SourcePositionTable* source_positions_; NodeOriginTable* node_origins_; - PoisoningMitigationLevel poisoning_level_; - TickCounter* const tick_counter_; Linkage* const linkage_; diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc index 9c4f8f083ac3a4..9461194b559480 100644 --- a/deps/v8/src/compiler/simplified-operator.cc +++ b/deps/v8/src/compiler/simplified-operator.cc @@ -73,22 +73,6 @@ size_t hash_value(FieldAccess const& access) { access.is_store_in_literal); } -size_t hash_value(LoadSensitivity load_sensitivity) { - return static_cast(load_sensitivity); -} - -std::ostream& operator<<(std::ostream& os, LoadSensitivity load_sensitivity) { - switch (load_sensitivity) { - case LoadSensitivity::kCritical: - return os << "Critical"; - case LoadSensitivity::kSafe: - return os << "Safe"; - case LoadSensitivity::kUnsafe: - return os << "Unsafe"; - } - UNREACHABLE(); -} - std::ostream& operator<<(std::ostream& os, FieldAccess const& access) { os << "[" << access.base_is_tagged << ", " << access.offset << ", "; #ifdef OBJECT_PRINT @@ -107,9 +91,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) { if (access.is_store_in_literal) { os << " (store in literal)"; } - if (FLAG_untrusted_code_mitigations) { - os << ", " << access.load_sensitivity; - } os << "]"; return os; } @@ -145,9 +126,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) { os << access.base_is_tagged << ", " << access.header_size << ", " << access.type << ", " 
<< access.machine_type << ", " << access.write_barrier_kind; - if (FLAG_untrusted_code_mitigations) { - os << ", " << access.load_sensitivity; - } return os; } @@ -719,129 +697,128 @@ bool operator==(CheckMinusZeroParameters const& lhs, return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback(); } -#define PURE_OP_LIST(V) \ - V(BooleanNot, Operator::kNoProperties, 1, 0) \ - V(NumberEqual, Operator::kCommutative, 2, 0) \ - V(NumberLessThan, Operator::kNoProperties, 2, 0) \ - V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \ - V(NumberAdd, Operator::kCommutative, 2, 0) \ - V(NumberSubtract, Operator::kNoProperties, 2, 0) \ - V(NumberMultiply, Operator::kCommutative, 2, 0) \ - V(NumberDivide, Operator::kNoProperties, 2, 0) \ - V(NumberModulus, Operator::kNoProperties, 2, 0) \ - V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \ - V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \ - V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \ - V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \ - V(NumberShiftRight, Operator::kNoProperties, 2, 0) \ - V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \ - V(NumberImul, Operator::kCommutative, 2, 0) \ - V(NumberAbs, Operator::kNoProperties, 1, 0) \ - V(NumberClz32, Operator::kNoProperties, 1, 0) \ - V(NumberCeil, Operator::kNoProperties, 1, 0) \ - V(NumberFloor, Operator::kNoProperties, 1, 0) \ - V(NumberFround, Operator::kNoProperties, 1, 0) \ - V(NumberAcos, Operator::kNoProperties, 1, 0) \ - V(NumberAcosh, Operator::kNoProperties, 1, 0) \ - V(NumberAsin, Operator::kNoProperties, 1, 0) \ - V(NumberAsinh, Operator::kNoProperties, 1, 0) \ - V(NumberAtan, Operator::kNoProperties, 1, 0) \ - V(NumberAtan2, Operator::kNoProperties, 2, 0) \ - V(NumberAtanh, Operator::kNoProperties, 1, 0) \ - V(NumberCbrt, Operator::kNoProperties, 1, 0) \ - V(NumberCos, Operator::kNoProperties, 1, 0) \ - V(NumberCosh, Operator::kNoProperties, 1, 0) \ - V(NumberExp, Operator::kNoProperties, 1, 0) \ - V(NumberExpm1, Operator::kNoProperties, 1, 0) \ - V(NumberLog, Operator::kNoProperties, 1, 0) \ - V(NumberLog1p, Operator::kNoProperties, 1, 0) \ - V(NumberLog10, Operator::kNoProperties, 1, 0) \ - V(NumberLog2, Operator::kNoProperties, 1, 0) \ - V(NumberMax, Operator::kNoProperties, 2, 0) \ - V(NumberMin, Operator::kNoProperties, 2, 0) \ - V(NumberPow, Operator::kNoProperties, 2, 0) \ - V(NumberRound, Operator::kNoProperties, 1, 0) \ - V(NumberSign, Operator::kNoProperties, 1, 0) \ - V(NumberSin, Operator::kNoProperties, 1, 0) \ - V(NumberSinh, Operator::kNoProperties, 1, 0) \ - V(NumberSqrt, Operator::kNoProperties, 1, 0) \ - V(NumberTan, Operator::kNoProperties, 1, 0) \ - V(NumberTanh, Operator::kNoProperties, 1, 0) \ - V(NumberTrunc, Operator::kNoProperties, 1, 0) \ - V(NumberToBoolean, Operator::kNoProperties, 1, 0) \ - V(NumberToInt32, Operator::kNoProperties, 1, 0) \ - V(NumberToString, Operator::kNoProperties, 1, 0) \ - V(NumberToUint32, Operator::kNoProperties, 1, 0) \ - V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \ - V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \ - V(BigIntNegate, Operator::kNoProperties, 1, 0) \ - V(StringConcat, Operator::kNoProperties, 3, 0) \ - V(StringToNumber, Operator::kNoProperties, 1, 0) \ - V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \ - V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \ - V(StringIndexOf, Operator::kNoProperties, 3, 0) \ - V(StringLength, Operator::kNoProperties, 1, 0) \ - V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \ - V(StringToUpperCaseIntl, 
Operator::kNoProperties, 1, 0) \ - V(TypeOf, Operator::kNoProperties, 1, 1) \ - V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \ - V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \ - V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \ - V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \ - V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \ - V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \ - V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \ - V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \ - V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \ - V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \ - V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \ - V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \ - V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \ - V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \ - V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \ - V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \ - V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \ - V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \ - V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \ - V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \ - V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \ - V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \ - V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \ - V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \ - V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \ - V(NumberIsNaN, Operator::kNoProperties, 1, 0) \ - V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \ - V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \ - V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \ - V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \ - V(ObjectIsString, Operator::kNoProperties, 1, 0) \ - V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \ - V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \ - V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \ - V(NumberIsFinite, Operator::kNoProperties, 1, 0) \ - V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \ - V(NumberIsInteger, Operator::kNoProperties, 1, 0) \ - V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \ - V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \ - V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \ - V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \ - V(SameValue, Operator::kCommutative, 2, 0) \ - V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \ - V(NumberSameValue, Operator::kCommutative, 2, 0) \ - V(ReferenceEqual, Operator::kCommutative, 2, 0) \ - V(StringEqual, Operator::kCommutative, 2, 0) \ - V(StringLessThan, Operator::kNoProperties, 2, 0) \ - V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \ - V(ToBoolean, Operator::kNoProperties, 1, 0) \ - V(NewConsString, Operator::kNoProperties, 3, 0) \ - V(PoisonIndex, Operator::kNoProperties, 1, 0) +#define PURE_OP_LIST(V) \ + V(BooleanNot, Operator::kNoProperties, 1, 0) \ + V(NumberEqual, Operator::kCommutative, 2, 0) \ + V(NumberLessThan, 
Operator::kNoProperties, 2, 0) \ + V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \ + V(NumberAdd, Operator::kCommutative, 2, 0) \ + V(NumberSubtract, Operator::kNoProperties, 2, 0) \ + V(NumberMultiply, Operator::kCommutative, 2, 0) \ + V(NumberDivide, Operator::kNoProperties, 2, 0) \ + V(NumberModulus, Operator::kNoProperties, 2, 0) \ + V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \ + V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \ + V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \ + V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \ + V(NumberShiftRight, Operator::kNoProperties, 2, 0) \ + V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \ + V(NumberImul, Operator::kCommutative, 2, 0) \ + V(NumberAbs, Operator::kNoProperties, 1, 0) \ + V(NumberClz32, Operator::kNoProperties, 1, 0) \ + V(NumberCeil, Operator::kNoProperties, 1, 0) \ + V(NumberFloor, Operator::kNoProperties, 1, 0) \ + V(NumberFround, Operator::kNoProperties, 1, 0) \ + V(NumberAcos, Operator::kNoProperties, 1, 0) \ + V(NumberAcosh, Operator::kNoProperties, 1, 0) \ + V(NumberAsin, Operator::kNoProperties, 1, 0) \ + V(NumberAsinh, Operator::kNoProperties, 1, 0) \ + V(NumberAtan, Operator::kNoProperties, 1, 0) \ + V(NumberAtan2, Operator::kNoProperties, 2, 0) \ + V(NumberAtanh, Operator::kNoProperties, 1, 0) \ + V(NumberCbrt, Operator::kNoProperties, 1, 0) \ + V(NumberCos, Operator::kNoProperties, 1, 0) \ + V(NumberCosh, Operator::kNoProperties, 1, 0) \ + V(NumberExp, Operator::kNoProperties, 1, 0) \ + V(NumberExpm1, Operator::kNoProperties, 1, 0) \ + V(NumberLog, Operator::kNoProperties, 1, 0) \ + V(NumberLog1p, Operator::kNoProperties, 1, 0) \ + V(NumberLog10, Operator::kNoProperties, 1, 0) \ + V(NumberLog2, Operator::kNoProperties, 1, 0) \ + V(NumberMax, Operator::kNoProperties, 2, 0) \ + V(NumberMin, Operator::kNoProperties, 2, 0) \ + V(NumberPow, Operator::kNoProperties, 2, 0) \ + V(NumberRound, Operator::kNoProperties, 1, 0) \ + V(NumberSign, Operator::kNoProperties, 1, 0) \ + V(NumberSin, Operator::kNoProperties, 1, 0) \ + V(NumberSinh, Operator::kNoProperties, 1, 0) \ + V(NumberSqrt, Operator::kNoProperties, 1, 0) \ + V(NumberTan, Operator::kNoProperties, 1, 0) \ + V(NumberTanh, Operator::kNoProperties, 1, 0) \ + V(NumberTrunc, Operator::kNoProperties, 1, 0) \ + V(NumberToBoolean, Operator::kNoProperties, 1, 0) \ + V(NumberToInt32, Operator::kNoProperties, 1, 0) \ + V(NumberToString, Operator::kNoProperties, 1, 0) \ + V(NumberToUint32, Operator::kNoProperties, 1, 0) \ + V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \ + V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \ + V(BigIntNegate, Operator::kNoProperties, 1, 0) \ + V(StringConcat, Operator::kNoProperties, 3, 0) \ + V(StringToNumber, Operator::kNoProperties, 1, 0) \ + V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \ + V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \ + V(StringIndexOf, Operator::kNoProperties, 3, 0) \ + V(StringLength, Operator::kNoProperties, 1, 0) \ + V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \ + V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \ + V(TypeOf, Operator::kNoProperties, 1, 1) \ + V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \ + V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \ + V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \ + 
V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \ + V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \ + V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \ + V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \ + V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \ + V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \ + V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \ + V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \ + V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \ + V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \ + V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \ + V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \ + V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \ + V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \ + V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \ + V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \ + V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \ + V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \ + V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \ + V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \ + V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \ + V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \ + V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \ + V(NumberIsNaN, Operator::kNoProperties, 1, 0) \ + V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \ + V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \ + V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \ + V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \ + V(ObjectIsString, Operator::kNoProperties, 1, 0) \ + V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \ + V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \ + V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \ + V(NumberIsFinite, Operator::kNoProperties, 1, 0) \ + V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \ + V(NumberIsInteger, Operator::kNoProperties, 1, 0) \ + V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \ + V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \ + V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \ + V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \ + V(SameValue, Operator::kCommutative, 2, 0) \ + V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \ + V(NumberSameValue, Operator::kCommutative, 2, 0) \ + V(ReferenceEqual, Operator::kCommutative, 2, 0) \ + V(StringEqual, Operator::kCommutative, 2, 0) \ + V(StringLessThan, Operator::kNoProperties, 2, 0) \ + V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \ + V(ToBoolean, Operator::kNoProperties, 1, 0) \ + V(NewConsString, Operator::kNoProperties, 3, 0) #define EFFECT_DEPENDENT_OP_LIST(V) \ V(BigIntAdd, Operator::kNoProperties, 2, 1) \ diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h index d7a5901448624b..0602b795a93477 100644 --- a/deps/v8/src/compiler/simplified-operator.h +++ b/deps/v8/src/compiler/simplified-operator.h @@ -46,10 +46,6 @@ size_t hash_value(BaseTaggedness); std::ostream& operator<<(std::ostream&, BaseTaggedness); -size_t hash_value(LoadSensitivity); - -std::ostream& operator<<(std::ostream&, LoadSensitivity); - struct ConstFieldInfo { // the map that introduced the const field, if any. 
An access is considered // mutable iff the handle is null. @@ -82,7 +78,6 @@ struct FieldAccess { Type type; // type of the field. MachineType machine_type; // machine type of the field. WriteBarrierKind write_barrier_kind; // write barrier hint. - LoadSensitivity load_sensitivity; // load safety for poisoning. ConstFieldInfo const_field_info; // the constness of this access, and the // field owner map, if the access is const bool is_store_in_literal; // originates from a kStoreInLiteral access @@ -96,14 +91,12 @@ struct FieldAccess { type(Type::None()), machine_type(MachineType::None()), write_barrier_kind(kFullWriteBarrier), - load_sensitivity(LoadSensitivity::kUnsafe), const_field_info(ConstFieldInfo::None()), is_store_in_literal(false) {} FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle name, MaybeHandle map, Type type, MachineType machine_type, WriteBarrierKind write_barrier_kind, - LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe, ConstFieldInfo const_field_info = ConstFieldInfo::None(), bool is_store_in_literal = false #ifdef V8_HEAP_SANDBOX @@ -118,7 +111,6 @@ struct FieldAccess { type(type), machine_type(machine_type), write_barrier_kind(write_barrier_kind), - load_sensitivity(load_sensitivity), const_field_info(const_field_info), is_store_in_literal(is_store_in_literal) #ifdef V8_HEAP_SANDBOX @@ -162,25 +154,21 @@ struct ElementAccess { Type type; // type of the element. MachineType machine_type; // machine type of the element. WriteBarrierKind write_barrier_kind; // write barrier hint. - LoadSensitivity load_sensitivity; // load safety for poisoning. ElementAccess() : base_is_tagged(kTaggedBase), header_size(0), type(Type::None()), machine_type(MachineType::None()), - write_barrier_kind(kFullWriteBarrier), - load_sensitivity(LoadSensitivity::kUnsafe) {} + write_barrier_kind(kFullWriteBarrier) {} ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type type, - MachineType machine_type, WriteBarrierKind write_barrier_kind, - LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe) + MachineType machine_type, WriteBarrierKind write_barrier_kind) : base_is_tagged(base_is_tagged), header_size(header_size), type(type), machine_type(machine_type), - write_barrier_kind(write_barrier_kind), - load_sensitivity(load_sensitivity) {} + write_barrier_kind(write_barrier_kind) {} int tag() const { return base_is_tagged == kTaggedBase ? 
kHeapObjectTag : 0; } }; @@ -926,7 +914,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final const Operator* TruncateTaggedToBit(); const Operator* TruncateTaggedPointerToBit(); - const Operator* PoisonIndex(); const Operator* CompareMaps(ZoneHandleSet); const Operator* MapGuard(ZoneHandleSet maps); diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc index 529f1cc7bba2eb..3aaf5d2c8d440b 100644 --- a/deps/v8/src/compiler/typer.cc +++ b/deps/v8/src/compiler/typer.cc @@ -2065,10 +2065,6 @@ Type Typer::Visitor::TypeStringLength(Node* node) { Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); } -Type Typer::Visitor::TypePoisonIndex(Node* node) { - return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone()); -} - Type Typer::Visitor::TypeCheckBounds(Node* node) { return typer_->operation_typer_.CheckBounds(Operand(node, 0), Operand(node, 1)); diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc index f33edaa6c0d4aa..a0f2aa569dc985 100644 --- a/deps/v8/src/compiler/verifier.cc +++ b/deps/v8/src/compiler/verifier.cc @@ -1422,10 +1422,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger); CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger); break; - case IrOpcode::kPoisonIndex: - CheckValueInputIs(node, 0, Type::Unsigned32()); - CheckTypeIs(node, Type::Unsigned32()); - break; case IrOpcode::kCheckClosure: // Any -> Function CheckValueInputIs(node, 0, Type::Any()); @@ -1641,7 +1637,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { // ----------------------- case IrOpcode::kLoad: case IrOpcode::kLoadImmutable: - case IrOpcode::kPoisonedLoad: case IrOpcode::kProtectedLoad: case IrOpcode::kProtectedStore: case IrOpcode::kStore: @@ -1817,9 +1812,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kWord32PairShl: case IrOpcode::kWord32PairShr: case IrOpcode::kWord32PairSar: - case IrOpcode::kTaggedPoisonOnSpeculation: - case IrOpcode::kWord32PoisonOnSpeculation: - case IrOpcode::kWord64PoisonOnSpeculation: case IrOpcode::kLoadStackCheckOffset: case IrOpcode::kLoadFramePointer: case IrOpcode::kLoadParentFramePointer: diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc index f91c21fd1d4cb0..f2fe3c5e619a64 100644 --- a/deps/v8/src/compiler/wasm-compiler.cc +++ b/deps/v8/src/compiler/wasm-compiler.cc @@ -472,7 +472,6 @@ WasmGraphBuilder::WasmGraphBuilder( mcgraph_(mcgraph), env_(env), has_simd_(ContainsSimd(sig)), - untrusted_code_mitigations_(FLAG_untrusted_code_mitigations), sig_(sig), source_position_table_(source_position_table), isolate_(isolate) { @@ -2901,13 +2900,13 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig, return call; } -Node* WasmGraphBuilder::BuildWasmCall( - const wasm::FunctionSig* sig, base::Vector args, - base::Vector rets, wasm::WasmCodePosition position, - Node* instance_node, UseRetpoline use_retpoline, Node* frame_state) { - CallDescriptor* call_descriptor = - GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline, - kWasmFunction, frame_state != nullptr); +Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig, + base::Vector args, + base::Vector rets, + wasm::WasmCodePosition position, + Node* instance_node, Node* frame_state) { + CallDescriptor* call_descriptor = GetWasmCallDescriptor( + mcgraph()->zone(), sig, kWasmFunction, frame_state != nullptr); const 
Operator* op = mcgraph()->common()->Call(call_descriptor); Node* call = BuildCallNode(sig, args, position, instance_node, op, frame_state); @@ -2935,10 +2934,9 @@ Node* WasmGraphBuilder::BuildWasmCall( Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig, base::Vector args, wasm::WasmCodePosition position, - Node* instance_node, - UseRetpoline use_retpoline) { + Node* instance_node) { CallDescriptor* call_descriptor = - GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline); + GetWasmCallDescriptor(mcgraph()->zone(), sig); const Operator* op = mcgraph()->common()->TailCall(call_descriptor); Node* call = BuildCallNode(sig, args, position, instance_node, op); @@ -2982,15 +2980,13 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig, Node* target_node = gasm_->LoadFromObject( MachineType::Pointer(), imported_targets, func_index_times_pointersize); args[0] = target_node; - const UseRetpoline use_retpoline = - untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline; switch (continuation) { case kCallContinues: - return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline); + return BuildWasmCall(sig, args, rets, position, ref_node); case kReturnCall: DCHECK(rets.empty()); - return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline); + return BuildWasmReturnCall(sig, args, position, ref_node); } } @@ -3010,7 +3006,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector args, Address code = static_cast
(index); args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL); - return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline); + return BuildWasmCall(sig, args, rets, position, nullptr); } Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index, @@ -3095,16 +3091,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, Node* in_bounds = gasm_->Uint32LessThan(key, ift_size); TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position); - // Mask the key to prevent SSCA. - if (untrusted_code_mitigations_) { - // mask = ((key - size) & ~key) >> 31 - Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1)); - Node* masked_diff = - gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key); - Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31)); - key = gasm_->Word32And(key, mask); - } - const wasm::ValueType table_type = env_->module->tables[table_index].type; // Check that the table entry is not null and that the type of the function is // **identical with** the function type declared at the call site (no @@ -3140,16 +3126,12 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index, intptr_scaled_key); args[0] = target; - const UseRetpoline use_retpoline = - untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline; switch (continuation) { case kCallContinues: - return BuildWasmCall(sig, args, rets, position, target_instance, - use_retpoline); + return BuildWasmCall(sig, args, rets, position, target_instance); case kReturnCall: - return BuildWasmReturnCall(sig, args, position, target_instance, - use_retpoline); + return BuildWasmReturnCall(sig, args, position, target_instance); } } @@ -3244,14 +3226,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index, args[0] = end_label.PhiAt(0); - const UseRetpoline use_retpoline = - untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline; - Node* call = continuation == kCallContinues - ? BuildWasmCall(sig, args, rets, position, instance_node, - use_retpoline) - : BuildWasmReturnCall(sig, args, position, instance_node, - use_retpoline); + ? BuildWasmCall(sig, args, rets, position, instance_node) + : BuildWasmReturnCall(sig, args, position, instance_node); return call; } @@ -3287,7 +3264,7 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector args, Address code = static_cast
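The deleted block above is the branchless clamp spelled out in its own comment, mask = ((key - size) & ~key) >> 31: in-bounds indirect-call table keys are left unchanged, while out-of-bounds ones (including those reached only under branch mis-speculation) are forced to index 0. A standalone demonstration; like the original Word32Sar, it relies on arithmetic right shift of a negative value (implementation-defined before C++20, but universal in practice).

    #include <cstdint>
    #include <cstdio>

    // mask = ((key - size) & ~key) >> 31: all ones when key < size (both
    // non-negative 32-bit values), zero otherwise, so "key & mask" clamps any
    // out-of-bounds key to 0 without a branch.
    uint32_t MaskTableKey(uint32_t key, uint32_t size) {
      int32_t diff = static_cast<int32_t>(key) - static_cast<int32_t>(size);
      int32_t mask = (diff & ~static_cast<int32_t>(key)) >> 31;
      return key & static_cast<uint32_t>(mask);
    }

    int main() {
      uint32_t size = 8;
      for (uint32_t key : {3u, 8u, 12u}) {
        std::printf("key %u -> %u\n", key, MaskTableKey(key, size));
      }
      return 0;
    }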
(index); args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL); - return BuildWasmReturnCall(sig, args, position, nullptr, kNoRetpoline); + return BuildWasmReturnCall(sig, args, position, nullptr); } Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index, @@ -3416,15 +3393,6 @@ void WasmGraphBuilder::InitInstanceCache( // Load the memory size. instance_cache->mem_size = LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr()); - - if (untrusted_code_mitigations_) { - // Load the memory mask. - instance_cache->mem_mask = - LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr()); - } else { - // Explicitly set to nullptr to ensure a SEGV when we try to use it. - instance_cache->mem_mask = nullptr; - } } void WasmGraphBuilder::PrepareInstanceCacheForLoop( @@ -3435,10 +3403,6 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop( INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation()); INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation()); - if (untrusted_code_mitigations_) { - INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation()); - } - #undef INTRODUCE_PHI } @@ -3453,10 +3417,6 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to, INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation()); INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32); - if (untrusted_code_mitigations_) { - INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32); - } - #undef INTRODUCE_PHI } @@ -3467,10 +3427,6 @@ void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to, merge, to->mem_size, from->mem_size); to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(), merge, to->mem_start, from->mem_start); - if (untrusted_code_mitigations_) { - to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(), - merge, to->mem_mask, from->mem_mask); - } } Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep, @@ -3839,13 +3795,6 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index, // Introduce the actual bounds check. Node* cond = gasm_->UintLessThan(index, effective_size); TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position); - - if (untrusted_code_mitigations_) { - // In the fallthrough case, condition the index with the memory mask. - Node* mem_mask = instance_cache_->mem_mask; - DCHECK_NOT_NULL(mem_mask); - index = gasm_->WordAnd(index, mem_mask); - } return {index, kDynamicallyChecked}; } @@ -4345,13 +4294,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) { gasm_->UintLessThan(index, mem_size), BranchHint::kTrue); bounds_check.Chain(control()); - if (untrusted_code_mitigations_) { - // Condition the index with the memory mask. - Node* mem_mask = instance_cache_->mem_mask; - DCHECK_NOT_NULL(mem_mask); - index = gasm_->WordAnd(index, mem_mask); - } - Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start, index, effect(), bounds_check.if_true); SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge); @@ -4396,13 +4338,6 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index, BranchHint::kTrue); bounds_check.Chain(control()); - if (untrusted_code_mitigations_) { - // Condition the index with the memory mask. 
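The mem_mask plumbing removed above (the instance cache field, the loop and merge phis, and the AND after BoundsCheckMem) served the same purpose for linear memory: even if the bounds-check branch is mis-predicted, the masked index cannot leave the memory's reservation. How the mask itself was derived is not shown in this patch; the sketch below assumes the common scheme of a power-of-two-minus-one mask covering the accessible region, which only works when the engine actually backs memory up to that boundary.

    #include <cstdint>
    #include <cstdio>

    // Assumed for illustration: the memory mask is (next power of two of the
    // memory size) - 1, so "index & mask" stays within the reserved region.
    uint64_t MemoryMask(uint64_t mem_size) {
      uint64_t bound = 1;
      while (bound < mem_size) bound <<= 1;
      return bound - 1;
    }

    int main() {
      uint64_t mem_size = 65536 * 3;  // three wasm pages
      uint64_t mask = MemoryMask(mem_size);
      uint64_t index = 300000;  // past the end of memory
      // This AND is what the deleted fallthrough did after the architectural
      // bounds check: the speculative access is pinned inside the reservation.
      std::printf("mask %#llx, index %llu -> %llu\n",
                  static_cast<unsigned long long>(mask),
                  static_cast<unsigned long long>(index),
                  static_cast<unsigned long long>(index & mask));
      return 0;
    }

With the mitigations gone the instance no longer carries a memory mask at all, which is why the memory_mask line disappears from the WasmInstanceObject printer further down.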
- Node* mem_mask = instance_cache_->mem_mask; - DCHECK_NOT_NULL(mem_mask); - index = gasm_->Word32And(index, mem_mask); - } - index = BuildChangeUint32ToUintPtr(index); const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation( type.representation(), WriteBarrierKind::kNoWriteBarrier)); @@ -6659,8 +6594,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder { // The (cached) call target is the jump table slot for that function. args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data); BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets), - wasm::kNoCodePosition, nullptr, kNoRetpoline, - frame_state); + wasm::kNoCodePosition, nullptr, frame_state); } } @@ -7623,8 +7557,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper( // Schedule and compile to machine code. CallDescriptor* incoming = - GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline, - WasmCallKind::kWasmImportWrapper); + GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper); if (machine->Is32()) { incoming = GetI32WasmCallDescriptor(&zone, incoming); } @@ -7665,8 +7598,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module, // Run the compiler pipeline to generate machine code. CallDescriptor* call_descriptor = - GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline, - WasmCallKind::kWasmCapiFunction); + GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmCapiFunction); if (mcgraph->machine()->Is32()) { call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor); } @@ -7716,8 +7648,7 @@ MaybeHandle CompileWasmToJSWrapper(Isolate* isolate, // Generate the call descriptor. CallDescriptor* incoming = - GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline, - WasmCallKind::kWasmImportWrapper); + GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper); // Run the compilation job synchronously. std::unique_ptr job( @@ -7997,10 +7928,9 @@ class LinkageLocationAllocator { } // namespace // General code uses the above configuration data. -CallDescriptor* GetWasmCallDescriptor( - Zone* zone, const wasm::FunctionSig* fsig, - WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind, - bool need_frame_state) { +CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig, + WasmCallKind call_kind, + bool need_frame_state) { // The extra here is to accomodate the instance object as first parameter // and, when specified, the additional callable. bool extra_callable_param = @@ -8078,10 +8008,9 @@ CallDescriptor* GetWasmCallDescriptor( descriptor_kind = CallDescriptor::kCallWasmCapiFunction; } - CallDescriptor::Flags flags = - use_retpoline ? CallDescriptor::kRetpoline - : need_frame_state ? CallDescriptor::kNeedsFrameState - : CallDescriptor::kNoFlags; + CallDescriptor::Flags flags = need_frame_state + ? 
CallDescriptor::kNeedsFrameState + : CallDescriptor::kNoFlags; return zone->New( // -- descriptor_kind, // kind target_type, // target MachineType diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h index 71e3111c8c6a4f..4cbf6baa67a23a 100644 --- a/deps/v8/src/compiler/wasm-compiler.h +++ b/deps/v8/src/compiler/wasm-compiler.h @@ -176,7 +176,6 @@ class JSWasmCallData { struct WasmInstanceCacheNodes { Node* mem_start; Node* mem_size; - Node* mem_mask; }; struct WasmLoopInfo { @@ -207,10 +206,6 @@ class WasmGraphBuilder { kNeedsBoundsCheck = true, kCanOmitBoundsCheck = false }; - enum UseRetpoline : bool { // -- - kRetpoline = true, - kNoRetpoline = false - }; enum CheckForNull : bool { // -- kWithNullCheck = true, kWithoutNullCheck = false @@ -576,12 +571,11 @@ class WasmGraphBuilder { IsReturnCall continuation); Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector args, base::Vector rets, wasm::WasmCodePosition position, - Node* instance_node, UseRetpoline use_retpoline, - Node* frame_state = nullptr); + Node* instance_node, Node* frame_state = nullptr); Node* BuildWasmReturnCall(const wasm::FunctionSig* sig, base::Vector args, wasm::WasmCodePosition position, - Node* instance_node, UseRetpoline use_retpoline); + Node* instance_node); Node* BuildImportCall(const wasm::FunctionSig* sig, base::Vector args, base::Vector rets, wasm::WasmCodePosition position, int func_index, @@ -765,7 +759,6 @@ class WasmGraphBuilder { bool use_js_isolate_and_params() const { return isolate_ != nullptr; } bool has_simd_ = false; bool needs_stack_check_ = false; - const bool untrusted_code_mitigations_ = true; const wasm::FunctionSig* const sig_; @@ -791,8 +784,6 @@ V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper( V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor( Zone* zone, const wasm::FunctionSig* signature, - WasmGraphBuilder::UseRetpoline use_retpoline = - WasmGraphBuilder::kNoRetpoline, WasmCallKind kind = kWasmFunction, bool need_frame_state = false); V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor( diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc index b18b9ee8ca6cfa..0d18118101d82a 100644 --- a/deps/v8/src/diagnostics/objects-printer.cc +++ b/deps/v8/src/diagnostics/objects-printer.cc @@ -1984,7 +1984,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) { } os << "\n - memory_start: " << static_cast(memory_start()); os << "\n - memory_size: " << memory_size(); - os << "\n - memory_mask: " << AsHex(memory_mask()); os << "\n - imported_function_targets: " << static_cast(imported_function_targets()); os << "\n - globals_start: " << static_cast(globals_start()); diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc index 7ee5f40d3d927c..a95a485d3f379c 100644 --- a/deps/v8/src/execution/isolate.cc +++ b/deps/v8/src/execution/isolate.cc @@ -151,26 +151,6 @@ uint32_t DefaultEmbeddedBlobDataSize() { return v8_Default_embedded_blob_data_size_; } -#ifdef V8_MULTI_SNAPSHOTS -extern "C" const uint8_t* v8_Trusted_embedded_blob_code_; -extern "C" uint32_t v8_Trusted_embedded_blob_code_size_; -extern "C" const uint8_t* v8_Trusted_embedded_blob_data_; -extern "C" uint32_t v8_Trusted_embedded_blob_data_size_; - -const uint8_t* TrustedEmbeddedBlobCode() { - return v8_Trusted_embedded_blob_code_; -} -uint32_t TrustedEmbeddedBlobCodeSize() { - return v8_Trusted_embedded_blob_code_size_; -} -const uint8_t* TrustedEmbeddedBlobData() { - return 
v8_Trusted_embedded_blob_data_; -} -uint32_t TrustedEmbeddedBlobDataSize() { - return v8_Trusted_embedded_blob_data_size_; -} -#endif - namespace { // These variables provide access to the current embedded blob without requiring // an isolate instance. This is needed e.g. by Code::InstructionStart, which may @@ -282,9 +262,6 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() { const uint8_t* code = current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed); if (code == nullptr) return false; -#ifdef V8_MULTI_SNAPSHOTS - if (code == TrustedEmbeddedBlobCode()) return true; -#endif return code == DefaultEmbeddedBlobCode(); } @@ -3407,15 +3384,6 @@ void Isolate::InitializeDefaultEmbeddedBlob() { const uint8_t* data = DefaultEmbeddedBlobData(); uint32_t data_size = DefaultEmbeddedBlobDataSize(); -#ifdef V8_MULTI_SNAPSHOTS - if (!FLAG_untrusted_code_mitigations) { - code = TrustedEmbeddedBlobCode(); - code_size = TrustedEmbeddedBlobCodeSize(); - data = TrustedEmbeddedBlobData(); - data_size = TrustedEmbeddedBlobDataSize(); - } -#endif - if (StickyEmbeddedBlobCode() != nullptr) { base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer()); // Check again now that we hold the lock. diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h index 5daf17566f8087..d8e1dd6b9c133a 100644 --- a/deps/v8/src/flags/flag-definitions.h +++ b/deps/v8/src/flags/flag-definitions.h @@ -882,15 +882,6 @@ DEFINE_BOOL(optimize_for_size, false, "speed") DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1) -#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS -#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false -#else -#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS true -#endif -DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS, - "Enable mitigations for executing untrusted code") -#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS - // Flags for WebAssembly. 
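As background for the two removals above, the flag's default came from a build-time define, and V8_MULTI_SNAPSHOTS builds used the flag to choose between the default artifacts and a "trusted" (mitigation-free) embedded blob and snapshot. A simplified, hypothetical C++ rendering of that wiring, not V8's DEFINE_BOOL or embedded-blob machinery:

#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
constexpr bool kDefaultUntrustedCodeMitigations = false;
#else
constexpr bool kDefaultUntrustedCodeMitigations = true;
#endif

// Runtime flag; the real flag could additionally be overridden on the command line.
bool FLAG_untrusted_code_mitigations = kDefaultUntrustedCodeMitigations;

// Multi-snapshot builds selected the trusted snapshot when mitigations were off.
const char* SnapshotFileName() {
  return FLAG_untrusted_code_mitigations ? "snapshot_blob.bin"
                                         : "snapshot_blob_trusted.bin";
}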
#if V8_ENABLE_WEBASSEMBLY diff --git a/deps/v8/src/init/startup-data-util.cc b/deps/v8/src/init/startup-data-util.cc index d480e3dcc2b0d5..fc880631cdedcd 100644 --- a/deps/v8/src/init/startup-data-util.cc +++ b/deps/v8/src/init/startup-data-util.cc @@ -76,11 +76,6 @@ void LoadFromFile(const char* snapshot_blob) { void InitializeExternalStartupData(const char* directory_path) { #ifdef V8_USE_EXTERNAL_STARTUP_DATA const char* snapshot_name = "snapshot_blob.bin"; -#ifdef V8_MULTI_SNAPSHOTS - if (!FLAG_untrusted_code_mitigations) { - snapshot_name = "snapshot_blob_trusted.bin"; - } -#endif std::unique_ptr snapshot = base::RelativePath(directory_path, snapshot_name); LoadFromFile(snapshot.get()); diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc index c6d6e44a2f08c9..090a77b34637a0 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.cc +++ b/deps/v8/src/interpreter/interpreter-assembler.cc @@ -157,7 +157,7 @@ TNode InterpreterAssembler::GetAccumulator() { DCHECK(Bytecodes::ReadsAccumulator(bytecode_)); implicit_register_use_ = implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator; - return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked()); + return GetAccumulatorUnchecked(); } void InterpreterAssembler::SetAccumulator(TNode value) { @@ -204,8 +204,8 @@ TNode InterpreterAssembler::GetContextAtDepth(TNode context, TNode InterpreterAssembler::RegisterLocation( TNode reg_index) { - return Signed(WordPoisonOnSpeculation( - IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)))); + return Signed( + IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))); } TNode InterpreterAssembler::RegisterLocation(Register reg) { @@ -218,8 +218,7 @@ TNode InterpreterAssembler::RegisterFrameOffset(TNode index) { TNode InterpreterAssembler::LoadRegister(TNode reg_index) { return LoadFullTagged(GetInterpretedFramePointer(), - RegisterFrameOffset(reg_index), - LoadSensitivity::kCritical); + RegisterFrameOffset(reg_index)); } TNode InterpreterAssembler::LoadRegister(Register reg) { @@ -242,16 +241,14 @@ TNode InterpreterAssembler::LoadAndUntagRegister(Register reg) { TNode InterpreterAssembler::LoadRegisterAtOperandIndex( int operand_index) { - return LoadRegister( - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); + return LoadRegister(BytecodeOperandReg(operand_index)); } std::pair, TNode> InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) { DCHECK_EQ(OperandType::kRegPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - TNode first_reg_index = - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); + TNode first_reg_index = BytecodeOperandReg(operand_index); TNode second_reg_index = NextRegister(first_reg_index); return std::make_pair(LoadRegister(first_reg_index), LoadRegister(second_reg_index)); @@ -263,8 +260,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { Bytecodes::GetOperandType(bytecode_, operand_index))); DCHECK_EQ(OperandType::kRegCount, Bytecodes::GetOperandType(bytecode_, operand_index + 1)); - TNode base_reg = RegisterLocation( - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); + TNode base_reg = RegisterLocation(BytecodeOperandReg(operand_index)); TNode reg_count = BytecodeOperandCount(operand_index + 1); return RegListNodePair(base_reg, reg_count); } @@ -272,7 +268,6 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) { TNode InterpreterAssembler::LoadRegisterFromRegisterList( 
const RegListNodePair& reg_list, int index) { TNode location = RegisterLocationInRegisterList(reg_list, index); - // Location is already poisoned on speculation, so no need to poison here. return LoadFullTagged(location); } @@ -329,8 +324,7 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode value, void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode value, int operand_index) { - StoreRegister(value, - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe)); + StoreRegister(value, BytecodeOperandReg(operand_index)); } void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode value1, @@ -338,8 +332,7 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode value1, int operand_index) { DCHECK_EQ(OperandType::kRegOutPair, Bytecodes::GetOperandType(bytecode_, operand_index)); - TNode first_reg_index = - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); + TNode first_reg_index = BytecodeOperandReg(operand_index); StoreRegister(value1, first_reg_index); TNode second_reg_index = NextRegister(first_reg_index); StoreRegister(value2, second_reg_index); @@ -350,8 +343,7 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex( int operand_index) { DCHECK_EQ(OperandType::kRegOutTriple, Bytecodes::GetOperandType(bytecode_, operand_index)); - TNode first_reg_index = - BytecodeOperandReg(operand_index, LoadSensitivity::kSafe); + TNode first_reg_index = BytecodeOperandReg(operand_index); StoreRegister(value1, first_reg_index); TNode second_reg_index = NextRegister(first_reg_index); StoreRegister(value2, second_reg_index); @@ -370,30 +362,27 @@ TNode InterpreterAssembler::OperandOffset(int operand_index) { } TNode InterpreterAssembler::BytecodeOperandUnsignedByte( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); TNode operand_offset = OperandOffset(operand_index); return Load(BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), operand_offset), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), operand_offset)); } TNode InterpreterAssembler::BytecodeOperandSignedByte( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); TNode operand_offset = OperandOffset(operand_index); return Load(BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), operand_offset), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), operand_offset)); } TNode InterpreterAssembler::BytecodeOperandReadUnaligned( - int relative_offset, MachineType result_type, - LoadSensitivity needs_poisoning) { + int relative_offset, MachineType result_type) { static const int kMaxCount = 4; DCHECK(!TargetSupportsUnalignedAccess()); @@ -430,9 +419,8 @@ TNode InterpreterAssembler::BytecodeOperandReadUnaligned( TNode offset = IntPtrConstant(relative_offset + msb_offset + i * kStep); TNode array_offset = IntPtrAdd(BytecodeOffset(), offset); - bytes[i] = - UncheckedCast(Load(machine_type, BytecodeArrayTaggedPointer(), - array_offset, needs_poisoning)); + bytes[i] = UncheckedCast( + Load(machine_type, BytecodeArrayTaggedPointer(), array_offset)); } // Pack LSB to MSB. 
@@ -446,7 +434,7 @@ TNode InterpreterAssembler::BytecodeOperandReadUnaligned( } TNode InterpreterAssembler::BytecodeOperandUnsignedShort( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, @@ -456,16 +444,15 @@ TNode InterpreterAssembler::BytecodeOperandUnsignedShort( if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return UncheckedCast(BytecodeOperandReadUnaligned( - operand_offset, MachineType::Uint16(), needs_poisoning)); + return UncheckedCast( + BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16())); } } TNode InterpreterAssembler::BytecodeOperandSignedShort( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ( OperandSize::kShort, @@ -475,16 +462,15 @@ TNode InterpreterAssembler::BytecodeOperandSignedShort( if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return UncheckedCast(BytecodeOperandReadUnaligned( - operand_offset, MachineType::Int16(), needs_poisoning)); + return UncheckedCast( + BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16())); } } TNode InterpreterAssembler::BytecodeOperandUnsignedQuad( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -493,16 +479,15 @@ TNode InterpreterAssembler::BytecodeOperandUnsignedQuad( if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return UncheckedCast(BytecodeOperandReadUnaligned( - operand_offset, MachineType::Uint32(), needs_poisoning)); + return UncheckedCast( + BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32())); } } TNode InterpreterAssembler::BytecodeOperandSignedQuad( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_)); DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize( bytecode_, operand_index, operand_scale())); @@ -511,43 +496,40 @@ TNode InterpreterAssembler::BytecodeOperandSignedQuad( if (TargetSupportsUnalignedAccess()) { return Load( BytecodeArrayTaggedPointer(), - IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)), - needs_poisoning); + IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset))); } else { - return UncheckedCast(BytecodeOperandReadUnaligned( - operand_offset, MachineType::Int32(), needs_poisoning)); + return UncheckedCast( + BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32())); } } TNode InterpreterAssembler::BytecodeSignedOperand( - int operand_index, OperandSize operand_size, - LoadSensitivity needs_poisoning) { + int operand_index, OperandSize operand_size) { DCHECK(!Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); 
switch (operand_size) { case OperandSize::kByte: - return BytecodeOperandSignedByte(operand_index, needs_poisoning); + return BytecodeOperandSignedByte(operand_index); case OperandSize::kShort: - return BytecodeOperandSignedShort(operand_index, needs_poisoning); + return BytecodeOperandSignedShort(operand_index); case OperandSize::kQuad: - return BytecodeOperandSignedQuad(operand_index, needs_poisoning); + return BytecodeOperandSignedQuad(operand_index); case OperandSize::kNone: UNREACHABLE(); } } TNode InterpreterAssembler::BytecodeUnsignedOperand( - int operand_index, OperandSize operand_size, - LoadSensitivity needs_poisoning) { + int operand_index, OperandSize operand_size) { DCHECK(Bytecodes::IsUnsignedOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); switch (operand_size) { case OperandSize::kByte: - return BytecodeOperandUnsignedByte(operand_index, needs_poisoning); + return BytecodeOperandUnsignedByte(operand_index); case OperandSize::kShort: - return BytecodeOperandUnsignedShort(operand_index, needs_poisoning); + return BytecodeOperandUnsignedShort(operand_index); case OperandSize::kQuad: - return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning); + return BytecodeOperandUnsignedQuad(operand_index); case OperandSize::kNone: UNREACHABLE(); } @@ -629,23 +611,22 @@ TNode InterpreterAssembler::BytecodeOperandIdxTaggedIndex( } TNode InterpreterAssembler::BytecodeOperandConstantPoolIdx( - int operand_index, LoadSensitivity needs_poisoning) { + int operand_index) { DCHECK_EQ(OperandType::kIdx, Bytecodes::GetOperandType(bytecode_, operand_index)); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeUint32ToWord( - BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning)); + BytecodeUnsignedOperand(operand_index, operand_size)); } -TNode InterpreterAssembler::BytecodeOperandReg( - int operand_index, LoadSensitivity needs_poisoning) { +TNode InterpreterAssembler::BytecodeOperandReg(int operand_index) { DCHECK(Bytecodes::IsRegisterOperandType( Bytecodes::GetOperandType(bytecode_, operand_index))); OperandSize operand_size = Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale()); return ChangeInt32ToIntPtr( - BytecodeSignedOperand(operand_index, operand_size, needs_poisoning)); + BytecodeSignedOperand(operand_index, operand_size)); } TNode InterpreterAssembler::BytecodeOperandRuntimeId( @@ -682,8 +663,7 @@ TNode InterpreterAssembler::LoadConstantPoolEntry(TNode index) { TNode constant_pool = CAST(LoadObjectField( BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset)); return UnsafeLoadFixedArrayElement(constant_pool, - UncheckedCast(index), 0, - LoadSensitivity::kCritical); + UncheckedCast(index), 0); } TNode InterpreterAssembler::LoadAndUntagConstantPoolEntry( @@ -693,8 +673,7 @@ TNode InterpreterAssembler::LoadAndUntagConstantPoolEntry( TNode InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex( int operand_index) { - TNode index = - BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe); + TNode index = BytecodeOperandConstantPoolIdx(operand_index); return LoadConstantPoolEntry(index); } @@ -1224,13 +1203,9 @@ void InterpreterAssembler::DispatchToBytecode( void InterpreterAssembler::DispatchToBytecodeHandlerEntry( TNode handler_entry, TNode bytecode_offset) { - // Propagate speculation poisoning. 
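The interpreter changes in this file drop the LoadSensitivity plumbing and the poison propagation on dispatch. Conceptually, the removed scheme kept a poison word that is all-ones on architecturally taken paths, becomes zero once execution mis-speculates past a guarding check, and is ANDed into "critical" loads. A plain C++ sketch of the idea, not the actual CodeStubAssembler or InterpreterAssembler API; the real implementation threads the poison through TurboFan nodes and relies on branchless conditional moves:

#include <cstdint>

struct SpeculationPoison {
  uintptr_t value = ~uintptr_t{0};  // all-ones on the architectural path

  // Invoked where generated code branches on a safety check; this update must
  // lower to a conditional move rather than a branch for the scheme to work.
  void UpdateOnBranch(bool check_passed) {
    value &= check_passed ? ~uintptr_t{0} : uintptr_t{0};
  }

  // Applied to sensitive loads (register file, bytecode operands, dispatch
  // targets): a mis-speculated path only ever observes zeroes.
  uintptr_t Mask(uintptr_t loaded_value) const { return loaded_value & value; }
};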
- TNode poisoned_handler_entry = - UncheckedCast(WordPoisonOnSpeculation(handler_entry)); - TailCallBytecodeDispatch(InterpreterDispatchDescriptor{}, - poisoned_handler_entry, GetAccumulatorUnchecked(), - bytecode_offset, BytecodeArrayTaggedPointer(), - DispatchTablePointer()); + TailCallBytecodeDispatch( + InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(), + bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer()); } void InterpreterAssembler::DispatchWide(OperandScale operand_scale) { diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h index bf4641200bbd12..d89c05e2d389a4 100644 --- a/deps/v8/src/interpreter/interpreter-assembler.h +++ b/deps/v8/src/interpreter/interpreter-assembler.h @@ -308,51 +308,32 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { // The |result_type| determines the size and signedness. of the // value read. This method should only be used on architectures that // do not support unaligned memory accesses. - TNode BytecodeOperandReadUnaligned( - int relative_offset, MachineType result_type, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); + TNode BytecodeOperandReadUnaligned(int relative_offset, + MachineType result_type); // Returns zero- or sign-extended to word32 value of the operand. - TNode BytecodeOperandUnsignedByte( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeOperandSignedByte( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeOperandUnsignedShort( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeOperandSignedShort( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeOperandUnsignedQuad( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeOperandSignedQuad( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); + TNode BytecodeOperandUnsignedByte(int operand_index); + TNode BytecodeOperandSignedByte(int operand_index); + TNode BytecodeOperandUnsignedShort(int operand_index); + TNode BytecodeOperandSignedShort(int operand_index); + TNode BytecodeOperandUnsignedQuad(int operand_index); + TNode BytecodeOperandSignedQuad(int operand_index); // Returns zero- or sign-extended to word32 value of the operand of // given size. - TNode BytecodeSignedOperand( - int operand_index, OperandSize operand_size, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); - TNode BytecodeUnsignedOperand( - int operand_index, OperandSize operand_size, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); + TNode BytecodeSignedOperand(int operand_index, + OperandSize operand_size); + TNode BytecodeUnsignedOperand(int operand_index, + OperandSize operand_size); // Returns the word-size sign-extended register index for bytecode operand - // |operand_index| in the current bytecode. Value is not poisoned on - // speculation since the value loaded from the register is poisoned instead. - TNode BytecodeOperandReg( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); + // |operand_index| in the current bytecode. + TNode BytecodeOperandReg(int operand_index); // Returns the word zero-extended index immediate for bytecode operand - // |operand_index| in the current bytecode for use when loading a . 
- TNode BytecodeOperandConstantPoolIdx( - int operand_index, - LoadSensitivity needs_poisoning = LoadSensitivity::kCritical); + // |operand_index| in the current bytecode for use when loading a constant + // pool element. + TNode BytecodeOperandConstantPoolIdx(int operand_index); // Jump relative to the current bytecode by the |jump_offset|. If |backward|, // then jump backward (subtract the offset), otherwise jump forward (add the diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc index e010ab2f640607..5508c07e3f4708 100644 --- a/deps/v8/src/interpreter/interpreter-generator.cc +++ b/deps/v8/src/interpreter/interpreter-generator.cc @@ -3074,9 +3074,6 @@ Handle GenerateBytecodeHandler(Isolate* isolate, const char* debug_name, compiler::CodeAssemblerState state( isolate, &zone, InterpreterDispatchDescriptor{}, CodeKind::BYTECODE_HANDLER, debug_name, - FLAG_untrusted_code_mitigations - ? PoisoningMitigationLevel::kPoisonCriticalOnly - : PoisoningMitigationLevel::kDontPoison, builtin); switch (bytecode) { diff --git a/deps/v8/src/snapshot/embedded/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc index c32b459d9d7bfa..e5355215f252cc 100644 --- a/deps/v8/src/snapshot/embedded/embedded-empty.cc +++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc @@ -17,15 +17,3 @@ const uint8_t* v8_Default_embedded_blob_code_ = nullptr; uint32_t v8_Default_embedded_blob_code_size_ = 0; const uint8_t* v8_Default_embedded_blob_data_ = nullptr; uint32_t v8_Default_embedded_blob_data_size_ = 0; - -#ifdef V8_MULTI_SNAPSHOTS -extern "C" const uint8_t* v8_Trusted_embedded_blob_code_; -extern "C" uint32_t v8_Trusted_embedded_blob_code_size_; -extern "C" const uint8_t* v8_Trusted_embedded_blob_data_; -extern "C" uint32_t v8_Trusted_embedded_blob_data_size_; - -const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr; -uint32_t v8_Trusted_embedded_blob_code_size_ = 0; -const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr; -uint32_t v8_Trusted_embedded_blob_data_size_ = 0; -#endif diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index bb2fed83c6566f..4a0d4a72552200 100644 --- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -4787,22 +4787,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig, // Since we have more cache registers than parameter registers, the // {LiftoffCompiler} should always be able to place {target} in a register. DCHECK(target.is_valid()); - if (FLAG_untrusted_code_mitigations) { - RetpolineCall(target); - } else { - call(target); - } + call(target); } void LiftoffAssembler::TailCallIndirect(Register target) { // Since we have more cache registers than parameter registers, the // {LiftoffCompiler} should always be able to place {target} in a register. 
DCHECK(target.is_valid()); - if (FLAG_untrusted_code_mitigations) { - RetpolineJump(target); - } else { - jmp(target); - } + jmp(target); } void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc index 92ee9efd67ae3a..c19a351b3c92a3 100644 --- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc +++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc @@ -2815,30 +2815,6 @@ class LiftoffCompiler { __ DeallocateStackSlot(sizeof(MemoryTracingInfo)); } - Register AddMemoryMasking(Register index, uintptr_t* offset, - LiftoffRegList* pinned) { - if (!FLAG_untrusted_code_mitigations || - env_->bounds_checks == kTrapHandler) { - return index; - } - CODE_COMMENT("mask memory index"); - // Make sure that we can overwrite {index}. - if (__ cache_state()->is_used(LiftoffRegister(index))) { - Register old_index = index; - pinned->clear(LiftoffRegister{old_index}); - index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp(); - if (index != old_index) { - __ Move(index, old_index, kPointerKind); - } - } - Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp(); - LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned); - if (*offset) __ emit_ptrsize_addi(index, index, *offset); - __ emit_ptrsize_and(index, index, tmp); - *offset = 0; - return index; - } - bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot, int access_size, uintptr_t* offset) { if (!index_slot.is_const()) return false; @@ -2899,7 +2875,6 @@ class LiftoffCompiler { CODE_COMMENT("load from memory"); LiftoffRegList pinned = LiftoffRegList::ForRegs(index); - index = AddMemoryMasking(index, &offset, &pinned); // Load the memory start address only now to reduce register pressure // (important on ia32). 
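The AddMemoryMasking helper removed above folded the static access offset into the index register and then clamped the sum with the per-instance memory mask (it was skipped entirely when trap-handler bounds checks were in use). A standalone sketch of that computation, a hypothetical helper rather than the LiftoffAssembler API:

#include <cstdint>

// Fold the immediate offset into the index, then clamp with the power-of-two
// memory mask so even a mis-speculated access stays inside the reserved region.
uintptr_t MaskMemoryIndex(uintptr_t index, uintptr_t* offset,
                          uintptr_t mem_mask) {
  index += *offset;  // the offset is consumed here...
  *offset = 0;       // ...so the caller must not add it a second time
  return index & mem_mask;
}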
@@ -2944,7 +2919,6 @@ class LiftoffCompiler { uintptr_t offset = imm.offset; LiftoffRegList pinned = LiftoffRegList::ForRegs(index); - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("load with transformation"); Register addr = GetMemoryStart(pinned); LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {}); @@ -2984,7 +2958,6 @@ class LiftoffCompiler { uintptr_t offset = imm.offset; pinned.set(index); - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("load lane"); Register addr = GetMemoryStart(pinned); LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {}); @@ -3030,7 +3003,6 @@ class LiftoffCompiler { if (index == no_reg) return; pinned.set(index); - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("store to memory"); uint32_t protected_store_pc = 0; // Load the memory start address only now to reduce register pressure @@ -3065,7 +3037,6 @@ class LiftoffCompiler { uintptr_t offset = imm.offset; pinned.set(index); - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("store lane to memory"); Register addr = pinned.set(GetMemoryStart(pinned)); uint32_t protected_store_pc = 0; @@ -4347,7 +4318,6 @@ class LiftoffCompiler { pinned.set(index); AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); uintptr_t offset = imm.offset; - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("atomic store to memory"); Register addr = pinned.set(GetMemoryStart(pinned)); LiftoffRegList outer_pinned; @@ -4370,7 +4340,6 @@ class LiftoffCompiler { LiftoffRegList pinned = LiftoffRegList::ForRegs(index); AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); uintptr_t offset = imm.offset; - index = AddMemoryMasking(index, &offset, &pinned); CODE_COMMENT("atomic load from memory"); Register addr = pinned.set(GetMemoryStart(pinned)); RegClass rc = reg_class_for(kind); @@ -4418,7 +4387,6 @@ class LiftoffCompiler { AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); uintptr_t offset = imm.offset; - index = AddMemoryMasking(index, &offset, &pinned); Register addr = pinned.set(GetMemoryStart(pinned)); (asm_.*emit_fn)(addr, index, offset, value, result, type); @@ -4441,7 +4409,6 @@ class LiftoffCompiler { AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); uintptr_t offset = imm.offset; - index = AddMemoryMasking(index, &offset, &pinned); Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned); __ emit_i32_add(addr, addr, index); @@ -4474,7 +4441,6 @@ class LiftoffCompiler { AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned); uintptr_t offset = imm.offset; - index = AddMemoryMasking(index, &offset, &pinned); Register addr = pinned.set(GetMemoryStart(pinned)); LiftoffRegister result = pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned)); @@ -4521,7 +4487,6 @@ class LiftoffCompiler { pinned); uintptr_t offset = imm.offset; - index_reg = AddMemoryMasking(index_reg, &offset, &pinned); Register index_plus_offset = __ cache_state()->is_used(LiftoffRegister(index_reg)) ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp() @@ -4538,8 +4503,7 @@ class LiftoffCompiler { __ cache_state()->stack_state.end()[-2]; LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3]; - // We have to set the correct register for the index. It may have changed - // above in {AddMemoryMasking}. + // We have to set the correct register for the index. 
index.MakeRegister(LiftoffRegister(index_plus_offset)); static constexpr WasmCode::RuntimeStubId kTargets[2][2]{ @@ -4569,7 +4533,6 @@ class LiftoffCompiler { AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned); uintptr_t offset = imm.offset; - index_reg = AddMemoryMasking(index_reg, &offset, &pinned); Register index_plus_offset = __ cache_state()->is_used(LiftoffRegister(index_reg)) ? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp() @@ -5785,28 +5748,6 @@ class LiftoffCompiler { __ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index, tmp_const); - // Mask the index to prevent SSCA. - if (FLAG_untrusted_code_mitigations) { - CODE_COMMENT("Mask indirect call index"); - // mask = ((index - size) & ~index) >> 31 - // Reuse allocated registers; note: size is still stored in {tmp_const}. - Register diff = table; - Register neg_index = tmp_const; - Register mask = scratch; - // 1) diff = index - size - __ emit_i32_sub(diff, index, tmp_const); - // 2) neg_index = ~index - __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1})); - __ emit_i32_xor(neg_index, neg_index, index); - // 3) mask = diff & neg_index - __ emit_i32_and(mask, diff, neg_index); - // 4) mask = mask >> 31 - __ emit_i32_sari(mask, mask, 31); - - // Apply mask. - __ emit_i32_and(index, index, mask); - } - CODE_COMMENT("Check indirect call signature"); // Load the signature from {instance->ift_sig_ids[key]} if (imm.table_imm.index == 0) { diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h index d5cda7b3c482a7..daee2964c51eef 100644 --- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h +++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h @@ -4322,11 +4322,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig, popq(kScratchRegister); target = kScratchRegister; } - if (FLAG_untrusted_code_mitigations) { - RetpolineCall(target); - } else { - call(target); - } + call(target); } void LiftoffAssembler::TailCallIndirect(Register target) { @@ -4334,11 +4330,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) { popq(kScratchRegister); target = kScratchRegister; } - if (FLAG_untrusted_code_mitigations) { - RetpolineJump(target); - } else { - jmp(target); - } + jmp(target); } void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc index 84f34cc0ed8d30..5e85cb0d27f0bc 100644 --- a/deps/v8/src/wasm/graph-builder-interface.cc +++ b/deps/v8/src/wasm/graph-builder-interface.cc @@ -1547,7 +1547,6 @@ class WasmGraphBuildingInterface { WRAP_CACHE_FIELD(mem_start); WRAP_CACHE_FIELD(mem_size); - WRAP_CACHE_FIELD(mem_mask); #undef WRAP_CACHE_FIELD } } diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc index 101d5638765538..a95a0d40cc8441 100644 --- a/deps/v8/src/wasm/wasm-external-refs.cc +++ b/deps/v8/src/wasm/wasm-external-refs.cc @@ -451,7 +451,6 @@ class V8_NODISCARD ThreadNotInWasmScope { #endif }; -#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) { return instance.memory_start() + index; } @@ -460,19 +459,6 @@ inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) { return base + index; } -#else -inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) { - // Compute the effective address of the access, making sure to condition - // the 
index even in the in-bounds case. - return instance.memory_start() + (index & instance.memory_mask()); -} - -inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) { - size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1; - return base + (index & mem_mask); -} -#endif - template V ReadAndIncrementOffset(Address data, size_t* offset) { V result = ReadUnalignedValue(data + *offset); diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h index a75d83df027b25..00d075b83e1dd2 100644 --- a/deps/v8/src/wasm/wasm-objects-inl.h +++ b/deps/v8/src/wasm/wasm-objects-inl.h @@ -186,7 +186,6 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle value) { // WasmInstanceObject PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset) -PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address, kIsolateRootOffset) PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address, diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc index a6ff80f624231e..fa15ab24c21a50 100644 --- a/deps/v8/src/wasm/wasm-objects.cc +++ b/deps/v8/src/wasm/wasm-objects.cc @@ -1242,21 +1242,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize( void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) { CHECK_LE(mem_size, wasm::max_mem_bytes()); #if V8_HOST_ARCH_64_BIT - uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1; set_memory_start(mem_start); set_memory_size(mem_size); - set_memory_mask(mem_mask64); #else // Must handle memory > 2GiB specially. CHECK_LE(mem_size, size_t{kMaxUInt32}); - uint32_t mem_mask32 = - (mem_size > 2 * size_t{GB}) - ? 
0xFFFFFFFFu - : base::bits::RoundUpToPowerOfTwo32(static_cast(mem_size)) - - 1; set_memory_start(mem_start); set_memory_size(mem_size); - set_memory_mask(mem_mask32); #endif } diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h index 11d5c265ed5feb..f0795322bba03e 100644 --- a/deps/v8/src/wasm/wasm-objects.h +++ b/deps/v8/src/wasm/wasm-objects.h @@ -356,7 +356,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { DECL_ACCESSORS(managed_object_maps, FixedArray) DECL_PRIMITIVE_ACCESSORS(memory_start, byte*) DECL_PRIMITIVE_ACCESSORS(memory_size, size_t) - DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t) DECL_PRIMITIVE_ACCESSORS(isolate_root, Address) DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address) DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address) @@ -397,7 +396,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject { V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \ V(kMemoryStartOffset, kSystemPointerSize) \ V(kMemorySizeOffset, kSizetSize) \ - V(kMemoryMaskOffset, kSizetSize) \ V(kStackLimitAddressOffset, kSystemPointerSize) \ V(kImportedFunctionTargetsOffset, kSystemPointerSize) \ V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \ diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn index e7f011df74dcdf..2c84d21258f119 100644 --- a/deps/v8/test/cctest/BUILD.gn +++ b/deps/v8/test/cctest/BUILD.gn @@ -119,7 +119,6 @@ v8_source_set("cctest_sources") { "compiler/test-run-jsops.cc", "compiler/test-run-load-store.cc", "compiler/test-run-machops.cc", - "compiler/test-run-retpoline.cc", "compiler/test-run-stackcheck.cc", "compiler/test-run-tail-calls.cc", "compiler/test-run-unwinding-info.cc", @@ -307,7 +306,6 @@ v8_source_set("cctest_sources") { "test-assembler-arm.cc", "test-disasm-arm.cc", "test-macro-assembler-arm.cc", - "test-poison-disasm-arm.cc", "test-sync-primitives-arm.cc", ] } else if (v8_current_cpu == "arm64") { @@ -319,7 +317,6 @@ v8_source_set("cctest_sources") { "test-js-arm64-variables.cc", "test-macro-assembler-arm64.cc", "test-pointer-auth-arm64.cc", - "test-poison-disasm-arm64.cc", "test-sync-primitives-arm64.cc", "test-utils-arm64.cc", "test-utils-arm64.h", diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status index 9b369044754443..1401395f2ebadf 100644 --- a/deps/v8/test/cctest/cctest.status +++ b/deps/v8/test/cctest/cctest.status @@ -649,7 +649,6 @@ 'test-run-load-store/*': [SKIP], 'test-run-machops/*': [SKIP], 'test-run-native-calls/*': [SKIP], - 'test-run-retpoline/*': [SKIP], 'test-run-stackcheck/*': [SKIP], 'test-run-tail-calls/*': [SKIP], 'test-run-unwinding-info/*': [SKIP], diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h index 6dc343fa08de93..d1e904a6a86b2b 100644 --- a/deps/v8/test/cctest/compiler/code-assembler-tester.h +++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h @@ -24,7 +24,7 @@ class CodeAssemblerTester { : zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone), scope_(isolate), state_(isolate, &zone_, descriptor, CodeKind::FOR_TESTING, name, - PoisoningMitigationLevel::kDontPoison, Builtin::kNoBuiltinId) {} + Builtin::kNoBuiltinId) {} // Test generating code for a stub. Assumes VoidDescriptor call interface. 
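The SetRawMemory and EffectiveAddress changes above drop the cached memory mask. That mask was the smallest power-of-two-minus-one covering the memory size (with a 0xFFFFFFFF fallback for memories above 2GiB on 32-bit hosts), so masking an index could never reach past the reserved region. A standalone sketch of the 64-bit case, with hypothetical helpers and assuming mem_size >= 1:

#include <cstdint>

// Equivalent of base::bits::RoundUpToPowerOfTwo64(mem_size) - 1: smear the
// highest set bit of mem_size - 1 downwards to get an all-ones mask.
uint64_t ComputeMemoryMask(uint64_t mem_size) {
  uint64_t mask = mem_size - 1;
  mask |= mask >> 1;  mask |= mask >> 2;  mask |= mask >> 4;
  mask |= mask >> 8;  mask |= mask >> 16; mask |= mask >> 32;
  return mask;
}

// The mitigated EffectiveAddress conditioned every access with the mask, even
// after the architectural bounds check had already passed.
const uint8_t* EffectiveAddress(const uint8_t* memory_start, uint64_t mem_mask,
                                uint32_t index) {
  return memory_start + (index & mem_mask);
}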
explicit CodeAssemblerTester(Isolate* isolate, const char* name = "test") @@ -36,8 +36,7 @@ class CodeAssemblerTester { const char* name = "test") : zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone), scope_(isolate), - state_(isolate, &zone_, parameter_count, kind, name, - PoisoningMitigationLevel::kDontPoison) {} + state_(isolate, &zone_, parameter_count, kind, name) {} CodeAssemblerTester(Isolate* isolate, CodeKind kind, const char* name = "test") @@ -48,7 +47,7 @@ class CodeAssemblerTester { : zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone), scope_(isolate), state_(isolate, &zone_, call_descriptor, CodeKind::FOR_TESTING, name, - PoisoningMitigationLevel::kDontPoison, Builtin::kNoBuiltinId) {} + Builtin::kNoBuiltinId) {} CodeAssemblerState* state() { return &state_; } diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc index 997d7ade73fece..6df1820b3a50cd 100644 --- a/deps/v8/test/cctest/compiler/test-code-generator.cc +++ b/deps/v8/test/cctest/compiler/test-code-generator.cc @@ -1002,7 +1002,6 @@ class CodeGeneratorTester { environment->main_zone(), &frame_, &linkage_, environment->instructions(), &info_, environment->main_isolate(), base::Optional(), kNoSourcePosition, nullptr, - PoisoningMitigationLevel::kDontPoison, AssemblerOptions::Default(environment->main_isolate()), Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight, kMaxPushedArgumentCount); @@ -1056,7 +1055,6 @@ class CodeGeneratorTester { AllocatedOperand(LocationOperand::REGISTER, MachineRepresentation::kTagged, kReturnRegister0.code()), - ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index. ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot), ImmediateOperand(ImmediateOperand::INLINE_INT32, stack_slot_delta)}; Instruction* tail_call = @@ -1145,7 +1143,6 @@ class CodeGeneratorTester { AllocatedOperand(LocationOperand::REGISTER, MachineRepresentation::kTagged, kReturnRegister0.code()), - ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index. ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot), ImmediateOperand(ImmediateOperand::INLINE_INT32, first_unused_stack_slot)}; diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc index 84871cccb6319b..0d3364169f1c2f 100644 --- a/deps/v8/test/common/wasm/wasm-interpreter.cc +++ b/deps/v8/test/common/wasm/wasm-interpreter.cc @@ -1621,8 +1621,7 @@ class WasmInterpreterInternals { DCHECK_GE(instance_object_->memory_size(), index); // Compute the effective address of the access, making sure to condition // the index even in the in-bounds case. - return reinterpret_cast
(instance_object_->memory_start()) + - (index & instance_object_->memory_mask()); + return reinterpret_cast
(instance_object_->memory_start()) + index; } template diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc index 121190bdb8ca53..b53461b8b01289 100644 --- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc +++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc @@ -21,10 +21,9 @@ namespace internal { CodeStubAssemblerTestState::CodeStubAssemblerTestState( CodeStubAssemblerTest* test) - : compiler::CodeAssemblerState( - test->isolate(), test->zone(), VoidDescriptor{}, - CodeKind::FOR_TESTING, "test", - PoisoningMitigationLevel::kPoisonCriticalOnly) {} + : compiler::CodeAssemblerState(test->isolate(), test->zone(), + VoidDescriptor{}, CodeKind::FOR_TESTING, + "test") {} TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) { CodeStubAssemblerTestState state(this); diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc index f4e3ea07b1c28f..e52661fae260ef 100644 --- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc +++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc @@ -50,8 +50,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build( InstructionSelector::kEnableSwitchJumpTable, &tick_counter, nullptr, &max_unoptimized_frame_height, &max_pushed_argument_count, source_position_mode, features, InstructionSelector::kDisableScheduling, - InstructionSelector::kEnableRootsRelativeAddressing, - PoisoningMitigationLevel::kPoisonAll); + InstructionSelector::kEnableRootsRelativeAddressing); selector.SelectInstructions(); if (FLAG_trace_turbo) { StdoutStream{} << "=== Code sequence after instruction selection ===" @@ -452,7 +451,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) { EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode()); size_t num_operands = 1 + // Code object. - 1 + // Poison index 6 + // Frame state deopt id + one input for each value in frame state. 1 + // Function. 1; // Context. @@ -462,23 +460,23 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) { EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate()); // Deoptimization id. - int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2)); + int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1)); FrameStateDescriptor* desc_before = s.GetFrameStateDescriptor(deopt_id_before); EXPECT_EQ(bailout_id_before, desc_before->bailout_id()); EXPECT_EQ(1u, desc_before->parameters_count()); EXPECT_EQ(1u, desc_before->locals_count()); EXPECT_EQ(1u, desc_before->stack_count()); - EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(4))); - EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(5))); // This should be a context. + EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(3))); + EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context. // We inserted 0 here. - EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(6))); - EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(7))->IsUndefined(isolate())); + EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5))); + EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate())); // Function. - EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(8))); + EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(7))); // Context. 
- EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(9))); + EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(8))); EXPECT_EQ(kArchRet, s[index++]->arch_opcode()); @@ -559,7 +557,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) { EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode()); size_t num_operands = 1 + // Code object. - 1 + // Poison index. 1 + // Frame state deopt id 5 + // One input for each value in frame state + context. 5 + // One input for each value in the parent frame state + context. @@ -570,7 +567,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) { EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate()); // Deoptimization id. - int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2)); + int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1)); FrameStateDescriptor* desc_before = s.GetFrameStateDescriptor(deopt_id_before); FrameStateDescriptor* desc_before_outer = desc_before->outer_state(); @@ -579,24 +576,24 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) { EXPECT_EQ(1u, desc_before_outer->locals_count()); EXPECT_EQ(1u, desc_before_outer->stack_count()); // Values from parent environment. - EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(4))); + EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(3))); // Context: - EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(5))); - EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(6))); - EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(7))); + EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(4))); + EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(5))); + EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(6))); // Values from the nested frame. EXPECT_EQ(1u, desc_before->parameters_count()); EXPECT_EQ(1u, desc_before->locals_count()); EXPECT_EQ(1u, desc_before->stack_count()); - EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(9))); - EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(10))); - EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(11))); - EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(12))); + EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(8))); + EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(9))); + EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(10))); + EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(11))); // Function. - EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13))); + EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(12))); // Context. - EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(14))); + EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(13))); // Continuation. EXPECT_EQ(kArchRet, s[index++]->arch_opcode()); diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc index 2b76e5289f227b..03960705e1a571 100644 --- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc +++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc @@ -86,8 +86,7 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) { // Run the state effect introducer. LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(), - node_origins(), PoisoningMitigationLevel::kDontPoison, - broker()); + node_origins(), broker()); EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number, @@ -148,8 +147,7 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) { // Run the state effect introducer. 
   LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   // The effect input to the return should be an effect phi with the
   // newly introduced effectful change operators.
@@ -215,8 +213,7 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
 
   // Run the state effect introducer.
   LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   ASSERT_THAT(ret, IsReturn(load, load, if_true));
   EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
@@ -278,8 +275,7 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
   schedule.AddNode(mblock, graph()->end());
 
   LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   Capture branch1_capture, branch2_capture;
   EXPECT_THAT(
@@ -337,8 +333,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenBranch) {
   // Run the state effect linearizer and machine lowering, maintaining the
   // schedule.
   LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   ASSERT_THAT(end(), IsEnd(IsThrow()));
 }
@@ -390,8 +385,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenDiamond) {
   // Run the state effect linearizer and machine lowering, maintaining the
   // schedule.
   LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   ASSERT_THAT(end(), IsEnd(IsThrow()));
 }
@@ -448,8 +442,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenLoop) {
   // Run the state effect linearizer and machine lowering, maintaining the
   // schedule.
   LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   ASSERT_THAT(end(), IsEnd(IsThrow()));
 }
@@ -502,8 +495,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableInChangedBlockThenBranch) {
   // Run the state effect linearizer and machine lowering, maintaining the
   // schedule.
   LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
-                         node_origins(), PoisoningMitigationLevel::kDontPoison,
-                         broker());
+                         node_origins(), broker());
 
   ASSERT_THAT(end(), IsEnd(IsThrow()));
 }
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 5305fef5741be3..b449faee8db871 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1150,7 +1150,6 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
 
 LOAD_MATCHER(Load)
 LOAD_MATCHER(UnalignedLoad)
-LOAD_MATCHER(PoisonedLoad)
 LOAD_MATCHER(LoadFromObject)
 
 class IsLoadImmutableMatcher final : public TestNodeMatcher {
@@ -2103,16 +2102,6 @@ Matcher IsLoad(const Matcher& rep_matcher,
                                        effect_matcher, control_matcher));
 }
 
-Matcher IsPoisonedLoad(const Matcher& rep_matcher,
-                       const Matcher& base_matcher,
-                       const Matcher& index_matcher,
-                       const Matcher& effect_matcher,
-                       const Matcher& control_matcher) {
-  return MakeMatcher(new IsPoisonedLoadMatcher(rep_matcher, base_matcher,
-                                               index_matcher, effect_matcher,
-                                               control_matcher));
-}
-
 Matcher IsUnalignedLoad(const Matcher& rep_matcher,
                         const Matcher& base_matcher,
                         const Matcher& index_matcher,
@@ -2366,7 +2355,6 @@ IS_UNOP_MATCHER(Word32Ctz)
 IS_UNOP_MATCHER(Word32Popcnt)
 IS_UNOP_MATCHER(Word32ReverseBytes)
 IS_UNOP_MATCHER(SpeculativeToNumber)
-IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
 #undef IS_UNOP_MATCHER
 
 // Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 0e5e99679c9a70..f727a14c34d350 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -328,11 +328,6 @@ Matcher IsLoad(const Matcher& rep_matcher,
                const Matcher& index_matcher,
                const Matcher& effect_matcher,
                const Matcher& control_matcher);
-Matcher IsPoisonedLoad(const Matcher& rep_matcher,
-                       const Matcher& base_matcher,
-                       const Matcher& index_matcher,
-                       const Matcher& effect_matcher,
-                       const Matcher& control_matcher);
 Matcher IsUnalignedLoad(const Matcher& rep_matcher,
                         const Matcher& base_matcher,
                         const Matcher& index_matcher,
@@ -486,7 +481,6 @@ Matcher IsNumberToBoolean(const Matcher& input_matcher);
 Matcher IsNumberToInt32(const Matcher& input_matcher);
 Matcher IsNumberToUint32(const Matcher& input_matcher);
 Matcher IsParameter(const Matcher index_matcher);
-Matcher IsSpeculationPoison();
 Matcher IsLoadFramePointer();
 Matcher IsLoadParentFramePointer();
 Matcher IsPlainPrimitiveToNumber(const Matcher& input_matcher);
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 6387f814e1b19f..6eddb961cad0fc 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -49,9 +49,8 @@ class SimplifiedLoweringTest : public GraphTest {
     Linkage* linkage = zone()->New(Linkage::GetJSCallDescriptor(
         zone(), false, num_parameters_ + 1, CallDescriptor::kCanUseRoots));
 
-    SimplifiedLowering lowering(
-        jsgraph(), broker(), zone(), source_positions(), node_origins(),
-        PoisoningMitigationLevel::kDontPoison, tick_counter(), linkage);
+    SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
+                                node_origins(), tick_counter(), linkage);
     lowering.LowerAllNodes();
   }
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 63ccfc5b76affb..3eddc5c99c7ec9 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -27,8 +27,7 @@ InterpreterAssemblerTestState::InterpreterAssemblerTestState(
     InterpreterAssemblerTest* test, Bytecode bytecode)
     : compiler::CodeAssemblerState(
           test->isolate(), test->zone(), InterpreterDispatchDescriptor{},
-          CodeKind::BYTECODE_HANDLER, Bytecodes::ToString(bytecode),
-          PoisoningMitigationLevel::kPoisonCriticalOnly) {}
+          CodeKind::BYTECODE_HANDLER, Bytecodes::ToString(bytecode)) {}
 
 const interpreter::Bytecode kBytecodes[] = {
 #define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
@@ -55,14 +54,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
 Matcher InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
     const Matcher& rep_matcher, const Matcher& base_matcher,
-    const Matcher& index_matcher, LoadSensitivity needs_poisoning) {
-  CHECK_NE(LoadSensitivity::kUnsafe, needs_poisoning);
-  CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
-  if (poisoning_level() == PoisoningMitigationLevel::kPoisonCriticalOnly &&
-      needs_poisoning == LoadSensitivity::kCritical) {
-    return ::i::compiler::IsPoisonedLoad(rep_matcher, base_matcher,
-                                         index_matcher, _, _);
-  }
+    const Matcher& index_matcher) {
   return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
 }
 
@@ -71,7 +63,6 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
     const Matcher& rep_matcher, const Matcher& base_matcher,
     const Matcher& index_matcher) {
-  CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
   return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
                                          index_matcher, _, _);
 }
@@ -96,39 +87,36 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   return IsLoad(
       MachineType::Uint8(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
      c::IsIntPtrAdd(
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-          c::IsIntPtrConstant(offset)),
-      needs_poisoning);
+          c::IsIntPtrConstant(offset)));
 }
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   return IsLoad(
       MachineType::Int8(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
      c::IsIntPtrAdd(
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-          c::IsIntPtrConstant(offset)),
-      needs_poisoning);
+          c::IsIntPtrConstant(offset)));
 }
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   if (TargetSupportsUnalignedAccess()) {
     return IsLoad(
         MachineType::Uint16(),
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset)));
   } else {
 #if V8_TARGET_LITTLE_ENDIAN
     const int kStep = -1;
@@ -146,8 +134,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
     }
     return c::IsWord32Or(
         c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
@@ -156,15 +143,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   if (TargetSupportsUnalignedAccess()) {
     return IsLoad(
         MachineType::Int16(),
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset)));
   } else {
 #if V8_TARGET_LITTLE_ENDIAN
     const int kStep = -1;
@@ -182,8 +168,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
     }
     return c::IsWord32Or(
         c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
@@ -192,15 +177,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   if (TargetSupportsUnalignedAccess()) {
     return IsLoad(
         MachineType::Uint32(),
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset)));
   } else {
 #if V8_TARGET_LITTLE_ENDIAN
     const int kStep = -1;
@@ -218,8 +202,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
     }
     return c::IsWord32Or(
         c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
@@ -233,15 +216,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
-    int offset, LoadSensitivity needs_poisoning) {
+    int offset) {
   if (TargetSupportsUnalignedAccess()) {
     return IsLoad(
         MachineType::Int32(),
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset)));
   } else {
 #if V8_TARGET_LITTLE_ENDIAN
     const int kStep = -1;
@@ -259,8 +241,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
         c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
         c::IsIntPtrAdd(
             c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
-            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
-        needs_poisoning);
+            c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
     }
     return c::IsWord32Or(
         c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
@@ -274,14 +255,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
-    int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
+    int offset, OperandSize operand_size) {
   switch (operand_size) {
     case OperandSize::kByte:
-      return IsSignedByteOperand(offset, needs_poisoning);
+      return IsSignedByteOperand(offset);
     case OperandSize::kShort:
-      return IsSignedShortOperand(offset, needs_poisoning);
+      return IsSignedShortOperand(offset);
    case OperandSize::kQuad:
-      return IsSignedQuadOperand(offset, needs_poisoning);
+      return IsSignedQuadOperand(offset);
     case OperandSize::kNone:
       UNREACHABLE();
   }
@@ -290,14 +271,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
 
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
-    int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
+    int offset, OperandSize operand_size) {
   switch (operand_size) {
     case OperandSize::kByte:
-      return IsUnsignedByteOperand(offset, needs_poisoning);
+      return IsUnsignedByteOperand(offset);
     case OperandSize::kShort:
-      return IsUnsignedShortOperand(offset, needs_poisoning);
+      return IsUnsignedShortOperand(offset);
    case OperandSize::kQuad:
-      return IsUnsignedQuadOperand(offset, needs_poisoning);
+      return IsUnsignedQuadOperand(offset);
     case OperandSize::kNone:
       UNREACHABLE();
   }
@@ -307,12 +288,11 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
 Matcher
 InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
     int offset, OperandSize operand_size) {
-  Matcher reg_operand = IsChangeInt32ToIntPtr(
-      IsSignedOperand(offset, operand_size, LoadSensitivity::kSafe));
+  Matcher reg_operand =
+      IsChangeInt32ToIntPtr(IsSignedOperand(offset, operand_size));
   return IsBitcastWordToTagged(IsLoad(
       MachineType::Pointer(), c::IsLoadParentFramePointer(),
-      c::IsWordShl(reg_operand, c::IsIntPtrConstant(kSystemPointerSizeLog2)),
-      LoadSensitivity::kCritical));
+      c::IsWordShl(reg_operand, c::IsIntPtrConstant(kSystemPointerSizeLog2))));
 }
 
 TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
@@ -334,44 +314,38 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
       switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
         case interpreter::OperandType::kRegCount:
           EXPECT_THAT(m.BytecodeOperandCount(i),
-                      m.IsUnsignedOperand(offset, operand_size,
-                                          LoadSensitivity::kCritical));
+                      m.IsUnsignedOperand(offset, operand_size));
           break;
         case interpreter::OperandType::kFlag8:
           EXPECT_THAT(m.BytecodeOperandFlag(i),
-                      m.IsUnsignedOperand(offset, operand_size,
-                                          LoadSensitivity::kCritical));
+                      m.IsUnsignedOperand(offset, operand_size));
          break;
         case interpreter::OperandType::kIdx:
           EXPECT_THAT(m.BytecodeOperandIdx(i),
-                      c::IsChangeUint32ToWord(m.IsUnsignedOperand(
-                          offset, operand_size, LoadSensitivity::kCritical)));
+                      c::IsChangeUint32ToWord(
+                          m.IsUnsignedOperand(offset, operand_size)));
          break;
        case interpreter::OperandType::kNativeContextIndex:
          EXPECT_THAT(m.BytecodeOperandNativeContextIndex(i),
-                      c::IsChangeUint32ToWord(m.IsUnsignedOperand(
-                          offset, operand_size, LoadSensitivity::kCritical)));
+                      c::IsChangeUint32ToWord(
+                          m.IsUnsignedOperand(offset, operand_size)));
          break;
        case interpreter::OperandType::kUImm:
          EXPECT_THAT(m.BytecodeOperandUImm(i),
-                      m.IsUnsignedOperand(offset, operand_size,
-                                          LoadSensitivity::kCritical));
+                      m.IsUnsignedOperand(offset, operand_size));
          break;
        case interpreter::OperandType::kImm: {
          EXPECT_THAT(m.BytecodeOperandImm(i),
-                      m.IsSignedOperand(offset, operand_size,
-                                        LoadSensitivity::kCritical));
+                      m.IsSignedOperand(offset, operand_size));
          break;
        }
        case interpreter::OperandType::kRuntimeId:
          EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
-                      m.IsUnsignedOperand(offset, operand_size,
-                                          LoadSensitivity::kCritical));
+                      m.IsUnsignedOperand(offset, operand_size));
          break;
        case interpreter::OperandType::kIntrinsicId:
          EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
-                      m.IsUnsignedOperand(offset, operand_size,
-                                          LoadSensitivity::kCritical));
+                      m.IsUnsignedOperand(offset, operand_size));
          break;
        case interpreter::OperandType::kRegList:
        case interpreter::OperandType::kReg:
@@ -416,12 +390,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
           c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
                               kHeapObjectTag));
-      EXPECT_THAT(
-          load_constant,
-          m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
-                   c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
-                                       kHeapObjectTag),
-                   LoadSensitivity::kCritical));
+      EXPECT_THAT(load_constant,
+                  m.IsLoadFromObject(
+                      MachineType::AnyTagged(), constant_pool_matcher,
+                      c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+                                          kHeapObjectTag)));
     }
     {
       c::Node* index = m.UntypedParameter(2);
@@ -434,12 +407,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
                               kHeapObjectTag));
       EXPECT_THAT(
           load_constant,
-          m.IsLoad(
+          m.IsLoadFromObject(
              MachineType::AnyTagged(), constant_pool_matcher,
              c::IsIntPtrAdd(
                  c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
-                 c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
-              LoadSensitivity::kCritical));
+                 c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2)))));
     }
   }
 }
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index c2539d8a2819fd..d02b80698a8886 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -45,8 +45,7 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
     Matcher IsLoad(
         const Matcher& rep_matcher, const Matcher& base_matcher,
-        const Matcher& index_matcher,
-        LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+        const Matcher& index_matcher);
     Matcher IsLoadFromObject(
         const Matcher& rep_matcher, const Matcher& base_matcher,
@@ -60,30 +59,17 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
     Matcher IsWordNot(
         const Matcher& value_matcher);
 
-    Matcher IsUnsignedByteOperand(
-        int offset, LoadSensitivity needs_poisoning);
-    Matcher IsSignedByteOperand(
-        int offset, LoadSensitivity needs_poisoning);
-    Matcher IsUnsignedShortOperand(
-        int offset, LoadSensitivity needs_poisoning);
-    Matcher IsSignedShortOperand(
-        int offset, LoadSensitivity needs_poisoning);
-    Matcher IsUnsignedQuadOperand(
-        int offset, LoadSensitivity needs_poisoning);
-    Matcher IsSignedQuadOperand(
-        int offset, LoadSensitivity needs_poisoning);
-
-    Matcher IsUnpoisonedSignedOperand(
-        int offset, OperandSize operand_size, LoadSensitivity needs_poisoning);
-    Matcher IsUnpoisonedUnsignedOperand(
-        int offset, OperandSize operand_size, LoadSensitivity needs_poisoning);
+    Matcher IsUnsignedByteOperand(int offset);
+    Matcher IsSignedByteOperand(int offset);
+    Matcher IsUnsignedShortOperand(int offset);
+    Matcher IsSignedShortOperand(int offset);
+    Matcher IsUnsignedQuadOperand(int offset);
+    Matcher IsSignedQuadOperand(int offset);
 
     Matcher IsSignedOperand(int offset,
-                            OperandSize operand_size,
-                            LoadSensitivity needs_poisoning);
+                            OperandSize operand_size);
     Matcher IsUnsignedOperand(int offset,
-                              OperandSize operand_size,
-                              LoadSensitivity needs_poisoning);
+                              OperandSize operand_size);
 
     Matcher IsLoadRegisterOperand(int offset, OperandSize operand_size);
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 52b7954093919c..d9009bfb7c8a3a 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -78,13 +78,6 @@
         '--always-opt',
         '--force-slow-path',
     ],
-    trusted=[
-        '--no-untrusted-code-mitigations',
-    ],
-    trusted_opt=[
-        '--always-opt',
-        '--no-untrusted-code-mitigations',
-    ],
 )
 
 BASELINE_CONFIG = 'ignition'
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
index ebf04afad61de0..cfc4e537f57fef 100644
--- a/deps/v8/tools/testrunner/local/android.py
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -126,12 +126,6 @@ def push_executable(self, shell_dir, target_dir, binary):
         target_dir,
         skip_if_missing=True,
     )
-    self.push_file(
-        shell_dir,
-        'snapshot_blob_trusted.bin',
-        target_dir,
-        skip_if_missing=True,
-    )
     self.push_file(
         shell_dir,
         'icudtl.dat',
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index ba4eff451adeaa..c34b957cb27990 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -38,7 +38,8 @@
   "stress_snapshot": [["--stress-snapshot"]],
   # Trigger stress sampling allocation profiler with sample interval = 2^14
   "stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
-  "trusted": [["--no-untrusted-code-mitigations"]],
+  # TODO(rmcilroy): Remove trusted variant once bots don't use it.
+  "trusted": [[]],
   "no_wasm_traps": [["--no-wasm-trap-handler"]],
   "turboprop": [["--turboprop"]],
   "turboprop_as_toptier": [["--turboprop-as-toptier", "--turboprop"]],
diff --git a/test/parallel/test-v8-untrusted-code-mitigations.js b/test/parallel/test-v8-untrusted-code-mitigations.js
deleted file mode 100644
index cb6360beb74bb0..00000000000000
--- a/test/parallel/test-v8-untrusted-code-mitigations.js
+++ /dev/null
@@ -1,19 +0,0 @@
-'use strict';
-
-require('../common');
-const assert = require('assert');
-const { execFileSync } = require('child_process');
-
-// This test checks that untrusted code mitigations in V8 are disabled
-// by default.
-
-const v8Options = execFileSync(process.execPath, ['--v8-options']).toString();
-
-const untrustedFlag = v8Options.indexOf('--untrusted-code-mitigations');
-assert.notStrictEqual(untrustedFlag, -1);
-
-const nextFlag = v8Options.indexOf('--', untrustedFlag + 2);
-const slice = v8Options.substring(untrustedFlag, nextFlag);
-
-// eslint-disable-next-line no-regex-spaces
-assert(slice.match(/type: bool  default: false/));