From fa45d6a3586234588a2c23389d1a3b246c9b74c2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Micha=C3=ABl=20Zasso?=
Date: Sat, 17 Apr 2021 17:34:16 +0200
Subject: [PATCH] deps: V8: cherry-pick 93b2105fbe44

Original commit message:

    [wasm][ia32][liftoff] Implement AtomicCompareExchange

    As there are not enough registers on ia32 to execute the platform-
    independent code, the CL also adds ia32-specific code to
    liftoff-compiler.cc. For this we first retrieve the memory index from
    the stack, do a bounds check, and calculate the final address. Only
    afterwards do we pop all other values from the stack and pass them to
    the platform-dependent code.

    R=clemensb@chromium.org

    Bug: v8:10108
    Change-Id: I741266a9523c8b5c46acc0b29817fd143a75752e
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2316305
    Reviewed-by: Clemens Backes
    Commit-Queue: Andreas Haas
    Cr-Commit-Position: refs/heads/master@{#69047}

Refs: https://github.com/v8/v8/commit/93b2105fbe44c7d1260bbc8b466042624002e9c6
PR-URL: https://github.com/nodejs/node/pull/38275
Reviewed-By: Matteo Collina
Reviewed-By: Jiawen Geng
Reviewed-By: Shelley Vohr
---
 common.gypi                                   |   2 +-
 .../baseline/ia32/liftoff-assembler-ia32.h    | 118 +++++++++++++++++-
 deps/v8/src/wasm/baseline/liftoff-compiler.cc |  35 +++++-
 3 files changed, 150 insertions(+), 5 deletions(-)

diff --git a/common.gypi b/common.gypi
index 27fa34f8adf75a..621b8627944482 100644
--- a/common.gypi
+++ b/common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.53',
+    'v8_embedder_string': '-node.54',
 
    ##### V8 defaults for Node.js #####
 
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index 7a1d629bf2dddd..b9466195236cfc 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -532,7 +532,123 @@ void LiftoffAssembler::AtomicCompareExchange(
     Register dst_addr, Register offset_reg, uint32_t offset_imm,
     LiftoffRegister expected, LiftoffRegister new_value,
     LiftoffRegister result, StoreType type) {
-  bailout(kAtomics, "AtomicCompareExchange");
+  // We expect that the offset has already been added to {dst_addr}, and no
+  // {offset_reg} is provided. This is to save registers.
+  DCHECK_EQ(offset_reg, no_reg);
+
+  DCHECK_EQ(result, expected);
+
+  if (type.value() != StoreType::kI64Store) {
+    bool is_64_bit_op = type.value_type() == kWasmI64;
+
+    Register value_reg = is_64_bit_op ? new_value.low_gp() : new_value.gp();
+    Register expected_reg = is_64_bit_op ? expected.low_gp() : expected.gp();
+    Register result_reg = expected_reg;
+
+    bool is_byte_store = type.size() == 1;
+    LiftoffRegList pinned =
+        LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg);
+
+    // Ensure that {value_reg} is a valid register.
+    if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
+      Register safe_value_reg =
+          pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+      mov(safe_value_reg, value_reg);
+      value_reg = safe_value_reg;
+      pinned.clear(LiftoffRegister(value_reg));
+    }
+
+    // The cmpxchg instruction uses eax to store the old value of the
+    // compare-exchange primitive. Therefore we have to spill the register and
+    // move any use to another register.
+    ClearRegister(eax, {&dst_addr, &value_reg}, pinned);
+    if (expected_reg != eax) {
+      mov(eax, expected_reg);
+    }
+
+    Operand dst_op = Operand(dst_addr, offset_imm);
+
+    lock();
+    switch (type.value()) {
+      case StoreType::kI32Store8:
+      case StoreType::kI64Store8: {
+        cmpxchg_b(dst_op, value_reg);
+        movzx_b(result_reg, eax);
+        break;
+      }
+      case StoreType::kI32Store16:
+      case StoreType::kI64Store16: {
+        cmpxchg_w(dst_op, value_reg);
+        movzx_w(result_reg, eax);
+        break;
+      }
+      case StoreType::kI32Store:
+      case StoreType::kI64Store32: {
+        cmpxchg(dst_op, value_reg);
+        if (result_reg != eax) {
+          mov(result_reg, eax);
+        }
+        break;
+      }
+      default:
+        UNREACHABLE();
+    }
+    if (is_64_bit_op) {
+      xor_(result.high_gp(), result.high_gp());
+    }
+    return;
+  }
+
+  // The following code handles kExprI64AtomicCompareExchange.
+
+  // We need {ebx} here, which is the root register. The root register
+  // needs special treatment. As we use {ebx} directly in the code below, we
+  // have to make sure here that the root register is actually {ebx}.
+  static_assert(kRootRegister == ebx,
+                "The following code assumes that kRootRegister == ebx");
+  push(kRootRegister);
+
+  // The compare-exchange instruction uses registers as follows:
+  // old-value = EDX:EAX; new-value = ECX:EBX.
+  Register expected_hi = edx;
+  Register expected_lo = eax;
+  Register new_hi = ecx;
+  Register new_lo = ebx;
+  // The address needs a separate register that does not alias with the
+  // ones above.
+  Register address = esi;
+
+  // Spill all these registers if they are still holding other values.
+  liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);
+
+  // We have to set new_lo specially, because it's the root register. We do it
+  // before setting all other registers so that the original value does not get
+  // overwritten.
+  mov(new_lo, new_value.low_gp());
+
+  // Move all other values into the right register.
+  {
+    LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+        {LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
+        {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
+        {LiftoffRegister(new_hi), new_value.high(), kWasmI32}};
+    ParallelRegisterMove(ArrayVector(reg_moves));
+  }
+
+  Operand dst_op = Operand(address, offset_imm);
+
+  lock();
+  cmpxchg8b(dst_op);
+
+  // Restore the root register, and we are done.
+  pop(kRootRegister);
+
+  // Move the result into the correct registers.
+  {
+    LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+        {result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}};
+    ParallelRegisterMove(ArrayVector(reg_moves));
+  }
 }
 
 void LiftoffAssembler::AtomicFence() { mfence(); }
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index 4d0d9dbcecac3a..86c33b387ec563 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2879,9 +2879,38 @@ class LiftoffCompiler {
   void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
                              const MemoryAccessImmediate<validate>& imm) {
 #ifdef V8_TARGET_ARCH_IA32
-    // With the current implementation we do not have enough registers on ia32
-    // to even get to the platform-specific code. Therefore we bailout early.
-    unsupported(decoder, kAtomics, "AtomicCompareExchange");
+    // On ia32 we don't have enough registers to first pop all the values off
+    // the stack and then start with the code generation. Instead we do the
+    // complete address calculation first, so that the address only needs a
+    // single register. Afterwards we load all remaining values into the
+    // other registers.
+    LiftoffRegList pinned;
+    Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
+    if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned,
+                       kDoForceCheck)) {
+      return;
+    }
+    AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);
+
+    uint32_t offset = imm.offset;
+    index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
+    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
+    __ emit_i32_add(addr, addr, index_reg);
+    pinned.clear(LiftoffRegister(index_reg));
+    LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
+    LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));
+
+    // Pop the index from the stack.
+    __ cache_state()->stack_state.pop_back(1);
+
+    LiftoffRegister result = expected;
+
+    // We already added the index to addr, so we can just pass no_reg to the
+    // assembler now.
+    __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
+                             type);
+    __ PushRegister(type.value_type(), result);
     return;
 #else
     ValueType result_type = type.value_type();
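
Note (not part of the patch): the snippet below is a minimal stand-alone C++
sketch of the compare-exchange semantics that the new ia32 code implements,
with the helper name AtomicCompareExchangeU64 invented purely for
illustration. The operation compares the 64-bit value at an address against
"expected", stores "new_value" only on a match, and in both cases yields the
value that was previously in memory. On ia32, compilers lower the 8-byte
compare_exchange to lock cmpxchg8b, the same instruction the patch emits,
which pins the old value in EDX:EAX and the new value in ECX:EBX; that fixed
use of ebx is why the patch has to temporarily free V8's root register.

#include <atomic>
#include <cstdint>
#include <cstdio>

// Illustrative sketch of i64.atomic.rmw.cmpxchg semantics; on ia32 the
// 8-byte exchange compiles down to lock cmpxchg8b.
uint64_t AtomicCompareExchangeU64(std::atomic<uint64_t>* addr,
                                  uint64_t expected, uint64_t new_value) {
  // On failure, compare_exchange_strong writes the actual memory value into
  // {expected}; on success, {expected} already holds the old value. Either
  // way, returning it yields the previous contents of memory.
  addr->compare_exchange_strong(expected, new_value);
  return expected;
}

int main() {
  std::atomic<uint64_t> cell{42};
  // Match: 42 is replaced by 7, and the old value 42 is returned.
  std::printf("%llu\n", static_cast<unsigned long long>(
                            AtomicCompareExchangeU64(&cell, 42, 7)));
  // Mismatch: memory holds 7, so nothing is stored and 7 is returned.
  std::printf("%llu\n", static_cast<unsigned long long>(
                            AtomicCompareExchangeU64(&cell, 42, 9)));
  return 0;
}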