Skip to content

Commit

Permalink
deps: V8: cherry-pick 93b2105fbe44
Browse files Browse the repository at this point in the history
Original commit message:

    [wasm][ia32][liftoff] Implement AtomicCompareExchange

    As there are not enough registers on ia32 to execute the platform-
    independent code, the CL also adds ia32-specific code to
    liftoff-compiler.cc. For this we first retrieve the memory index from
    the stack, do a bounds check, and calculate the final address. Only
    afterwards do we pop all the other values from the stack and pass them
    to the platform-dependent code.

    R=clemensb@chromium.org

    Bug: v8:10108
    Change-Id: I741266a9523c8b5c46acc0b29817fd143a75752e
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2316305
    Reviewed-by: Clemens Backes <clemensb@chromium.org>
    Commit-Queue: Andreas Haas <ahaas@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#69047}

Refs: v8/v8@93b2105

PR-URL: #38275
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
  • Loading branch information
targos committed Apr 30, 2021
1 parent c5fe3a2 commit fa45d6a
Show file tree
Hide file tree
Showing 3 changed files with 150 additions and 5 deletions.
2 changes: 1 addition & 1 deletion common.gypi
Expand Up @@ -36,7 +36,7 @@

# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
'v8_embedder_string': '-node.53',
'v8_embedder_string': '-node.54',

##### V8 defaults for Node.js #####

Expand Down
118 changes: 117 additions & 1 deletion deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
Expand Up @@ -532,7 +532,123 @@ void LiftoffAssembler::AtomicCompareExchange(
Register dst_addr, Register offset_reg, uint32_t offset_imm,
LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
StoreType type) {
bailout(kAtomics, "AtomicCompareExchange");
// We expect that the offset has already been added to {dst_addr}, and no
// {offset_reg} is provided. This is to save registers.
DCHECK_EQ(offset_reg, no_reg);

DCHECK_EQ(result, expected);

if (type.value() != StoreType::kI64Store) {
bool is_64_bit_op = type.value_type() == kWasmI64;

Register value_reg = is_64_bit_op ? new_value.low_gp() : new_value.gp();
Register expected_reg = is_64_bit_op ? expected.low_gp() : expected.gp();
Register result_reg = expected_reg;

bool is_byte_store = type.size() == 1;
LiftoffRegList pinned =
LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg);

// Ensure that {value_reg} is a valid register.
if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
Register safe_value_reg =
pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
mov(safe_value_reg, value_reg);
value_reg = safe_value_reg;
pinned.clear(LiftoffRegister(value_reg));
}

// The cmpxchg instruction uses eax to store the old value of the
// compare-exchange primitive. Therefore we have to spill the register and
// move any use to another register.
ClearRegister(eax, {&dst_addr, &value_reg}, pinned);
if (expected_reg != eax) {
mov(eax, expected_reg);
}

Operand dst_op = Operand(dst_addr, offset_imm);

lock();
switch (type.value()) {
case StoreType::kI32Store8:
case StoreType::kI64Store8: {
cmpxchg_b(dst_op, value_reg);
movzx_b(result_reg, eax);
break;
}
case StoreType::kI32Store16:
case StoreType::kI64Store16: {
cmpxchg_w(dst_op, value_reg);
movzx_w(result_reg, eax);
break;
}
case StoreType::kI32Store:
case StoreType::kI64Store32: {
cmpxchg(dst_op, value_reg);
if (result_reg != eax) {
mov(result_reg, eax);
}
break;
}
default:
UNREACHABLE();
}
if (is_64_bit_op) {
xor_(result.high_gp(), result.high_gp());
}
return;
}

// The following code handles kExprI64AtomicCompareExchange.

// We need {ebx} here, which is the root register. The root register it
// needs special treatment. As we use {ebx} directly in the code below, we
// have to make sure here that the root register is actually {ebx}.
static_assert(kRootRegister == ebx,
"The following code assumes that kRootRegister == ebx");
push(kRootRegister);

// The compare-exchange instruction uses registers as follows:
// old-value = EDX:EAX; new-value = ECX:EBX.
Register expected_hi = edx;
Register expected_lo = eax;
Register new_hi = ecx;
Register new_lo = ebx;
// The address needs a separate registers that does not alias with the
// ones above.
Register address = esi;

// Spill all these registers if they are still holding other values.
liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);

// We have to set new_lo specially, because it's the root register. We do it
// before setting all other registers so that the original value does not get
// overwritten.
mov(new_lo, new_value.low_gp());

// Move all other values into the right register.
{
LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
{LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
{LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
{LiftoffRegister(new_hi), new_value.high(), kWasmI32}};
ParallelRegisterMove(ArrayVector(reg_moves));
};

Operand dst_op = Operand(address, offset_imm);

lock();
cmpxchg8b(dst_op);

// Restore the root register, and we are done.
pop(kRootRegister);

// Move the result into the correct registers.
{
LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
{result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}};
ParallelRegisterMove(ArrayVector(reg_moves));
}
}

void LiftoffAssembler::AtomicFence() { mfence(); }
Expand Down
35 changes: 32 additions & 3 deletions deps/v8/src/wasm/baseline/liftoff-compiler.cc
Expand Up @@ -2879,9 +2879,38 @@ class LiftoffCompiler {
void AtomicCompareExchange(FullDecoder* decoder, StoreType type,
const MemoryAccessImmediate<validate>& imm) {
#ifdef V8_TARGET_ARCH_IA32
// With the current implementation we do not have enough registers on ia32
// to even get to the platform-specific code. Therefore we bailout early.
unsupported(decoder, kAtomics, "AtomicCompareExchange");
// On ia32 we don't have enough registers to first pop all the values off
// the stack and then start with the code generation. Instead we do the
// complete address calculation first, so that the address only needs a
// single register. Afterwards we load all remaining values into the
// other registers.
LiftoffRegList pinned;
Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp();
if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned,
kDoForceCheck)) {
return;
}
AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned);

uint32_t offset = imm.offset;
index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize);
__ emit_i32_add(addr, addr, index_reg);
pinned.clear(LiftoffRegister(index_reg));
LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister expected = pinned.set(__ PopToRegister(pinned));

// Pop the index from the stack.
__ cache_state()->stack_state.pop_back(1);

LiftoffRegister result = expected;

// We already added the index to addr, so we can just pass no_reg to the
// assembler now.
__ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result,
type);
__ PushRegister(type.value_type(), result);
return;
#else
ValueType result_type = type.value_type();
Expand Down

0 comments on commit fa45d6a

Please sign in to comment.