deps: V8: cherry-pick 6771d3e31883
Original commit message:

    [wasm][liftoff][cleanup] Remove default parameter of GetUnusedRegister

    This CL removes the default parameter of GetUnusedRegister to avoid bugs
    where the default parameter is used accidentally. With "{}" the default
    value of the parameter is easy to write, and not much more difficult to read.

    R=clemensb@chromium.org

    Bug: v8:10506
    Change-Id: I3debe5eb91578c82abdac81dc6c252435fdf30d6
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2202991
    Reviewed-by: Clemens Backes <clemensb@chromium.org>
    Commit-Queue: Andreas Haas <ahaas@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#67822}

Refs: v8/v8@6771d3e

PR-URL: #38275
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Shelley Vohr <codebytere@gmail.com>
targos committed Apr 30, 2021
1 parent 9829378 commit 4d0bc38
Showing 8 changed files with 54 additions and 53 deletions.
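
The change is mechanical at every call site: a call like GetUnusedRegister(kGpReg) becomes GetUnusedRegister(kGpReg, {}). As a rough illustration of why dropping the default argument helps (a minimal standalone sketch, not V8 code; Register, RegList, and the 16-register pool below are simplified stand-ins):

#include <set>

// Simplified stand-ins for illustration only.
struct Register { int code; };
using RegList = std::set<int>;  // set of pinned register codes

// Old style: a defaulted {pinned} list lets callers omit it silently.
Register GetUnusedRegisterOld(const RegList& pinned = {}) {
  for (int code = 0; code < 16; ++code) {
    if (pinned.count(code) == 0) return Register{code};
  }
  return Register{-1};  // no register available
}

// New style: every caller must spell out the pinned list, even when empty.
Register GetUnusedRegisterNew(const RegList& pinned) {
  for (int code = 0; code < 16; ++code) {
    if (pinned.count(code) == 0) return Register{code};
  }
  return Register{-1};  // no register available
}

int main() {
  RegList pinned = {0, 1};
  Register a = GetUnusedRegisterOld();        // compiles even if passing {pinned} was intended
  Register b = GetUnusedRegisterNew({});      // explicit: no pinned registers
  Register c = GetUnusedRegisterNew(pinned);  // explicit: skips registers 0 and 1
  return a.code + b.code + c.code;
}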
common.gypi (2 changes: 1 addition & 1 deletion)
@@ -36,7 +36,7 @@

# Reset this number to 0 on major V8 upgrades.
# Increment by one for each non-official patch applied to deps/v8.
- 'v8_embedder_string': '-node.55',
+ 'v8_embedder_string': '-node.56',

##### V8 defaults for Node.js #####

deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h (10 changes: 5 additions & 5 deletions)
@@ -437,7 +437,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed());
break;
case ValueType::kF64: {
- Register extra_scratch = GetUnusedRegister(kGpReg).gp();
+ Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp();
vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch);
break;
}
@@ -1173,7 +1173,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -1216,7 +1216,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
// The scratch register will be required by str if multiple instructions
// are required to encode the offset, and so we cannot use it in that case.
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) {
- src = GetUnusedRegister(kGpReg).gp();
+ src = GetUnusedRegister(kGpReg, {}).gp();
} else {
src = temps.Acquire();
}
@@ -1758,7 +1758,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovLow(scratch, lhs);
// Clear sign bit in {scratch}.
@@ -1777,7 +1777,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
// On arm, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
UseScratchRegisterScope temps(this);
- Register scratch = GetUnusedRegister(kGpReg).gp();
+ Register scratch = GetUnusedRegister(kGpReg, {}).gp();
Register scratch2 = temps.Acquire();
VmovHigh(scratch, lhs);
// Clear sign bit in {scratch}.
deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h (20 changes: 10 additions & 10 deletions)
@@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) {
if (candidate.is_byte_register()) return candidate;
// {GetUnusedRegister()} may insert move instructions to spill registers to
// the stack. This is OK because {mov} does not change the status flags.
- return assm->GetUnusedRegister(liftoff::kByteRegs).gp();
+ return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp();
}

inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src,
@@ -1468,7 +1468,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst,

// We need one tmp register to extract the sign bit. Get it right at the
// beginning, such that the spilling code is not accidentially jumped over.
- Register tmp = assm->GetUnusedRegister(kGpReg).gp();
+ Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp();

#define dop(name, ...) \
do { \
@@ -1531,9 +1531,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
DoubleRegister rhs) {
static constexpr int kF32SignBit = 1 << 31;
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
-     GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();
Movd(scratch, lhs); // move {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
Movd(scratch2, rhs); // move {rhs} into {scratch2}.
@@ -1660,9 +1660,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
static constexpr int kF32SignBit = 1 << 31;
// On ia32, we cannot hold the whole f64 value in a gp register, so we just
// operate on the upper half (UH).
- Register scratch = GetUnusedRegister(kGpReg).gp();
- Register scratch2 =
-     GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp();
+ LiftoffRegList pinned;
+ Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp();
+ Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp();

Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}.
and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}.
@@ -2500,7 +2500,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc);
+ LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
byte shift = static_cast<byte>(rhs & 0x7);
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
@@ -3389,7 +3389,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

deps/v8/src/wasm/baseline/liftoff-assembler.cc (4 changes: 2 additions & 2 deletions)
@@ -542,7 +542,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot,
if (slot.is_reg()) {
return half == kLowWord ? slot.reg().low() : slot.reg().high();
}
- LiftoffRegister dst = GetUnusedRegister(kGpReg);
+ LiftoffRegister dst = GetUnusedRegister(kGpReg, {});
if (slot.is_stack()) {
FillI64Half(dst.gp(), slot.offset(), half);
return dst;
@@ -581,7 +581,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) {
if (!slot.is_const()) continue;
RegClass rc =
kNeedI64RegPair && slot.type() == kWasmI64 ? kGpRegPair : kGpReg;
- LiftoffRegister reg = GetUnusedRegister(rc);
+ LiftoffRegister reg = GetUnusedRegister(rc, {});
LoadConstant(reg, slot.constant());
slot.MakeRegister(reg);
cache_state_.inc_used(reg);
deps/v8/src/wasm/baseline/liftoff-assembler.h (4 changes: 2 additions & 2 deletions)
@@ -340,7 +340,7 @@ class LiftoffAssembler : public TurboAssembler {
// possible.
LiftoffRegister GetUnusedRegister(
RegClass rc, std::initializer_list<LiftoffRegister> try_first,
- LiftoffRegList pinned = {}) {
+ LiftoffRegList pinned) {
for (LiftoffRegister reg : try_first) {
DCHECK_EQ(reg.reg_class(), rc);
if (cache_state_.is_free(reg)) return reg;
@@ -349,7 +349,7 @@ }
}

// Get an unused register for class {rc}, potentially spilling to free one.
- LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) {
+ LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) {
if (kNeedI64RegPair && rc == kGpRegPair) {
LiftoffRegList candidates = kGpCacheRegList;
Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp();
deps/v8/src/wasm/baseline/liftoff-compiler.cc (51 changes: 26 additions & 25 deletions)
@@ -495,7 +495,7 @@ class LiftoffCompiler {
position, __ cache_state()->used_registers,
RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling)));
OutOfLineCode& ool = out_of_line_code_.back();
- Register limit_address = __ GetUnusedRegister(kGpReg).gp();
+ Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize);
__ StackCheck(ool.label.get(), limit_address);
__ bind(ool.continuation.get());
@@ -604,7 +604,7 @@ class LiftoffCompiler {
*next_breakpoint_ptr_ == decoder->position());
if (!has_breakpoint) {
DEBUG_CODE_COMMENT("check hook on function call");
- Register flag = __ GetUnusedRegister(kGpReg).gp();
+ Register flag = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress,
kSystemPointerSize);
Label no_break;
@@ -923,8 +923,8 @@ class LiftoffCompiler {
constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister src = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {src})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {src}, {})
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src);
__ PushRegister(ValueType(result_type), dst);
}
@@ -951,8 +951,9 @@ class LiftoffCompiler {
static constexpr RegClass src_rc = reg_class_for(src_type);
static constexpr RegClass dst_rc = reg_class_for(dst_type);
LiftoffRegister src = __ PopToRegister();
- LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src})
- : __ GetUnusedRegister(dst_rc);
+ LiftoffRegister dst = src_rc == dst_rc
+ ? __ GetUnusedRegister(dst_rc, {src}, {})
+ : __ GetUnusedRegister(dst_rc, {});
DCHECK_EQ(!!can_trap, trap_position > 0);
Label* trap = can_trap ? AddOutOfLineTrap(
trap_position,
@@ -1122,8 +1123,8 @@ class LiftoffCompiler {

LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});

CallEmitFn(fnImm, dst, lhs, imm);
__ PushRegister(ValueType(result_type), dst);
@@ -1141,8 +1142,8 @@ class LiftoffCompiler {
LiftoffRegister rhs = __ PopToRegister();
LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs));
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs, rhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {})
+ : __ GetUnusedRegister(result_rc, {});

if (swap_lhs_rhs) std::swap(lhs, rhs);

@@ -1483,20 +1484,20 @@ class LiftoffCompiler {
if (value_i32 == value) {
__ PushConstant(kWasmI64, value_i32);
} else {
- LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64));
+ LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmI64, reg);
}
}

void F32Const(FullDecoder* decoder, Value* result, float value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF32, reg);
}

void F64Const(FullDecoder* decoder, Value* result, double value) {
- LiftoffRegister reg = __ GetUnusedRegister(kFpReg);
+ LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {});
__ LoadConstant(reg, WasmValue(value));
__ PushRegister(kWasmF64, reg);
}
@@ -1546,7 +1547,7 @@ class LiftoffCompiler {
break;
case kStack: {
auto rc = reg_class_for(imm.type);
- LiftoffRegister reg = __ GetUnusedRegister(rc);
+ LiftoffRegister reg = __ GetUnusedRegister(rc, {});
__ Fill(reg, slot.offset(), imm.type);
__ PushRegister(slot.type(), reg);
break;
@@ -1570,7 +1571,7 @@ }
}
DCHECK_EQ(type, __ local_type(local_index));
RegClass rc = reg_class_for(type);
- LiftoffRegister dst_reg = __ GetUnusedRegister(rc);
+ LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {});
__ Fill(dst_reg, src_slot.offset(), type);
*dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset());
__ cache_state()->inc_used(dst_reg);
@@ -1609,7 +1610,7 @@ class LiftoffCompiler {

Register GetGlobalBaseAndOffset(const WasmGlobal* global,
LiftoffRegList* pinned, uint32_t* offset) {
- Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp();
+ Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp();
if (global->mutability && global->imported) {
LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize);
__ Load(LiftoffRegister(addr), addr, no_reg,
@@ -1675,8 +1676,8 @@ class LiftoffCompiler {
DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type());
LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned));
LiftoffRegister true_value = __ PopToRegister(pinned);
- LiftoffRegister dst =
- __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value});
+ LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(),
+ {true_value, false_value}, {});
__ PushRegister(type, dst);

// Now emit the actual code to move either {true_value} or {false_value}
@@ -2075,7 +2076,7 @@ class LiftoffCompiler {
}

void CurrentMemoryPages(FullDecoder* decoder, Value* result) {
- Register mem_size = __ GetUnusedRegister(kGpReg).gp();
+ Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp();
LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize);
__ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2);
__ PushRegister(kWasmI32, LiftoffRegister(mem_size));
@@ -2344,7 +2345,7 @@ class LiftoffCompiler {
src_rc == result_rc
? __ GetUnusedRegister(result_rc, {src3},
LiftoffRegList::ForRegs(src1, src2))
- : __ GetUnusedRegister(result_rc);
+ : __ GetUnusedRegister(result_rc, {});
CallEmitFn(fn, dst, src1, src2, src3);
__ PushRegister(ValueType(result_type), dst);
}
@@ -2360,14 +2361,14 @@ class LiftoffCompiler {
int32_t imm = rhs_slot.i32_const();

LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});

CallEmitFn(fnImm, dst, operand, imm);
__ PushRegister(kWasmS128, dst);
} else {
LiftoffRegister count = __ PopToRegister();
LiftoffRegister operand = __ PopToRegister();
- LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand});
+ LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {});

CallEmitFn(fn, dst, operand, count);
__ PushRegister(kWasmS128, dst);
@@ -2689,8 +2690,8 @@ class LiftoffCompiler {
static constexpr RegClass result_rc = reg_class_for(result_type);
LiftoffRegister lhs = __ PopToRegister();
LiftoffRegister dst = src_rc == result_rc
- ? __ GetUnusedRegister(result_rc, {lhs})
- : __ GetUnusedRegister(result_rc);
+ ? __ GetUnusedRegister(result_rc, {lhs}, {})
+ : __ GetUnusedRegister(result_rc, {});
fn(dst, lhs, imm.lane);
__ PushRegister(ValueType(result_type), dst);
}
@@ -2716,7 +2717,7 @@ class LiftoffCompiler {
(src2_rc == result_rc || pin_src2)
? __ GetUnusedRegister(result_rc, {src1},
LiftoffRegList::ForRegs(src2))
- : __ GetUnusedRegister(result_rc, {src1});
+ : __ GetUnusedRegister(result_rc, {src1}, {});
fn(dst, src1, src2, imm.lane);
__ PushRegister(kWasmS128, dst);
}
deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h (8 changes: 4 additions & 4 deletions)
@@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpRegPair);
+ LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {});

int32_t low_word = value.to_i64();
int32_t high_word = value.to_i64() >> 32;
@@ -2251,7 +2251,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}

deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h (8 changes: 4 additions & 4 deletions)
@@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
ValueType type) {
DCHECK_NE(dst_offset, src_offset);
- LiftoffRegister reg = GetUnusedRegister(reg_class_for(type));
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {});
Fill(reg, src_offset, type);
Spill(dst_offset, reg, type);
}
@@ -582,13 +582,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) {
MemOperand dst = liftoff::GetStackSlot(offset);
switch (value.type().kind()) {
case ValueType::kI32: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
sw(tmp.gp(), dst);
break;
}
case ValueType::kI64: {
- LiftoffRegister tmp = GetUnusedRegister(kGpReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
TurboAssembler::li(tmp.gp(), value.to_i64());
sd(tmp.gp(), dst);
break;
@@ -2197,7 +2197,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
}

void LiftoffAssembler::CallTrapCallbackForTesting() {
- PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp());
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
}
