diff --git a/Makefile b/Makefile index 09018aac3095e7..d13d843bc49c8e 100644 --- a/Makefile +++ b/Makefile @@ -648,8 +648,7 @@ test-with-async-hooks: ifneq ("","$(wildcard deps/v8/tools/run-tests.py)") # Related CI job: node-test-commit-v8-linux test-v8: v8 ## Runs the V8 test suite on deps/v8. - deps/v8/tools/run-tests.py --gn --arch=$(V8_ARCH) \ - --mode=$(BUILDTYPE_LOWER) $(V8_TEST_OPTIONS) \ + deps/v8/tools/run-tests.py --gn --arch=$(V8_ARCH) $(V8_TEST_OPTIONS) \ mjsunit cctest debugger inspector message preparser \ $(TAP_V8) $(info Testing hash seed) diff --git a/common.gypi b/common.gypi index 1e05129315c2de..79d4e69615aecb 100644 --- a/common.gypi +++ b/common.gypi @@ -36,7 +36,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.29', + 'v8_embedder_string': '-node.60', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS index 6f602e44560bca..13a915c130b7a2 100644 --- a/deps/v8/AUTHORS +++ b/deps/v8/AUTHORS @@ -67,6 +67,7 @@ Ben Newman Ben Noordhuis Benjamin Tan Bert Belder +Brendon Tiszka Burcu Dogan Caitlin Potter Craig Schlenter diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl index 72f739487ccec3..113413bbe8c331 100644 --- a/deps/v8/infra/testing/builders.pyl +++ b/deps/v8/infra/testing/builders.pyl @@ -1078,7 +1078,7 @@ {'name': 'v8testing', 'variant': 'slow_path', 'shards': 1}, ], }, - 'V8 Linux64 TSAN - concurrent marking': { + 'V8 Linux64 TSAN - stress-incremental-marking': { 'swarming_dimensions' : { 'os': 'Ubuntu-16.04', }, @@ -1105,7 +1105,7 @@ { 'name': 'v8testing', 'test_args': ['--extra-flags=--stress-incremental-marking'], - 'shards': 4, + 'shards': 6, }, ], }, diff --git a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc index b583b5e4214ad4..8c9318bfe7475d 100644 --- a/deps/v8/src/ast/ast-function-literal-id-reindexer.cc +++ b/deps/v8/src/ast/ast-function-literal-id-reindexer.cc @@ -54,10 +54,10 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { // Private fields have their key and value present in // instance_members_initializer_function, so they will // already have been visited. - if (prop->value()->IsFunctionLiteral()) { - Visit(prop->value()); - } else { + if (prop->kind() == ClassLiteralProperty::Kind::FIELD) { CheckVisited(prop->value()); + } else { + Visit(prop->value()); } } ZonePtrList* props = expr->public_members(); @@ -67,7 +67,8 @@ void AstFunctionLiteralIdReindexer::VisitClassLiteral(ClassLiteral* expr) { // Public fields with computed names have their key // and value present in instance_members_initializer_function, so they will // already have been visited. 
- if (prop->is_computed_name() && !prop->value()->IsFunctionLiteral()) { + if (prop->is_computed_name() && + prop->kind() == ClassLiteralProperty::Kind::FIELD) { if (!prop->key()->IsLiteral()) { CheckVisited(prop->key()); } diff --git a/deps/v8/src/builtins/builtins-array.cc b/deps/v8/src/builtins/builtins-array.cc index 3c2fe33c5b4b33..8055d8382d48f6 100644 --- a/deps/v8/src/builtins/builtins-array.cc +++ b/deps/v8/src/builtins/builtins-array.cc @@ -649,11 +649,14 @@ class ArrayConcatVisitor { index_offset_(0u), bit_field_(FastElementsField::encode(fast_elements) | ExceedsLimitField::encode(false) | - IsFixedArrayField::encode(storage->IsFixedArray()) | + IsFixedArrayField::encode(storage->IsFixedArray(isolate)) | HasSimpleElementsField::encode( - storage->IsFixedArray() || - !storage->map().IsCustomElementsReceiverMap())) { - DCHECK(!(this->fast_elements() && !is_fixed_array())); + storage->IsFixedArray(isolate) || + // Don't take fast path for storages that might have + // side effects when storing to them. + (!storage->map(isolate).IsCustomElementsReceiverMap() && + !storage->IsJSTypedArray(isolate)))) { + DCHECK_IMPLIES(this->fast_elements(), is_fixed_array()); } ~ArrayConcatVisitor() { clear_storage(); } @@ -1063,8 +1066,8 @@ bool IterateElements(Isolate* isolate, Handle receiver, return IterateElementsSlow(isolate, receiver, length, visitor); } - if (!HasOnlySimpleElements(isolate, *receiver) || - !visitor->has_simple_elements()) { + if (!visitor->has_simple_elements() || + !HasOnlySimpleElements(isolate, *receiver)) { return IterateElementsSlow(isolate, receiver, length, visitor); } Handle array = Handle::cast(receiver); @@ -1080,6 +1083,9 @@ bool IterateElements(Isolate* isolate, Handle receiver, case HOLEY_SEALED_ELEMENTS: case HOLEY_NONEXTENSIBLE_ELEMENTS: case HOLEY_ELEMENTS: { + // Disallow execution so the cached elements won't change mid execution. + DisallowJavascriptExecution no_js(isolate); + // Run through the elements FixedArray and use HasElement and GetElement // to check the prototype for missing elements. Handle elements(FixedArray::cast(array->elements()), isolate); @@ -1106,6 +1112,9 @@ bool IterateElements(Isolate* isolate, Handle receiver, } case HOLEY_DOUBLE_ELEMENTS: case PACKED_DOUBLE_ELEMENTS: { + // Disallow execution so the cached elements won't change mid execution. + DisallowJavascriptExecution no_js(isolate); + // Empty array is FixedArray but not FixedDoubleArray. if (length == 0) break; // Run through the elements FixedArray and use HasElement and GetElement @@ -1142,6 +1151,9 @@ bool IterateElements(Isolate* isolate, Handle receiver, } case DICTIONARY_ELEMENTS: { + // Disallow execution so the cached dictionary won't change mid execution. 
+ DisallowJavascriptExecution no_js(isolate); + Handle dict(array->element_dictionary(), isolate); std::vector indices; indices.reserve(dict->Capacity() / 2); diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc index 7e5fa8cef1c1c2..fa9aba33876f04 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc @@ -722,23 +722,22 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { - CallRecordWriteStub( - object, offset, remembered_set_action, fp_mode, - isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), - kNullAddress); + CallRecordWriteStub(object, offset, remembered_set_action, fp_mode, + Builtins::kRecordWrite, kNullAddress); } void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, Address wasm_target) { CallRecordWriteStub(object, offset, remembered_set_action, fp_mode, - Handle::null(), wasm_target); + Builtins::kNoBuiltinId, wasm_target); } void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, Address wasm_target) { - DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target) { + DCHECK_NE(builtin_index == Builtins::kNoBuiltinId, + wasm_target == kNullAddress); // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, // i.e. always emit remember set and save FP registers in RecordWriteStub. 
If // large performance regression is observed, we should use these values to @@ -762,9 +761,13 @@ void TurboAssembler::CallRecordWriteStub( Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); - if (code_target.is_null()) { + if (builtin_index == Builtins::kNoBuiltinId) { Call(wasm_target, RelocInfo::WASM_STUB_CALL); + } else if (options().inline_offheap_trampolines) { + CallBuiltin(builtin_index); } else { + Handle code_target = + isolate()->builtins()->builtin_handle(Builtins::kRecordWrite); Call(code_target, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h index a7dc5498b8b8ec..4821eb79c943fb 100644 --- a/deps/v8/src/codegen/arm/macro-assembler-arm.h +++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h @@ -588,7 +588,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallRecordWriteStub(Register object, Operand offset, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target); }; diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc index c157df29966975..c93f5797257d6f 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc @@ -2738,23 +2738,22 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset, void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { - CallRecordWriteStub( - object, offset, remembered_set_action, fp_mode, - isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), - kNullAddress); + CallRecordWriteStub(object, offset, remembered_set_action, fp_mode, + Builtins::kRecordWrite, kNullAddress); } void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, Address wasm_target) { CallRecordWriteStub(object, offset, remembered_set_action, fp_mode, - Handle::null(), wasm_target); + Builtins::kNoBuiltinId, wasm_target); } void TurboAssembler::CallRecordWriteStub( Register object, Operand offset, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, Address wasm_target) { - DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target) { + DCHECK_NE(builtin_index == Builtins::kNoBuiltinId, + wasm_target == kNullAddress); // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, // i.e. always emit remember set and save FP registers in RecordWriteStub. 
If // large performance regression is observed, we should use these values to @@ -2778,9 +2777,13 @@ void TurboAssembler::CallRecordWriteStub( Mov(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Mov(fp_mode_parameter, Smi::FromEnum(fp_mode)); - if (code_target.is_null()) { + if (builtin_index == Builtins::kNoBuiltinId) { Call(wasm_target, RelocInfo::WASM_STUB_CALL); + } else if (options().inline_offheap_trampolines) { + CallBuiltin(builtin_index); } else { + Handle code_target = + isolate()->builtins()->builtin_handle(Builtins::kRecordWrite); Call(code_target, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h index 109e73c3c229d4..e701a5b12a3168 100644 --- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h +++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h @@ -1419,7 +1419,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallRecordWriteStub(Register object, Operand offset, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target); }; diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc index 901ce0c7b49410..50fb563244cee4 100644 --- a/deps/v8/src/codegen/code-stub-assembler.cc +++ b/deps/v8/src/codegen/code-stub-assembler.cc @@ -1801,12 +1801,13 @@ TNode CodeStubAssembler::LoadJSReceiverIdentityHash( return var_hash.value(); } -TNode CodeStubAssembler::LoadNameHashField(SloppyTNode name) { - CSA_ASSERT(this, IsName(name)); - return LoadObjectField(name, Name::kHashFieldOffset); +TNode CodeStubAssembler::LoadNameHashAssumeComputed(TNode name) { + TNode hash_field = LoadNameHashField(name); + CSA_ASSERT(this, IsClearWord32(hash_field, Name::kHashNotComputedMask)); + return Unsigned(Word32Shr(hash_field, Int32Constant(Name::kHashShift))); } -TNode CodeStubAssembler::LoadNameHash(SloppyTNode name, +TNode CodeStubAssembler::LoadNameHash(TNode name, Label* if_hash_not_computed) { TNode hash_field = LoadNameHashField(name); if (if_hash_not_computed != nullptr) { @@ -1994,13 +1995,13 @@ TNode CodeStubAssembler::LoadArrayElement(TNode array, } } -template TNode +template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadArrayElement(TNode, int, Node*, int, ParameterMode, LoadSensitivity); -template TNode +template V8_EXPORT_PRIVATE TNode CodeStubAssembler::LoadArrayElement(TNode, int, Node*, int, ParameterMode, @@ -8063,7 +8064,7 @@ void CodeStubAssembler::LookupBinary(TNode unique_name, TNode limit = Unsigned(Int32Sub(NumberOfEntries(array), Int32Constant(1))); TVARIABLE(Uint32T, var_high, limit); - TNode hash = LoadNameHashField(unique_name); + TNode hash = LoadNameHashAssumeComputed(unique_name); CSA_ASSERT(this, Word32NotEqual(hash, Int32Constant(0))); // Assume non-empty array. 
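The LoadNameHashAssumeComputed helper introduced above reads the raw hash field, asserts Name::kHashNotComputedMask is clear, and shifts by Name::kHashShift to obtain the hash. A minimal standalone C++ sketch of that idea, with illustrative constant values (not V8's actual field layout):

#include <cassert>
#include <cstdint>

// Illustrative constants; V8 defines Name::kHashNotComputedMask and
// Name::kHashShift with its own values.
constexpr uint32_t kHashNotComputedMask = 1u;
constexpr int kHashShift = 2;

// Mirrors LoadNameHashAssumeComputed: assert the hash has been computed,
// then shift the flag bits out of the raw hash field.
uint32_t HashAssumeComputed(uint32_t hash_field) {
  assert((hash_field & kHashNotComputedMask) == 0);
  return hash_field >> kHashShift;
}

int main() {
  uint32_t field = 0x1234u << kHashShift;  // hash computed, flag bits clear
  return HashAssumeComputed(field) == 0x1234u ? 0 : 1;
}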
@@ -8081,7 +8082,7 @@ void CodeStubAssembler::LookupBinary(TNode unique_name, TNode sorted_key_index = GetSortedKeyIndex(array, mid); TNode mid_name = GetKey(array, sorted_key_index); - TNode mid_hash = LoadNameHashField(mid_name); + TNode mid_hash = LoadNameHashAssumeComputed(mid_name); Label mid_greater(this), mid_less(this), merge(this); Branch(Uint32GreaterThanOrEqual(mid_hash, hash), &mid_greater, &mid_less); @@ -8108,7 +8109,7 @@ void CodeStubAssembler::LookupBinary(TNode unique_name, TNode sort_index = GetSortedKeyIndex(array, var_low.value()); TNode current_name = GetKey(array, sort_index); - TNode current_hash = LoadNameHashField(current_name); + TNode current_hash = LoadNameHashAssumeComputed(current_name); GotoIf(Word32NotEqual(current_hash, hash), if_not_found); Label next(this); GotoIf(TaggedNotEqual(current_name, unique_name), &next); diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h index b01729c73db8d4..23b99377d8dccc 100644 --- a/deps/v8/src/codegen/code-stub-assembler.h +++ b/deps/v8/src/codegen/code-stub-assembler.h @@ -1353,13 +1353,12 @@ class V8_EXPORT_PRIVATE CodeStubAssembler // Check if the map is set for slow properties. TNode IsDictionaryMap(SloppyTNode map); - // Load the hash field of a name as an uint32 value. - TNode LoadNameHashField(SloppyTNode name); - // Load the hash value of a name as an uint32 value. + // Load the Name::hash() value of a name as an uint32 value. // If {if_hash_not_computed} label is specified then it also checks if // hash is actually computed. - TNode LoadNameHash(SloppyTNode name, + TNode LoadNameHash(TNode name, Label* if_hash_not_computed = nullptr); + TNode LoadNameHashAssumeComputed(TNode name); // Load length field of a String object as Smi value. 
TNode LoadStringLengthAsSmi(TNode string); diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h index 60d978df5be26f..ded1e020e28627 100644 --- a/deps/v8/src/codegen/ia32/assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/assembler-ia32.h @@ -959,6 +959,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void movapd(XMMRegister dst, Operand src) { sse2_instr(dst, src, 0x66, 0x0F, 0x28); } + void movupd(XMMRegister dst, Operand src) { + sse2_instr(dst, src, 0x66, 0x0F, 0x10); + } void movmskpd(Register dst, XMMRegister src); void movmskps(Register dst, XMMRegister src); @@ -1331,6 +1334,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { void vmovapd(XMMRegister dst, Operand src) { vpd(0x28, dst, xmm0, src); } void vmovups(XMMRegister dst, XMMRegister src) { vmovups(dst, Operand(src)); } void vmovups(XMMRegister dst, Operand src) { vps(0x10, dst, xmm0, src); } + void vmovupd(XMMRegister dst, Operand src) { vpd(0x10, dst, xmm0, src); } void vshufps(XMMRegister dst, XMMRegister src1, XMMRegister src2, byte imm8) { vshufps(dst, src1, Operand(src2), imm8); } diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc index b73050a680dc21..4b31481acdd85a 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc @@ -415,10 +415,8 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address, void TurboAssembler::CallRecordWriteStub( Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { - CallRecordWriteStub( - object, address, remembered_set_action, fp_mode, - isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), - kNullAddress); + CallRecordWriteStub(object, address, remembered_set_action, fp_mode, + Builtins::kRecordWrite, kNullAddress); } void TurboAssembler::CallRecordWriteStub( @@ -426,14 +424,15 @@ void TurboAssembler::CallRecordWriteStub( RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, Address wasm_target) { CallRecordWriteStub(object, address, remembered_set_action, fp_mode, - Handle::null(), wasm_target); + Builtins::kNoBuiltinId, wasm_target); } void TurboAssembler::CallRecordWriteStub( Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, - Handle code_target, Address wasm_target) { - DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); + int builtin_index, Address wasm_target) { + DCHECK_NE(builtin_index == Builtins::kNoBuiltinId, + wasm_target == kNullAddress); // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode, // i.e. always emit remember set and save FP registers in RecordWriteStub. If // large performance regression is observed, we should use these values to @@ -461,10 +460,14 @@ void TurboAssembler::CallRecordWriteStub( Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action)); Move(fp_mode_parameter, Smi::FromEnum(fp_mode)); - if (code_target.is_null()) { + if (builtin_index == Builtins::kNoBuiltinId) { // Use {wasm_call} for direct Wasm call within a module. 
wasm_call(wasm_target, RelocInfo::WASM_STUB_CALL); + } else if (options().inline_offheap_trampolines) { + CallBuiltin(builtin_index); } else { + Handle code_target = + isolate()->builtins()->builtin_handle(Builtins::kRecordWrite); Call(code_target, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h index 94ddb2f784795a..0847599295a9b5 100644 --- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h @@ -292,6 +292,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { AVX_OP2_WITH_TYPE(Movaps, movaps, XMMRegister, XMMRegister) AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, XMMRegister) AVX_OP2_WITH_TYPE(Movapd, movapd, XMMRegister, const Operand&) + AVX_OP2_WITH_TYPE(Movupd, movupd, XMMRegister, const Operand&) AVX_OP2_WITH_TYPE(Pmovmskb, pmovmskb, Register, XMMRegister) AVX_OP2_WITH_TYPE(Movmskps, movmskps, Register, XMMRegister) @@ -566,7 +567,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallRecordWriteStub(Register object, Register address, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target); }; diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h index 24eb9765782f21..c1c4194f9c3745 100644 --- a/deps/v8/src/codegen/x64/assembler-x64.h +++ b/deps/v8/src/codegen/x64/assembler-x64.h @@ -1562,6 +1562,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase { vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG); emit(imm8); } + void vpalignr(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8) { + vinstr(0x0F, dst, src1, src2, k66, k0F3A, kWIG); + emit(imm8); + } void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2); void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2); diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc index 7d6fdc5eb3d589..44e590843ebbc4 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc @@ -385,10 +385,8 @@ void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address, void TurboAssembler::CallRecordWriteStub( Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) { - CallRecordWriteStub( - object, address, remembered_set_action, fp_mode, - isolate()->builtins()->builtin_handle(Builtins::kRecordWrite), - kNullAddress); + CallRecordWriteStub(object, address, remembered_set_action, fp_mode, + Builtins::kRecordWrite, kNullAddress); } void TurboAssembler::CallRecordWriteStub( @@ -396,14 +394,15 @@ void TurboAssembler::CallRecordWriteStub( RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, Address wasm_target) { CallRecordWriteStub(object, address, remembered_set_action, fp_mode, - Handle::null(), wasm_target); + Builtins::kNoBuiltinId, wasm_target); } void TurboAssembler::CallRecordWriteStub( Register object, Register address, RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode, - Handle code_target, Address wasm_target) { - DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress); + int builtin_index, Address wasm_target) { + DCHECK_NE(builtin_index == Builtins::kNoBuiltinId, + wasm_target == kNullAddress); RecordWriteDescriptor descriptor; RegList registers = 
descriptor.allocatable_registers(); @@ -432,10 +431,14 @@ void TurboAssembler::CallRecordWriteStub( } else { movq(fp_mode_parameter, remembered_set_parameter); } - if (code_target.is_null()) { + if (builtin_index == Builtins::kNoBuiltinId) { // Use {near_call} for direct Wasm call within a module. near_call(wasm_target, RelocInfo::WASM_STUB_CALL); + } else if (options().inline_offheap_trampolines) { + CallBuiltin(builtin_index); } else { + Handle code_target = + isolate()->builtins()->builtin_handle(Builtins::kRecordWrite); Call(code_target, RelocInfo::CODE_TARGET); } diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h index 8382bf5a287bca..ea87002b08e862 100644 --- a/deps/v8/src/codegen/x64/macro-assembler-x64.h +++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h @@ -693,7 +693,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase { void CallRecordWriteStub(Register object, Register address, RememberedSetAction remembered_set_action, - SaveFPRegsMode fp_mode, Handle code_target, + SaveFPRegsMode fp_mode, int builtin_index, Address wasm_target); }; diff --git a/deps/v8/src/common/checks.h b/deps/v8/src/common/checks.h index ef9eb27ca07e9f..eef59701d1d4a9 100644 --- a/deps/v8/src/common/checks.h +++ b/deps/v8/src/common/checks.h @@ -18,9 +18,11 @@ namespace internal { #ifdef ENABLE_SLOW_DCHECKS #define SLOW_DCHECK(condition) \ CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition)) +#define SLOW_DCHECK_IMPLIES(lhs, rhs) SLOW_DCHECK(!(lhs) || (rhs)) V8_EXPORT_PRIVATE extern bool FLAG_enable_slow_asserts; #else #define SLOW_DCHECK(condition) ((void)0) +#define SLOW_DCHECK_IMPLIES(v1, v2) ((void)0) static const bool FLAG_enable_slow_asserts = false; #endif diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc index 72c5750035a19c..67e2ad74702e53 100644 --- a/deps/v8/src/compiler/backend/code-generator.cc +++ b/deps/v8/src/compiler/backend/code-generator.cc @@ -607,8 +607,8 @@ void CodeGenerator::GetPushCompatibleMoves(Instruction* instr, // then the full gap resolver must be used since optimization with // pushes don't participate in the parallel move and might clobber // values needed for the gap resolve. - if (source.IsStackSlot() && LocationOperand::cast(source).index() >= - first_push_compatible_index) { + if (source.IsAnyStackSlot() && LocationOperand::cast(source).index() >= + first_push_compatible_index) { pushes->clear(); return; } diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc index c673458c75371b..52371f9d1f9696 100644 --- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc @@ -1966,7 +1966,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( tmp = i.TempSimd128Register(0); // The minpd instruction doesn't propagate NaNs and +0's in its first // operand. Perform minpd in both orders, merge the resuls, and adjust. - __ Movapd(tmp, src1); + __ Movupd(tmp, src1); __ Minpd(tmp, tmp, src); __ Minpd(dst, src, src1); // propagate -0's and NaNs, which may be non-canonical. @@ -1985,7 +1985,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( tmp = i.TempSimd128Register(0); // The maxpd instruction doesn't propagate NaNs and +0's in its first // operand. Perform maxpd in both orders, merge the resuls, and adjust. 
- __ Movapd(tmp, src1); + __ Movupd(tmp, src1); __ Maxpd(tmp, tmp, src); __ Maxpd(dst, src, src1); // Find discrepancies. @@ -2375,7 +2375,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( XMMRegister dst = i.OutputSimd128Register(); Operand src1 = i.InputOperand(1); // See comment above for correction of maxps. - __ movaps(kScratchDoubleReg, src1); + __ vmovups(kScratchDoubleReg, src1); __ vmaxps(kScratchDoubleReg, kScratchDoubleReg, dst); __ vmaxps(dst, dst, src1); __ vxorps(dst, dst, kScratchDoubleReg); diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc index 4f99ad49ba8980..e32a98e78fc157 100644 --- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc +++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc @@ -579,10 +579,14 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, ASSEMBLE_SIMD_INSTR(opcode, dst, input_index); \ } while (false) -#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, imm) \ - do { \ - DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ - __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \ +#define ASSEMBLE_SIMD_IMM_SHUFFLE(opcode, imm) \ + do { \ + DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0)); \ + if (instr->InputAt(1)->IsSimd128Register()) { \ + __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1), imm); \ + } else { \ + __ opcode(i.OutputSimd128Register(), i.InputOperand(1), imm); \ + } \ } while (false) #define ASSEMBLE_SIMD_ALL_TRUE(opcode) \ diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc index dd3f556937d096..56dd17ac693e7e 100644 --- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc @@ -1270,7 +1270,9 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) { opcode = load_rep.IsSigned() ? kX64Movsxwq : kX64Movzxwq; break; case MachineRepresentation::kWord32: - opcode = load_rep.IsSigned() ? kX64Movsxlq : kX64Movl; + // ChangeInt32ToInt64 must interpret its input as a _signed_ 32-bit + // integer, so here we must sign-extend the loaded value in any case. + opcode = kX64Movsxlq; break; default: UNREACHABLE(); diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc index 1c2bf5bc0eafe6..aef85c5b48897d 100644 --- a/deps/v8/src/compiler/js-inlining-heuristic.cc +++ b/deps/v8/src/compiler/js-inlining-heuristic.cc @@ -259,10 +259,9 @@ void JSInliningHeuristic::Finalize() { Candidate candidate = *i; candidates_.erase(i); - // Make sure we don't try to inline dead candidate nodes. - if (candidate.node->IsDead()) { - continue; - } + // Ignore this candidate if it's no longer valid. + if (!IrOpcode::IsInlineeOpcode(candidate.node->opcode())) continue; + if (candidate.node->IsDead()) continue; // Make sure we have some extra budget left, so that any small functions // exposed by this function would be given a chance to inline. 
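The instruction-selector change above makes ChangeInt32ToInt64 always emit kX64Movsxlq: even when the loaded value was typed as unsigned, the operator's semantics require treating the 32-bit input as signed. A small standalone illustration of why zero- and sign-extension of the same bit pattern differ (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // A 32-bit load produces this bit pattern. As an unsigned value it is
  // 4294967295, but ChangeInt32ToInt64 must interpret it as the signed -1.
  uint32_t loaded = 0xFFFFFFFFu;
  int64_t zero_extended = static_cast<int64_t>(loaded);  // plain 32-bit move
  int64_t sign_extended =
      static_cast<int64_t>(static_cast<int32_t>(loaded));  // movsxlq-style
  std::printf("zero-extended: %lld, sign-extended: %lld\n",
              static_cast<long long>(zero_extended),
              static_cast<long long>(sign_extended));
  return 0;
}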
diff --git a/deps/v8/src/compiler/representation-change.cc b/deps/v8/src/compiler/representation-change.cc index 7077f7d643f633..0ef026aa6eea51 100644 --- a/deps/v8/src/compiler/representation-change.cc +++ b/deps/v8/src/compiler/representation-change.cc @@ -919,10 +919,10 @@ Node* RepresentationChanger::GetWord32RepresentationFor( return node; } else if (output_rep == MachineRepresentation::kWord64) { if (output_type.Is(Type::Signed32()) || - output_type.Is(Type::Unsigned32())) { - op = machine()->TruncateInt64ToInt32(); - } else if (output_type.Is(cache_->kSafeInteger) && - use_info.truncation().IsUsedAsWord32()) { + (output_type.Is(Type::Unsigned32()) && + use_info.type_check() == TypeCheckKind::kNone) || + (output_type.Is(cache_->kSafeInteger) && + use_info.truncation().IsUsedAsWord32())) { op = machine()->TruncateInt64ToInt32(); } else if (use_info.type_check() == TypeCheckKind::kSignedSmall || use_info.type_check() == TypeCheckKind::kSigned32 || diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc index d00acefc39c791..a9229617dad037 100644 --- a/deps/v8/src/compiler/simplified-lowering.cc +++ b/deps/v8/src/compiler/simplified-lowering.cc @@ -178,10 +178,16 @@ void ReplaceEffectControlUses(Node* node, Node* effect, Node* control) { } bool CanOverflowSigned32(const Operator* op, Type left, Type right, - Zone* type_zone) { - // We assume the inputs are checked Signed32 (or known statically - // to be Signed32). Technically, the inputs could also be minus zero, but - // that cannot cause overflow. + TypeCache const* type_cache, Zone* type_zone) { + // We assume the inputs are checked Signed32 (or known statically to be + // Signed32). Technically, the inputs could also be minus zero, which we treat + // as 0 for the purpose of this function. + if (left.Maybe(Type::MinusZero())) { + left = Type::Union(left, type_cache->kSingletonZero, type_zone); + } + if (right.Maybe(Type::MinusZero())) { + right = Type::Union(right, type_cache->kSingletonZero, type_zone); + } left = Type::Intersect(left, Type::Signed32(), type_zone); right = Type::Intersect(right, Type::Signed32(), type_zone); if (left.IsNone() || right.IsNone()) return false; @@ -1375,7 +1381,6 @@ class RepresentationSelector { IsSomePositiveOrderedNumber(input1_type) ? CheckForMinusZeroMode::kDontCheckForMinusZero : CheckForMinusZeroMode::kCheckForMinusZero; - NodeProperties::ChangeOp(node, simplified()->CheckedInt32Mul(mz_mode)); } @@ -1419,6 +1424,13 @@ class RepresentationSelector { Type left_feedback_type = TypeOf(node->InputAt(0)); Type right_feedback_type = TypeOf(node->InputAt(1)); + + // Using Signed32 as restriction type amounts to promising there won't be + // signed overflow. This is incompatible with relying on a Word32 + // truncation in order to skip the overflow check. + Type const restriction = + truncation.IsUsedAsWord32() ? Type::Any() : Type::Signed32(); + // Handle the case when no int32 checks on inputs are necessary (but // an overflow check is needed on the output). Note that we do not // have to do any check if at most one side can be minus zero. For @@ -1432,7 +1444,7 @@ class RepresentationSelector { right_upper.Is(Type::Signed32OrMinusZero()) && (left_upper.Is(Type::Signed32()) || right_upper.Is(Type::Signed32()))) { VisitBinop(node, UseInfo::TruncatingWord32(), - MachineRepresentation::kWord32, Type::Signed32()); + MachineRepresentation::kWord32, restriction); } else { // If the output's truncation is identify-zeros, we can pass it // along. 
Moreover, if the operation is addition and we know the @@ -1452,12 +1464,14 @@ class RepresentationSelector { UseInfo right_use = CheckedUseInfoAsWord32FromHint(hint, FeedbackSource(), kIdentifyZeros); VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32, - Type::Signed32()); + restriction); } + if (lower()) { if (truncation.IsUsedAsWord32() || !CanOverflowSigned32(node->op(), left_feedback_type, - right_feedback_type, graph_zone())) { + right_feedback_type, type_cache_, + graph_zone())) { ChangeToPureOp(node, Int32Op(node)); } else { diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc index 44c92f557046db..970723839ca0ad 100644 --- a/deps/v8/src/deoptimizer/deoptimizer.cc +++ b/deps/v8/src/deoptimizer/deoptimizer.cc @@ -249,6 +249,7 @@ class ActivationsFinder : public ThreadVisitor { SafepointEntry safepoint = code.GetSafepointEntry(it.frame()->pc()); int trampoline_pc = safepoint.trampoline_pc(); DCHECK_IMPLIES(code == topmost_, safe_to_deopt_); + CHECK_GE(trampoline_pc, 0); // Replace the current pc on the stack with the trampoline. // TODO(v8:10026): avoid replacing a signed pointer. Address* pc_addr = it.frame()->pc_address(); @@ -3265,7 +3266,8 @@ Address TranslatedState::DecompressIfNeeded(intptr_t value) { } } -TranslatedState::TranslatedState(const JavaScriptFrame* frame) { +TranslatedState::TranslatedState(const JavaScriptFrame* frame) + : purpose_(kFrameInspection) { int deopt_index = Safepoint::kNoDeoptimizationIndex; DeoptimizationData data = static_cast(frame)->GetDeoptimizationData( @@ -3640,25 +3642,63 @@ void TranslatedState::EnsureCapturedObjectAllocatedAt( } default: - CHECK(map->IsJSObjectMap()); EnsureJSObjectAllocated(slot, map); - TranslatedValue* properties_slot = &(frame->values_[value_index]); - value_index++; + int remaining_children_count = slot->GetChildrenCount() - 1; + + TranslatedValue* properties_slot = frame->ValueAt(value_index); + value_index++, remaining_children_count--; if (properties_slot->kind() == TranslatedValue::kCapturedObject) { - // If we are materializing the property array, make sure we put - // the mutable heap numbers at the right places. + // We are materializing the property array, so make sure we put the + // mutable heap numbers at the right places. EnsurePropertiesAllocatedAndMarked(properties_slot, map); EnsureChildrenAllocated(properties_slot->GetChildrenCount(), frame, &value_index, worklist); + } else { + CHECK_EQ(properties_slot->kind(), TranslatedValue::kTagged); } - // Make sure all the remaining children (after the map and properties) are - // allocated. - return EnsureChildrenAllocated(slot->GetChildrenCount() - 2, frame, + + TranslatedValue* elements_slot = frame->ValueAt(value_index); + value_index++, remaining_children_count--; + if (elements_slot->kind() == TranslatedValue::kCapturedObject || + !map->IsJSArrayMap()) { + // Handle this case with the other remaining children below. + value_index--, remaining_children_count++; + } else { + CHECK_EQ(elements_slot->kind(), TranslatedValue::kTagged); + elements_slot->GetValue(); + if (purpose_ == kFrameInspection) { + // We are materializing a JSArray for the purpose of frame inspection. + // If we were to construct it with the above elements value then an + // actual deopt later on might create another JSArray instance with + // the same elements store. That would violate the key assumption + // behind left-trimming. 
+ elements_slot->ReplaceElementsArrayWithCopy(); + } + } + + // Make sure all the remaining children (after the map, properties store, + // and possibly elements store) are allocated. + return EnsureChildrenAllocated(remaining_children_count, frame, &value_index, worklist); } UNREACHABLE(); } +void TranslatedValue::ReplaceElementsArrayWithCopy() { + DCHECK_EQ(kind(), TranslatedValue::kTagged); + DCHECK_EQ(materialization_state(), TranslatedValue::kFinished); + auto elements = Handle::cast(GetValue()); + DCHECK(elements->IsFixedArray() || elements->IsFixedDoubleArray()); + if (elements->IsFixedDoubleArray()) { + DCHECK(!elements->IsCowArray()); + set_storage(isolate()->factory()->CopyFixedDoubleArray( + Handle::cast(elements))); + } else if (!elements->IsCowArray()) { + set_storage(isolate()->factory()->CopyFixedArray( + Handle::cast(elements))); + } +} + void TranslatedState::EnsureChildrenAllocated(int count, TranslatedFrame* frame, int* value_index, std::stack* worklist) { @@ -3723,6 +3763,7 @@ Handle TranslatedState::AllocateStorageFor(TranslatedValue* slot) { void TranslatedState::EnsureJSObjectAllocated(TranslatedValue* slot, Handle map) { + CHECK(map->IsJSObjectMap()); CHECK_EQ(map->instance_size(), slot->GetChildrenCount() * kTaggedSize); Handle object_storage = AllocateStorageFor(slot); diff --git a/deps/v8/src/deoptimizer/deoptimizer.h b/deps/v8/src/deoptimizer/deoptimizer.h index ee6978e6292b8b..eaf0578878da0d 100644 --- a/deps/v8/src/deoptimizer/deoptimizer.h +++ b/deps/v8/src/deoptimizer/deoptimizer.h @@ -117,6 +117,8 @@ class TranslatedValue { return storage_; } + void ReplaceElementsArrayWithCopy(); + Kind kind_; MaterializationState materialization_state_ = kUninitialized; TranslatedState* container_; // This is only needed for materialization of @@ -313,7 +315,15 @@ class TranslatedFrame { class TranslatedState { public: - TranslatedState() = default; + // There are two constructors, each for a different purpose: + + // The default constructor is for the purpose of deoptimizing an optimized + // frame (replacing it with one or several unoptimized frames). It is used by + // the Deoptimizer. + TranslatedState() : purpose_(kDeoptimization) {} + + // This constructor is for the purpose of merely inspecting an optimized + // frame. It is used by stack trace generation and various debugging features. explicit TranslatedState(const JavaScriptFrame* frame); void Prepare(Address stack_frame_pointer); @@ -347,6 +357,12 @@ class TranslatedState { private: friend TranslatedValue; + // See the description of the constructors for an explanation of the two + // purposes. The only actual difference is that in the kFrameInspection case + // extra work is needed to not violate assumptions made by left-trimming. For + // details, see the code around ReplaceElementsArrayWithCopy. 
+ enum Purpose { kDeoptimization, kFrameInspection }; + TranslatedFrame CreateNextTranslatedFrame(TranslationIterator* iterator, FixedArray literal_array, Address fp, FILE* trace_file); @@ -404,6 +420,7 @@ class TranslatedState { static Float32 GetFloatSlot(Address fp, int slot_index); static Float64 GetDoubleSlot(Address fp, int slot_index); + Purpose const purpose_; std::vector frames_; Isolate* isolate_ = nullptr; Address stack_frame_pointer_ = kNullAddress; diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc index 5e0c5c65e2342e..a489968e1bdf77 100644 --- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc +++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc @@ -1161,6 +1161,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) { int mod, regop, rm, vvvv = vex_vreg(); get_modrm(*current, &mod, ®op, &rm); switch (opcode) { + case 0x10: + AppendToBuffer("vmovupd %s,", NameOfXMMRegister(regop)); + current += PrintRightXMMOperand(current); + break; case 0x28: AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); @@ -2090,7 +2094,13 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector out_buffer, data += 2; } else if (*data == 0x0F) { data++; - if (*data == 0x28) { + if (*data == 0x10) { + data++; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movupd %s,", NameOfXMMRegister(regop)); + data += PrintRightXMMOperand(data); + } else if (*data == 0x28) { data++; int mod, regop, rm; get_modrm(*data, &mod, ®op, &rm); diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc index 698b92c6162fe1..4925a09a0047a5 100644 --- a/deps/v8/src/diagnostics/objects-debug.cc +++ b/deps/v8/src/diagnostics/objects-debug.cc @@ -1667,12 +1667,13 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) { uint32_t current = 0; for (int i = 0; i < number_of_descriptors(); i++) { Name key = GetSortedKey(i); + CHECK(key.HasHashCode()); if (key == current_key) { Print(); return false; } current_key = key; - uint32_t hash = GetSortedKey(i).Hash(); + uint32_t hash = key.hash(); if (hash < current) { Print(); return false; @@ -1691,7 +1692,8 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) { for (int i = 0; i < number_of_transitions(); i++) { Name key = GetSortedKey(i); - uint32_t hash = key.Hash(); + CHECK(key.HasHashCode()); + uint32_t hash = key.hash(); PropertyKind kind = kData; PropertyAttributes attributes = NONE; if (!TransitionsAccessor::IsSpecialTransition(key.GetReadOnlyRoots(), diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc index 4a1c045927e733..26ec1600278cdb 100644 --- a/deps/v8/src/interpreter/bytecode-generator.cc +++ b/deps/v8/src/interpreter/bytecode-generator.cc @@ -4891,8 +4891,9 @@ void BytecodeGenerator::VisitCall(Call* expr) { Property* property = chain->expression()->AsProperty(); BuildOptionalChain([&]() { VisitAndPushIntoRegisterList(property->obj(), &args); - VisitPropertyLoadForRegister(args.last_register(), property, callee); + VisitPropertyLoad(args.last_register(), property); }); + builder()->StoreAccumulatorInRegister(callee); break; } case Call::SUPER_CALL: diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc index dfc302e77c8945..3a9e169757526d 100644 --- a/deps/v8/src/objects/bigint.cc +++ b/deps/v8/src/objects/bigint.cc @@ -1862,6 +1862,8 @@ Handle MutableBigInt::RightShiftByAbsolute(Isolate* isolate, 
DCHECK_LE(result_length, length); Handle result = New(isolate, result_length).ToHandleChecked(); if (bits_shift == 0) { + // Zero out any overflow digit (see "rounding_can_overflow" above). + result->set_digit(result_length - 1, 0); for (int i = digit_shift; i < length; i++) { result->set_digit(i - digit_shift, x->digit(i)); } diff --git a/deps/v8/src/objects/descriptor-array-inl.h b/deps/v8/src/objects/descriptor-array-inl.h index 357a6732e227d1..a7c6443a05fa7d 100644 --- a/deps/v8/src/objects/descriptor-array-inl.h +++ b/deps/v8/src/objects/descriptor-array-inl.h @@ -55,17 +55,19 @@ void DescriptorArray::CopyEnumCacheFrom(DescriptorArray array) { set_enum_cache(array.enum_cache()); } -InternalIndex DescriptorArray::Search(Name name, int valid_descriptors) { +InternalIndex DescriptorArray::Search(Name name, int valid_descriptors, + bool concurrent_search) { DCHECK(name.IsUniqueName()); - return InternalIndex( - internal::Search(this, name, valid_descriptors, nullptr)); + return InternalIndex(internal::Search( + this, name, valid_descriptors, nullptr, concurrent_search)); } -InternalIndex DescriptorArray::Search(Name name, Map map) { +InternalIndex DescriptorArray::Search(Name name, Map map, + bool concurrent_search) { DCHECK(name.IsUniqueName()); int number_of_own_descriptors = map.NumberOfOwnDescriptors(); if (number_of_own_descriptors == 0) return InternalIndex::NotFound(); - return Search(name, number_of_own_descriptors); + return Search(name, number_of_own_descriptors, concurrent_search); } InternalIndex DescriptorArray::SearchWithCache(Isolate* isolate, Name name, @@ -226,7 +228,7 @@ void DescriptorArray::Append(Descriptor* desc) { for (insertion = descriptor_number; insertion > 0; --insertion) { Name key = GetSortedKey(insertion - 1); - if (key.Hash() <= hash) break; + if (key.hash() <= hash) break; SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1)); } diff --git a/deps/v8/src/objects/descriptor-array.h b/deps/v8/src/objects/descriptor-array.h index 61da8dc240c3d9..3d505ad57695e0 100644 --- a/deps/v8/src/objects/descriptor-array.h +++ b/deps/v8/src/objects/descriptor-array.h @@ -113,11 +113,15 @@ class DescriptorArray int slack = 0); // Sort the instance descriptors by the hash codes of their keys. - void Sort(); - - // Search the instance descriptors for given name. - V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors); - V8_INLINE InternalIndex Search(Name name, Map map); + V8_EXPORT_PRIVATE void Sort(); + + // Search the instance descriptors for given name. {concurrent_search} signals + // if we are doing the search on a background thread. If so, we will sacrifice + // speed for thread-safety. + V8_INLINE InternalIndex Search(Name name, int number_of_own_descriptors, + bool concurrent_search = false); + V8_INLINE InternalIndex Search(Name name, Map map, + bool concurrent_search = false); // As the above, but uses DescriptorLookupCache and updates it when // necessary. 
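The new {concurrent_search} parameter on DescriptorArray::Search above trades speed for thread-safety: a background thread cannot rely on the array being fully sorted, so it avoids the binary-search path (the Search() helper in fixed-array-inl.h further down encodes this dispatch). A rough sketch of the pattern with placeholder types, not the V8 signatures:

#include <cstdint>
#include <vector>

constexpr int kNotFound = -1;
constexpr size_t kMaxElementsForLinearSearch = 8;

int LinearSearch(const std::vector<uint32_t>& hashes, uint32_t hash) {
  for (size_t i = 0; i < hashes.size(); ++i) {
    if (hashes[i] == hash) return static_cast<int>(i);
  }
  return kNotFound;
}

int BinarySearch(const std::vector<uint32_t>& sorted, uint32_t hash) {
  int low = 0, high = static_cast<int>(sorted.size()) - 1;
  while (low < high) {
    int mid = low + (high - low) / 2;
    if (sorted[mid] >= hash) high = mid; else low = mid + 1;
  }
  return (low >= 0 && low < static_cast<int>(sorted.size()) &&
          sorted[low] == hash) ? low : kNotFound;
}

int Search(const std::vector<uint32_t>& hashes, uint32_t hash,
           bool concurrent_search) {
  // Small arrays and concurrent (background-thread) searches take the linear
  // path; only the main thread may assume a fully sorted array.
  if (hashes.size() <= kMaxElementsForLinearSearch || concurrent_search) {
    return LinearSearch(hashes, hash);
  }
  return BinarySearch(hashes, hash);
}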
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h index 174d4abc5b4e0a..df741310c7036b 100644 --- a/deps/v8/src/objects/fixed-array-inl.h +++ b/deps/v8/src/objects/fixed-array-inl.h @@ -212,7 +212,7 @@ int BinarySearch(T* array, Name name, int valid_entries, DCHECK(search_mode == ALL_ENTRIES || out_insertion_index == nullptr); int low = 0; int high = array->number_of_entries() - 1; - uint32_t hash = name.hash_field(); + uint32_t hash = name.hash(); int limit = high; DCHECK(low <= high); @@ -220,7 +220,7 @@ int BinarySearch(T* array, Name name, int valid_entries, while (low != high) { int mid = low + (high - low) / 2; Name mid_name = array->GetSortedKey(mid); - uint32_t mid_hash = mid_name.hash_field(); + uint32_t mid_hash = mid_name.hash(); if (mid_hash >= hash) { high = mid; @@ -232,7 +232,7 @@ int BinarySearch(T* array, Name name, int valid_entries, for (; low <= limit; ++low) { int sort_index = array->GetSortedKeyIndex(low); Name entry = array->GetKey(InternalIndex(sort_index)); - uint32_t current_hash = entry.hash_field(); + uint32_t current_hash = entry.hash(); if (current_hash != hash) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { *out_insertion_index = sort_index + (current_hash > hash ? 0 : 1); @@ -259,12 +259,12 @@ template int LinearSearch(T* array, Name name, int valid_entries, int* out_insertion_index) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { - uint32_t hash = name.hash_field(); + uint32_t hash = name.hash(); int len = array->number_of_entries(); for (int number = 0; number < len; number++) { int sorted_index = array->GetSortedKeyIndex(number); Name entry = array->GetKey(InternalIndex(sorted_index)); - uint32_t current_hash = entry.hash_field(); + uint32_t current_hash = entry.hash(); if (current_hash > hash) { *out_insertion_index = sorted_index; return T::kNotFound; @@ -284,8 +284,9 @@ int LinearSearch(T* array, Name name, int valid_entries, } template -int Search(T* array, Name name, int valid_entries, int* out_insertion_index) { - SLOW_DCHECK(array->IsSortedNoDuplicates()); +int Search(T* array, Name name, int valid_entries, int* out_insertion_index, + bool concurrent_search) { + SLOW_DCHECK_IMPLIES(!concurrent_search, array->IsSortedNoDuplicates()); if (valid_entries == 0) { if (search_mode == ALL_ENTRIES && out_insertion_index != nullptr) { @@ -294,14 +295,14 @@ int Search(T* array, Name name, int valid_entries, int* out_insertion_index) { return T::kNotFound; } - // Fast case: do linear search for small arrays. + // Do linear search for small arrays, and for searches in the background + // thread. const int kMaxElementsForLinearSearch = 8; - if (valid_entries <= kMaxElementsForLinearSearch) { + if (valid_entries <= kMaxElementsForLinearSearch || concurrent_search) { return LinearSearch(array, name, valid_entries, out_insertion_index); } - // Slow case: perform binary search. 
return BinarySearch(array, name, valid_entries, out_insertion_index); } @@ -309,7 +310,7 @@ int Search(T* array, Name name, int valid_entries, int* out_insertion_index) { double FixedDoubleArray::get_scalar(int index) { DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() && map() != GetReadOnlyRoots().fixed_array_map()); - DCHECK(index >= 0 && index < this->length()); + DCHECK_LT(static_cast(index), static_cast(length())); DCHECK(!is_the_hole(index)); return ReadField(kHeaderSize + index * kDoubleSize); } @@ -317,7 +318,7 @@ double FixedDoubleArray::get_scalar(int index) { uint64_t FixedDoubleArray::get_representation(int index) { DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() && map() != GetReadOnlyRoots().fixed_array_map()); - DCHECK(index >= 0 && index < this->length()); + DCHECK_LT(static_cast(index), static_cast(length())); int offset = kHeaderSize + index * kDoubleSize; // Bug(v8:8875): Doubles may be unaligned. return base::ReadUnalignedValue(field_address(offset)); @@ -335,6 +336,7 @@ Handle FixedDoubleArray::get(FixedDoubleArray array, int index, void FixedDoubleArray::set(int index, double value) { DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() && map() != GetReadOnlyRoots().fixed_array_map()); + DCHECK_LT(static_cast(index), static_cast(length())); int offset = kHeaderSize + index * kDoubleSize; if (std::isnan(value)) { WriteField(offset, std::numeric_limits::quiet_NaN()); @@ -351,6 +353,7 @@ void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) { void FixedDoubleArray::set_the_hole(int index) { DCHECK(map() != GetReadOnlyRoots().fixed_cow_array_map() && map() != GetReadOnlyRoots().fixed_array_map()); + DCHECK_LT(static_cast(index), static_cast(length())); int offset = kHeaderSize + index * kDoubleSize; base::WriteUnalignedValue(field_address(offset), kHoleNanInt64); } diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h index 63c3c5360b96aa..ccb954c6e2517f 100644 --- a/deps/v8/src/objects/fixed-array.h +++ b/deps/v8/src/objects/fixed-array.h @@ -465,7 +465,8 @@ enum SearchMode { ALL_ENTRIES, VALID_ENTRIES }; template inline int Search(T* array, Name name, int valid_entries = 0, - int* out_insertion_index = nullptr); + int* out_insertion_index = nullptr, + bool concurrent_search = false); // ByteArray represents fixed sized byte arrays. Used for the relocation info // that is attached to code objects. diff --git a/deps/v8/src/objects/map-updater.cc b/deps/v8/src/objects/map-updater.cc index 8c9b94014f8efa..4ea69120d14c45 100644 --- a/deps/v8/src/objects/map-updater.cc +++ b/deps/v8/src/objects/map-updater.cc @@ -401,7 +401,17 @@ MapUpdater::State MapUpdater::FindTargetMap() { } Representation tmp_representation = tmp_details.representation(); if (!old_details.representation().fits_into(tmp_representation)) { - break; + // Try updating the field in-place to a generalized type. 
+ Representation generalized = + tmp_representation.generalize(old_details.representation()); + if (!tmp_representation.CanBeInPlaceChangedTo(generalized)) { + break; + } + Handle field_owner(tmp_map->FindFieldOwner(isolate_, i), isolate_); + tmp_representation = generalized; + GeneralizeField(field_owner, i, tmp_details.constness(), + tmp_representation, + handle(tmp_descriptors->GetFieldType(i), isolate_)); } if (tmp_details.location() == kField) { diff --git a/deps/v8/src/objects/map.cc b/deps/v8/src/objects/map.cc index bb13ace4bb0502..8b7f25f1a2ab8c 100644 --- a/deps/v8/src/objects/map.cc +++ b/deps/v8/src/objects/map.cc @@ -610,6 +610,7 @@ void Map::DeprecateTransitionTree(Isolate* isolate) { transitions.GetTarget(i).DeprecateTransitionTree(isolate); } DCHECK(!constructor_or_backpointer().IsFunctionTemplateInfo()); + DCHECK(CanBeDeprecated()); set_is_deprecated(true); if (FLAG_trace_maps) { LOG(isolate, MapEvent("Deprecate", handle(*this, isolate), Handle())); diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h index 9876d85d3eccf6..a4f563f6a17390 100644 --- a/deps/v8/src/objects/map.h +++ b/deps/v8/src/objects/map.h @@ -594,6 +594,7 @@ class Map : public HeapObject { WriteBarrierMode mode = UPDATE_WRITE_BARRIER); // [instance descriptors]: describes the object. + DECL_GETTER(synchronized_instance_descriptors, DescriptorArray) DECL_GETTER(instance_descriptors, DescriptorArray) V8_EXPORT_PRIVATE void SetInstanceDescriptors(Isolate* isolate, DescriptorArray descriptors, @@ -976,7 +977,8 @@ class Map : public HeapObject { MaybeHandle new_value); // Use the high-level instance_descriptors/SetInstanceDescriptors instead. - DECL_ACCESSORS(synchronized_instance_descriptors, DescriptorArray) + inline void set_synchronized_instance_descriptors( + DescriptorArray value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER); static const int kFastPropertiesSoftLimit = 12; static const int kMaxFastProperties = 128; diff --git a/deps/v8/src/objects/name-inl.h b/deps/v8/src/objects/name-inl.h index 0735b4e506fbcb..ffcd287fd37454 100644 --- a/deps/v8/src/objects/name-inl.h +++ b/deps/v8/src/objects/name-inl.h @@ -94,6 +94,12 @@ uint32_t Name::Hash() { return String::cast(*this).ComputeAndSetHash(); } +uint32_t Name::hash() const { + uint32_t field = hash_field(); + DCHECK(IsHashFieldComputed(field)); + return field >> kHashShift; +} + DEF_GETTER(Name, IsInterestingSymbol, bool) { return IsSymbol(isolate) && Symbol::cast(*this).is_interesting_symbol(); } diff --git a/deps/v8/src/objects/name.h b/deps/v8/src/objects/name.h index 533d8b000ebfa0..6309de9d4ca450 100644 --- a/deps/v8/src/objects/name.h +++ b/deps/v8/src/objects/name.h @@ -23,9 +23,15 @@ class Name : public TorqueGeneratedName { // Tells whether the hash code has been computed. inline bool HasHashCode(); - // Returns a hash value used for the property table + // Returns a hash value used for the property table. Ensures that the hash + // value is computed. + // TODO(ishell): rename to EnsureHash(). inline uint32_t Hash(); + // Returns a hash value used for the property table (same as Hash()), assumes + // the hash is already computed. + inline uint32_t hash() const; + // Equality operations. 
inline bool Equals(Name other); inline static bool Equals(Isolate* isolate, Handle one, diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc index 3eb78f46301f2b..009b09284e6ab9 100644 --- a/deps/v8/src/objects/objects.cc +++ b/deps/v8/src/objects/objects.cc @@ -4355,16 +4355,16 @@ void DescriptorArray::Sort() { // Reset sorting since the descriptor array might contain invalid pointers. for (int i = 0; i < len; ++i) SetSortedKey(i, i); // Bottom-up max-heap construction. - // Index of the last node with children + // Index of the last node with children. const int max_parent_index = (len / 2) - 1; for (int i = max_parent_index; i >= 0; --i) { int parent_index = i; - const uint32_t parent_hash = GetSortedKey(i).Hash(); + const uint32_t parent_hash = GetSortedKey(i).hash(); while (parent_index <= max_parent_index) { int child_index = 2 * parent_index + 1; - uint32_t child_hash = GetSortedKey(child_index).Hash(); + uint32_t child_hash = GetSortedKey(child_index).hash(); if (child_index + 1 < len) { - uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash(); + uint32_t right_child_hash = GetSortedKey(child_index + 1).hash(); if (right_child_hash > child_hash) { child_index++; child_hash = right_child_hash; @@ -4383,13 +4383,13 @@ void DescriptorArray::Sort() { SwapSortedKeys(0, i); // Shift down the new top element. int parent_index = 0; - const uint32_t parent_hash = GetSortedKey(parent_index).Hash(); + const uint32_t parent_hash = GetSortedKey(parent_index).hash(); const int max_parent_index = (i / 2) - 1; while (parent_index <= max_parent_index) { int child_index = parent_index * 2 + 1; - uint32_t child_hash = GetSortedKey(child_index).Hash(); + uint32_t child_hash = GetSortedKey(child_index).hash(); if (child_index + 1 < i) { - uint32_t right_child_hash = GetSortedKey(child_index + 1).Hash(); + uint32_t right_child_hash = GetSortedKey(child_index + 1).hash(); if (right_child_hash > child_hash) { child_index++; child_hash = right_child_hash; diff --git a/deps/v8/src/objects/transitions-inl.h b/deps/v8/src/objects/transitions-inl.h index 5694d66d948325..09157b7f5d0051 100644 --- a/deps/v8/src/objects/transitions-inl.h +++ b/deps/v8/src/objects/transitions-inl.h @@ -169,12 +169,20 @@ int TransitionArray::SearchNameForTesting(Name name, int* out_insertion_index) { return SearchName(name, out_insertion_index); } +Map TransitionArray::SearchAndGetTargetForTesting( + PropertyKind kind, Name name, PropertyAttributes attributes) { + return SearchAndGetTarget(kind, name, attributes); +} + int TransitionArray::SearchSpecial(Symbol symbol, int* out_insertion_index) { return SearchName(symbol, out_insertion_index); } int TransitionArray::SearchName(Name name, int* out_insertion_index) { DCHECK(name.IsUniqueName()); + // The name is taken from DescriptorArray, so it must already has a computed + // hash. 
+ DCHECK(name.HasHashCode()); return internal::Search(this, name, number_of_entries(), out_insertion_index); } diff --git a/deps/v8/src/objects/transitions.cc b/deps/v8/src/objects/transitions.cc index e0ba40ce7d0230..e240878b33d767 100644 --- a/deps/v8/src/objects/transitions.cc +++ b/deps/v8/src/objects/transitions.cc @@ -619,8 +619,8 @@ void TransitionArray::Sort() { temp_kind = details.kind(); temp_attributes = details.attributes(); } - int cmp = CompareKeys(temp_key, temp_key.Hash(), temp_kind, - temp_attributes, key, key.Hash(), kind, attributes); + int cmp = CompareKeys(temp_key, temp_key.hash(), temp_kind, + temp_attributes, key, key.hash(), kind, attributes); if (cmp > 0) { SetKey(j + 1, temp_key); SetRawTarget(j + 1, temp_target); diff --git a/deps/v8/src/objects/transitions.h b/deps/v8/src/objects/transitions.h index 5a7db13e516cf9..055bf41c914419 100644 --- a/deps/v8/src/objects/transitions.h +++ b/deps/v8/src/objects/transitions.h @@ -143,6 +143,8 @@ class V8_EXPORT_PRIVATE TransitionsAccessor { return encoding_; } + inline TransitionArray transitions(); + private: friend class MarkCompactCollector; // For HasSimpleTransitionTo. friend class TransitionArray; @@ -175,8 +177,6 @@ class V8_EXPORT_PRIVATE TransitionsAccessor { void TraverseTransitionTreeInternal(TraverseCallback callback, void* data, DisallowHeapAllocation* no_gc); - inline TransitionArray transitions(); - Isolate* isolate_; Handle map_handle_; Map map_; @@ -231,7 +231,7 @@ class TransitionArray : public WeakFixedArray { V8_EXPORT_PRIVATE bool IsSortedNoDuplicates(int valid_entries = -1); #endif - void Sort(); + V8_EXPORT_PRIVATE void Sort(); void PrintInternal(std::ostream& os); @@ -260,6 +260,9 @@ class TransitionArray : public WeakFixedArray { inline int SearchNameForTesting(Name name, int* out_insertion_index = nullptr); + inline Map SearchAndGetTargetForTesting(PropertyKind kind, Name name, + PropertyAttributes attributes); + private: friend class Factory; friend class MarkCompactCollector; @@ -296,8 +299,8 @@ class TransitionArray : public WeakFixedArray { int Search(PropertyKind kind, Name name, PropertyAttributes attributes, int* out_insertion_index = nullptr); - Map SearchAndGetTarget(PropertyKind kind, Name name, - PropertyAttributes attributes); + V8_EXPORT_PRIVATE Map SearchAndGetTarget(PropertyKind kind, Name name, + PropertyAttributes attributes); // Search a non-property transition (like elements kind, observe or frozen // transitions). 
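Several of the hunks above (DescriptorArray::Sort, TransitionArray::Sort, the IsSortedNoDuplicates verifiers) switch from Name::Hash() to the new Name::hash(): per the name.h comments earlier in this patch, Hash() ensures the hash is computed, while hash() assumes it already is. A condensed sketch of that split (placeholder class, not V8's Name):

#include <cassert>
#include <cstdint>
#include <functional>
#include <string>

class Name {
 public:
  explicit Name(std::string s) : str_(std::move(s)) {}

  // Hash(): ensures the hash value is computed (and caches it).
  uint32_t Hash() {
    if (!has_hash_) {
      hash_ = static_cast<uint32_t>(std::hash<std::string>{}(str_));
      has_hash_ = true;
    }
    return hash_;
  }

  // hash(): same value as Hash(), but assumes it was already computed.
  // Callers like DescriptorArray::Sort() know every key has a hash.
  uint32_t hash() const {
    assert(has_hash_);
    return hash_;
  }

 private:
  std::string str_;
  uint32_t hash_ = 0;
  bool has_hash_ = false;
};

int main() {
  Name n("foo");
  uint32_t h = n.Hash();          // computes and caches
  return n.hash() == h ? 0 : 1;   // cheap read, hash already computed
}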
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index bd906fea153a21..2a6ffec9297f32 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -14,13 +14,13 @@ namespace v8 {
namespace internal {
void RegExpBytecodeGenerator::Emit(uint32_t byte, uint32_t twenty_four_bits) {
- uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
- DCHECK(pc_ <= buffer_.length());
- if (pc_ + 3 >= buffer_.length()) {
- Expand();
- }
- *reinterpret_cast(buffer_.begin() + pc_) = word;
- pc_ += 4;
+ DCHECK(is_uint24(twenty_four_bits));
+ Emit32((twenty_four_bits << BYTECODE_SHIFT) | byte);
+}
+
+void RegExpBytecodeGenerator::Emit(uint32_t byte, int32_t twenty_four_bits) {
+ DCHECK(is_int24(twenty_four_bits));
+ Emit32((static_cast(twenty_four_bits) << BYTECODE_SHIFT) | byte);
}
void RegExpBytecodeGenerator::Emit16(uint32_t word) {
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index e82b67b530a707..16f693c6a03999 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -161,8 +161,10 @@ bool RegExpBytecodeGenerator::Succeed() {
void RegExpBytecodeGenerator::Fail() { Emit(BC_FAIL, 0); }
void RegExpBytecodeGenerator::AdvanceCurrentPosition(int by) {
- DCHECK_LE(kMinCPOffset, by);
- DCHECK_GE(kMaxCPOffset, by);
+ // TODO(chromium:1166138): Turn back into DCHECKs once the underlying issue
+ // is fixed.
+ CHECK_LE(kMinCPOffset, by);
+ CHECK_GE(kMaxCPOffset, by);
advance_current_start_ = pc_;
advance_current_offset_ = by;
Emit(BC_ADVANCE_CP, by);
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index fdb9b468619d60..0b4656f6633ad0 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -85,6 +85,7 @@ class V8_EXPORT_PRIVATE RegExpBytecodeGenerator : public RegExpMacroAssembler {
inline void Emit16(uint32_t x);
inline void Emit8(uint32_t x);
inline void Emit(uint32_t bc, uint32_t arg);
+ inline void Emit(uint32_t bc, int32_t arg);
// Bytecode buffer.
int length();
void Copy(byte* a);
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index a04180fd346d7f..5e1b70ef7334a3 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -2536,7 +2536,16 @@ int ChoiceNode::GreedyLoopTextLengthForAlternative(
SeqRegExpNode* seq_node = static_cast(node);
node = seq_node->on_success();
}
- return read_backward() ? -length : length;
+ if (read_backward()) {
+ length = -length;
+ }
+ // Check that we can jump by the whole text length. If not, return sentinel
+ // to indicate that we can't construct a greedy loop.
+ if (length < RegExpMacroAssembler::kMinCPOffset || + length > RegExpMacroAssembler::kMaxCPOffset) { + return kNodeIsTooComplexForGreedyLoops; + } + return length; } void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) { diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h index eb91b79ea55a95..fe8105d4eb2833 100644 --- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h +++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h @@ -140,6 +140,8 @@ template inline void I64BinopI(LiftoffAssembler* assm, LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { + // The compiler allocated registers such that either {dst == lhs} or there is + // no overlap between the two. DCHECK_NE(dst.low_gp(), lhs.high_gp()); (assm->*op)(dst.low_gp(), lhs.low_gp(), Operand(imm), SetCC, al); // Top half of the immediate sign extended, either 0 or -1. @@ -437,7 +439,7 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, vmov(liftoff::GetFloatRegister(reg.fp()), value.to_f32_boxed()); break; case ValueType::kF64: { - Register extra_scratch = GetUnusedRegister(kGpReg).gp(); + Register extra_scratch = GetUnusedRegister(kGpReg, {}).gp(); vmov(reg.fp(), Double(value.to_f64_boxed().get_bits()), extra_scratch); break; } @@ -880,11 +882,13 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, if (cache_state()->is_used(LiftoffRegister(dst_high))) { SpillRegister(LiftoffRegister(dst_high)); } - UseScratchRegisterScope temps(this); - Register actual_addr = liftoff::CalculateActualAddress( - this, &temps, src_addr, offset_reg, offset_imm); - ldrexd(dst_low, dst_high, actual_addr); - dmb(ISH); + { + UseScratchRegisterScope temps(this); + Register actual_addr = liftoff::CalculateActualAddress( + this, &temps, src_addr, offset_reg, offset_imm); + ldrexd(dst_low, dst_high, actual_addr); + dmb(ISH); + } LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{ {dst, LiftoffRegister::ForPair(dst_low, dst_high), kWasmI64}}; @@ -1171,7 +1175,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueType type) { DCHECK_NE(dst_offset, src_offset); - LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); + LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {}); Fill(reg, src_offset, type); Spill(dst_offset, reg, type); } @@ -1196,12 +1200,10 @@ void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src, } void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { -#ifdef DEBUG // The {str} instruction needs a temp register when the immediate in the // provided MemOperand does not fit into 12 bits. This happens for large stack // frames. This DCHECK checks that the temp register is available when needed. DCHECK(UseScratchRegisterScope{this}.CanAcquire()); -#endif DCHECK_LT(0, offset); RecordUsedSpillOffset(offset); MemOperand dst(fp, -offset); @@ -1216,7 +1218,7 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { // The scratch register will be required by str if multiple instructions // are required to encode the offset, and so we cannot use it in that case. 
if (!ImmediateFitsAddrMode2Instruction(dst.offset())) { - src = GetUnusedRegister(kGpReg).gp(); + src = GetUnusedRegister(kGpReg, {}).gp(); } else { src = temps.Acquire(); } @@ -1758,7 +1760,7 @@ void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { constexpr uint32_t kF32SignBit = uint32_t{1} << 31; UseScratchRegisterScope temps(this); - Register scratch = GetUnusedRegister(kGpReg).gp(); + Register scratch = GetUnusedRegister(kGpReg, {}).gp(); Register scratch2 = temps.Acquire(); VmovLow(scratch, lhs); // Clear sign bit in {scratch}. @@ -1777,7 +1779,7 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, // On arm, we cannot hold the whole f64 value in a gp register, so we just // operate on the upper half (UH). UseScratchRegisterScope temps(this); - Register scratch = GetUnusedRegister(kGpReg).gp(); + Register scratch = GetUnusedRegister(kGpReg, {}).gp(); Register scratch2 = temps.Acquire(); VmovHigh(scratch, lhs); // Clear sign bit in {scratch}. diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h index 7a1d629bf2dddd..065aed0ebbe0c7 100644 --- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h +++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h @@ -130,7 +130,7 @@ inline Register GetTmpByteRegister(LiftoffAssembler* assm, Register candidate) { if (candidate.is_byte_register()) return candidate; // {GetUnusedRegister()} may insert move instructions to spill registers to // the stack. This is OK because {mov} does not change the status flags. - return assm->GetUnusedRegister(liftoff::kByteRegs).gp(); + return assm->GetUnusedRegister(liftoff::kByteRegs, {}).gp(); } inline void MoveStackValue(LiftoffAssembler* assm, const Operand& src, @@ -532,7 +532,126 @@ void LiftoffAssembler::AtomicCompareExchange( Register dst_addr, Register offset_reg, uint32_t offset_imm, LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result, StoreType type) { - bailout(kAtomics, "AtomicCompareExchange"); + // We expect that the offset has already been added to {dst_addr}, and no + // {offset_reg} is provided. This is to save registers. + DCHECK_EQ(offset_reg, no_reg); + + DCHECK_EQ(result, expected); + + if (type.value() != StoreType::kI64Store) { + bool is_64_bit_op = type.value_type() == kWasmI64; + + Register value_reg = is_64_bit_op ? new_value.low_gp() : new_value.gp(); + Register expected_reg = is_64_bit_op ? expected.low_gp() : expected.gp(); + Register result_reg = expected_reg; + + // The cmpxchg instruction uses eax to store the old value of the + // compare-exchange primitive. Therefore we have to spill the register and + // move any use to another register. + ClearRegister(eax, {&dst_addr, &value_reg}, + LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg)); + if (expected_reg != eax) { + mov(eax, expected_reg); + expected_reg = eax; + } + + bool is_byte_store = type.size() == 1; + LiftoffRegList pinned = + LiftoffRegList::ForRegs(dst_addr, value_reg, expected_reg); + + // Ensure that {value_reg} is a valid register. 
+ if (is_byte_store && !liftoff::kByteRegs.has(value_reg)) {
+ Register safe_value_reg =
+ pinned.set(GetUnusedRegister(liftoff::kByteRegs, pinned)).gp();
+ mov(safe_value_reg, value_reg);
+ value_reg = safe_value_reg;
+ pinned.clear(LiftoffRegister(value_reg));
+ }
+
+
+ Operand dst_op = Operand(dst_addr, offset_imm);
+
+ lock();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8: {
+ cmpxchg_b(dst_op, value_reg);
+ movzx_b(result_reg, eax);
+ break;
+ }
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16: {
+ cmpxchg_w(dst_op, value_reg);
+ movzx_w(result_reg, eax);
+ break;
+ }
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32: {
+ cmpxchg(dst_op, value_reg);
+ if (result_reg != eax) {
+ mov(result_reg, eax);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (is_64_bit_op) {
+ xor_(result.high_gp(), result.high_gp());
+ }
+ return;
+ }
+
+ // The following code handles kExprI64AtomicCompareExchange.
+
+ // We need {ebx} here, which is the root register. The root register
+ // needs special treatment. As we use {ebx} directly in the code below, we
+ // have to make sure here that the root register is actually {ebx}.
+ static_assert(kRootRegister == ebx,
+ "The following code assumes that kRootRegister == ebx");
+ push(kRootRegister);
+
+ // The compare-exchange instruction uses registers as follows:
+ // old-value = EDX:EAX; new-value = ECX:EBX.
+ Register expected_hi = edx;
+ Register expected_lo = eax;
+ Register new_hi = ecx;
+ Register new_lo = ebx;
+ // The address needs a separate register that does not alias with the
+ // ones above.
+ Register address = esi;
+
+ // Spill all these registers if they are still holding other values.
+ liftoff::SpillRegisters(this, expected_hi, expected_lo, new_hi, address);
+
+ // We have to set new_lo specially, because it's the root register. We do it
+ // before setting all other registers so that the original value does not get
+ // overwritten.
+ mov(new_lo, new_value.low_gp());
+
+ // Move all other values into the right register.
+ {
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {LiftoffRegister(address), LiftoffRegister(dst_addr), kWasmI32},
+ {LiftoffRegister::ForPair(expected_lo, expected_hi), expected, kWasmI64},
+ {LiftoffRegister(new_hi), new_value.high(), kWasmI32}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
+ };
+
+ Operand dst_op = Operand(address, offset_imm);
+
+ lock();
+ cmpxchg8b(dst_op);
+
+ // Restore the root register, and we are done.
+ pop(kRootRegister);
+
+ // Move the result into the correct registers.
+ {
+ LiftoffAssembler::ParallelRegisterMoveTuple reg_moves[]{
+ {result, LiftoffRegister::ForPair(expected_lo, expected_hi), kWasmI64}};
+ ParallelRegisterMove(ArrayVector(reg_moves));
+ }
}
void LiftoffAssembler::AtomicFence() { mfence(); }
@@ -978,31 +1097,19 @@ template
inline void OpWithCarryI(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister lhs, int32_t imm) {
- // First, compute the low half of the result, potentially into a temporary dst
- // register if {dst.low_gp()} equals any register we need to
- // keep alive for computing the upper half.
- LiftoffRegList keep_alive = LiftoffRegList::ForRegs(lhs.high_gp());
- Register dst_low = keep_alive.has(dst.low_gp())
- ?
assm->GetUnusedRegister(kGpReg, keep_alive).gp() - : dst.low_gp(); - - if (dst_low != lhs.low_gp()) assm->mov(dst_low, lhs.low_gp()); - (assm->*op)(dst_low, Immediate(imm)); + // The compiler allocated registers such that either {dst == lhs} or there is + // no overlap between the two. + DCHECK_NE(dst.low_gp(), lhs.high_gp()); - // Now compute the upper half, while keeping alive the previous result. - keep_alive = LiftoffRegList::ForRegs(dst_low); - Register dst_high = keep_alive.has(dst.high_gp()) - ? assm->GetUnusedRegister(kGpReg, keep_alive).gp() - : dst.high_gp(); + // First, compute the low half of the result. + if (dst.low_gp() != lhs.low_gp()) assm->mov(dst.low_gp(), lhs.low_gp()); + (assm->*op)(dst.low_gp(), Immediate(imm)); - if (dst_high != lhs.high_gp()) assm->mov(dst_high, lhs.high_gp()); + // Now compute the upper half. + if (dst.high_gp() != lhs.high_gp()) assm->mov(dst.high_gp(), lhs.high_gp()); // Top half of the immediate sign extended, either 0 or -1. int32_t sign_extend = imm < 0 ? -1 : 0; - (assm->*op_with_carry)(dst_high, sign_extend); - - // If necessary, move result into the right registers. - LiftoffRegister tmp_result = LiftoffRegister::ForPair(dst_low, dst_high); - if (tmp_result != dst) assm->Move(dst, tmp_result, kWasmI64); + (assm->*op_with_carry)(dst.high_gp(), sign_extend); } } // namespace liftoff @@ -1349,7 +1456,7 @@ inline void EmitFloatMinOrMax(LiftoffAssembler* assm, DoubleRegister dst, // We need one tmp register to extract the sign bit. Get it right at the // beginning, such that the spilling code is not accidentially jumped over. - Register tmp = assm->GetUnusedRegister(kGpReg).gp(); + Register tmp = assm->GetUnusedRegister(kGpReg, {}).gp(); #define dop(name, ...) \ do { \ @@ -1412,9 +1519,9 @@ void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs, void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs, DoubleRegister rhs) { static constexpr int kF32SignBit = 1 << 31; - Register scratch = GetUnusedRegister(kGpReg).gp(); - Register scratch2 = - GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp(); + LiftoffRegList pinned; + Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); + Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp(); Movd(scratch, lhs); // move {lhs} into {scratch}. and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}. Movd(scratch2, rhs); // move {rhs} into {scratch2}. @@ -1541,9 +1648,9 @@ void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs, static constexpr int kF32SignBit = 1 << 31; // On ia32, we cannot hold the whole f64 value in a gp register, so we just // operate on the upper half (UH). - Register scratch = GetUnusedRegister(kGpReg).gp(); - Register scratch2 = - GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(scratch)).gp(); + LiftoffRegList pinned; + Register scratch = pinned.set(GetUnusedRegister(kGpReg, pinned)).gp(); + Register scratch2 = GetUnusedRegister(kGpReg, pinned).gp(); Pextrd(scratch, lhs, 1); // move UH of {lhs} into {scratch}. and_(scratch, Immediate(~kF32SignBit)); // clear sign bit in {scratch}. 
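The emit_f32_copysign/emit_f64_copysign hunks above only change how the scratch registers are obtained; the underlying technique stays the same sign-bit splice: clear the sign bit of the magnitude operand, isolate the sign bit of the sign operand, and OR the two. A minimal scalar C++ sketch of that bit manipulation, illustrative only and not the Liftoff code (the CopySignViaBits name is made up), looks like this:

#include <cstdint>
#include <cstring>

// copysign(magnitude, sign) for f32 via raw bits: keep every bit of
// {magnitude} except the sign bit, then take only the sign bit from {sign}.
// The f64 variant above applies the same idea to the upper 32 bits.
float CopySignViaBits(float magnitude, float sign) {
  constexpr uint32_t kF32SignBit = uint32_t{1} << 31;
  uint32_t magnitude_bits;
  uint32_t sign_bits;
  std::memcpy(&magnitude_bits, &magnitude, sizeof(magnitude_bits));
  std::memcpy(&sign_bits, &sign, sizeof(sign_bits));
  const uint32_t result_bits =
      (magnitude_bits & ~kF32SignBit) | (sign_bits & kF32SignBit);
  float result;
  std::memcpy(&result, &result_bits, sizeof(result));
  return result;
}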
@@ -2381,7 +2488,7 @@ void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs, void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs, int32_t rhs) { static constexpr RegClass tmp_rc = reg_class_for(ValueType::kI32); - LiftoffRegister tmp = GetUnusedRegister(tmp_rc); + LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {}); byte shift = static_cast(rhs & 0x7); if (CpuFeatures::IsSupported(AVX)) { CpuFeatureScope scope(this, AVX); @@ -3270,7 +3377,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { } void LiftoffAssembler::CallTrapCallbackForTesting() { - PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp()); + PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); } diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.cc b/deps/v8/src/wasm/baseline/liftoff-assembler.cc index 923d375064c924..e21ee60c0cfac7 100644 --- a/deps/v8/src/wasm/baseline/liftoff-assembler.cc +++ b/deps/v8/src/wasm/baseline/liftoff-assembler.cc @@ -37,6 +37,7 @@ class StackTransferRecipe { struct RegisterLoad { enum LoadKind : uint8_t { + kNop, // no-op, used for high fp of a fp pair. kConstant, // load a constant value into a register. kStack, // fill a register from a stack slot. kLowHalfStack, // fill a register from the low half of a stack slot. @@ -63,6 +64,10 @@ class StackTransferRecipe { return {half == kLowWord ? kLowHalfStack : kHighHalfStack, kWasmI32, offset}; } + static RegisterLoad Nop() { + // ValueType does not matter. + return {kNop, kWasmI32, 0}; + } private: RegisterLoad(LoadKind kind, ValueType type, int32_t value) @@ -217,11 +222,11 @@ class StackTransferRecipe { RegisterLoad::HalfStack(stack_offset, kHighWord); } else if (dst.is_fp_pair()) { DCHECK_EQ(kWasmS128, type); - // load_dst_regs_.set above will set both low and high fp regs. - // But unlike gp_pair, we load a kWasm128 in one go in ExecuteLoads. - // So unset the top fp register to skip loading it. - load_dst_regs_.clear(dst.high()); + // Only need register_load for low_gp since we load 128 bits at one go. + // Both low and high need to be set in load_dst_regs_ but when iterating + // over it, both low and high will be cleared, so we won't load twice. *register_load(dst.low()) = RegisterLoad::Stack(stack_offset, type); + *register_load(dst.high()) = RegisterLoad::Nop(); } else { *register_load(dst) = RegisterLoad::Stack(stack_offset, type); } @@ -318,6 +323,8 @@ class StackTransferRecipe { for (LiftoffRegister dst : load_dst_regs_) { RegisterLoad* load = register_load(dst); switch (load->kind) { + case RegisterLoad::kNop: + break; case RegisterLoad::kConstant: asm_->LoadConstant(dst, load->type == kWasmI64 ? WasmValue(int64_t{load->value}) @@ -535,7 +542,7 @@ LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(VarState slot, if (slot.is_reg()) { return half == kLowWord ? slot.reg().low() : slot.reg().high(); } - LiftoffRegister dst = GetUnusedRegister(kGpReg); + LiftoffRegister dst = GetUnusedRegister(kGpReg, {}); if (slot.is_stack()) { FillI64Half(dst.gp(), slot.offset(), half); return dst; @@ -574,7 +581,7 @@ void LiftoffAssembler::PrepareLoopArgs(int num) { if (!slot.is_const()) continue; RegClass rc = kNeedI64RegPair && slot.type() == kWasmI64 ? 
kGpRegPair : kGpReg; - LiftoffRegister reg = GetUnusedRegister(rc); + LiftoffRegister reg = GetUnusedRegister(rc, {}); LoadConstant(reg, slot.constant()); slot.MakeRegister(reg); cache_state_.inc_used(reg); diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h index 3377990496fb85..701b4b8e6a6209 100644 --- a/deps/v8/src/wasm/baseline/liftoff-assembler.h +++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h @@ -340,7 +340,7 @@ class LiftoffAssembler : public TurboAssembler { // possible. LiftoffRegister GetUnusedRegister( RegClass rc, std::initializer_list try_first, - LiftoffRegList pinned = {}) { + LiftoffRegList pinned) { for (LiftoffRegister reg : try_first) { DCHECK_EQ(reg.reg_class(), rc); if (cache_state_.is_free(reg)) return reg; @@ -349,7 +349,7 @@ class LiftoffAssembler : public TurboAssembler { } // Get an unused register for class {rc}, potentially spilling to free one. - LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned = {}) { + LiftoffRegister GetUnusedRegister(RegClass rc, LiftoffRegList pinned) { if (kNeedI64RegPair && rc == kGpRegPair) { LiftoffRegList candidates = kGpCacheRegList; Register low = pinned.set(GetUnusedRegister(candidates, pinned)).gp(); diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc index 4d0d9dbcecac3a..987f46b6ffbd00 100644 --- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc +++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc @@ -495,7 +495,7 @@ class LiftoffCompiler { position, __ cache_state()->used_registers, RegisterDebugSideTableEntry(DebugSideTableBuilder::kAssumeSpilling))); OutOfLineCode& ool = out_of_line_code_.back(); - Register limit_address = __ GetUnusedRegister(kGpReg).gp(); + Register limit_address = __ GetUnusedRegister(kGpReg, {}).gp(); LOAD_INSTANCE_FIELD(limit_address, StackLimitAddress, kSystemPointerSize); __ StackCheck(ool.label.get(), limit_address); __ bind(ool.continuation.get()); @@ -604,7 +604,7 @@ class LiftoffCompiler { *next_breakpoint_ptr_ == decoder->position()); if (!has_breakpoint) { DEBUG_CODE_COMMENT("check hook on function call"); - Register flag = __ GetUnusedRegister(kGpReg).gp(); + Register flag = __ GetUnusedRegister(kGpReg, {}).gp(); LOAD_INSTANCE_FIELD(flag, HookOnFunctionCallAddress, kSystemPointerSize); Label no_break; @@ -923,8 +923,8 @@ class LiftoffCompiler { constexpr RegClass result_rc = reg_class_for(result_type); LiftoffRegister src = __ PopToRegister(); LiftoffRegister dst = src_rc == result_rc - ? __ GetUnusedRegister(result_rc, {src}) - : __ GetUnusedRegister(result_rc); + ? __ GetUnusedRegister(result_rc, {src}, {}) + : __ GetUnusedRegister(result_rc, {}); CallEmitFn(fn, dst, src); __ PushRegister(ValueType(result_type), dst); } @@ -951,8 +951,9 @@ class LiftoffCompiler { static constexpr RegClass src_rc = reg_class_for(src_type); static constexpr RegClass dst_rc = reg_class_for(dst_type); LiftoffRegister src = __ PopToRegister(); - LiftoffRegister dst = src_rc == dst_rc ? __ GetUnusedRegister(dst_rc, {src}) - : __ GetUnusedRegister(dst_rc); + LiftoffRegister dst = src_rc == dst_rc + ? __ GetUnusedRegister(dst_rc, {src}, {}) + : __ GetUnusedRegister(dst_rc, {}); DCHECK_EQ(!!can_trap, trap_position > 0); Label* trap = can_trap ? 
AddOutOfLineTrap( trap_position, @@ -1121,9 +1122,12 @@ class LiftoffCompiler { int32_t imm = rhs_slot.i32_const(); LiftoffRegister lhs = __ PopToRegister(); + // Either reuse {lhs} for {dst}, or choose a register (pair) which does + // not overlap, for easier code generation. + LiftoffRegList pinned = LiftoffRegList::ForRegs(lhs); LiftoffRegister dst = src_rc == result_rc - ? __ GetUnusedRegister(result_rc, {lhs}) - : __ GetUnusedRegister(result_rc); + ? __ GetUnusedRegister(result_rc, {lhs}, pinned) + : __ GetUnusedRegister(result_rc, pinned); CallEmitFn(fnImm, dst, lhs, imm); __ PushRegister(ValueType(result_type), dst); @@ -1141,8 +1145,8 @@ class LiftoffCompiler { LiftoffRegister rhs = __ PopToRegister(); LiftoffRegister lhs = __ PopToRegister(LiftoffRegList::ForRegs(rhs)); LiftoffRegister dst = src_rc == result_rc - ? __ GetUnusedRegister(result_rc, {lhs, rhs}) - : __ GetUnusedRegister(result_rc); + ? __ GetUnusedRegister(result_rc, {lhs, rhs}, {}) + : __ GetUnusedRegister(result_rc, {}); if (swap_lhs_rhs) std::swap(lhs, rhs); @@ -1483,20 +1487,20 @@ class LiftoffCompiler { if (value_i32 == value) { __ PushConstant(kWasmI64, value_i32); } else { - LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64)); + LiftoffRegister reg = __ GetUnusedRegister(reg_class_for(kWasmI64), {}); __ LoadConstant(reg, WasmValue(value)); __ PushRegister(kWasmI64, reg); } } void F32Const(FullDecoder* decoder, Value* result, float value) { - LiftoffRegister reg = __ GetUnusedRegister(kFpReg); + LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); __ LoadConstant(reg, WasmValue(value)); __ PushRegister(kWasmF32, reg); } void F64Const(FullDecoder* decoder, Value* result, double value) { - LiftoffRegister reg = __ GetUnusedRegister(kFpReg); + LiftoffRegister reg = __ GetUnusedRegister(kFpReg, {}); __ LoadConstant(reg, WasmValue(value)); __ PushRegister(kWasmF64, reg); } @@ -1546,7 +1550,7 @@ class LiftoffCompiler { break; case kStack: { auto rc = reg_class_for(imm.type); - LiftoffRegister reg = __ GetUnusedRegister(rc); + LiftoffRegister reg = __ GetUnusedRegister(rc, {}); __ Fill(reg, slot.offset(), imm.type); __ PushRegister(slot.type(), reg); break; @@ -1570,7 +1574,7 @@ class LiftoffCompiler { } DCHECK_EQ(type, __ local_type(local_index)); RegClass rc = reg_class_for(type); - LiftoffRegister dst_reg = __ GetUnusedRegister(rc); + LiftoffRegister dst_reg = __ GetUnusedRegister(rc, {}); __ Fill(dst_reg, src_slot.offset(), type); *dst_slot = LiftoffAssembler::VarState(type, dst_reg, dst_slot->offset()); __ cache_state()->inc_used(dst_reg); @@ -1609,7 +1613,7 @@ class LiftoffCompiler { Register GetGlobalBaseAndOffset(const WasmGlobal* global, LiftoffRegList* pinned, uint32_t* offset) { - Register addr = pinned->set(__ GetUnusedRegister(kGpReg)).gp(); + Register addr = pinned->set(__ GetUnusedRegister(kGpReg, {})).gp(); if (global->mutability && global->imported) { LOAD_INSTANCE_FIELD(addr, ImportedMutableGlobals, kSystemPointerSize); __ Load(LiftoffRegister(addr), addr, no_reg, @@ -1675,8 +1679,8 @@ class LiftoffCompiler { DCHECK_EQ(type, __ cache_state()->stack_state.end()[-2].type()); LiftoffRegister false_value = pinned.set(__ PopToRegister(pinned)); LiftoffRegister true_value = __ PopToRegister(pinned); - LiftoffRegister dst = - __ GetUnusedRegister(true_value.reg_class(), {true_value, false_value}); + LiftoffRegister dst = __ GetUnusedRegister(true_value.reg_class(), + {true_value, false_value}, {}); __ PushRegister(type, dst); // Now emit the actual code to move either {true_value} or 
{false_value} @@ -2075,7 +2079,7 @@ class LiftoffCompiler { } void CurrentMemoryPages(FullDecoder* decoder, Value* result) { - Register mem_size = __ GetUnusedRegister(kGpReg).gp(); + Register mem_size = __ GetUnusedRegister(kGpReg, {}).gp(); LOAD_INSTANCE_FIELD(mem_size, MemorySize, kSystemPointerSize); __ emit_ptrsize_shri(mem_size, mem_size, kWasmPageSizeLog2); __ PushRegister(kWasmI32, LiftoffRegister(mem_size)); @@ -2344,7 +2348,7 @@ class LiftoffCompiler { src_rc == result_rc ? __ GetUnusedRegister(result_rc, {src3}, LiftoffRegList::ForRegs(src1, src2)) - : __ GetUnusedRegister(result_rc); + : __ GetUnusedRegister(result_rc, {}); CallEmitFn(fn, dst, src1, src2, src3); __ PushRegister(ValueType(result_type), dst); } @@ -2360,14 +2364,14 @@ class LiftoffCompiler { int32_t imm = rhs_slot.i32_const(); LiftoffRegister operand = __ PopToRegister(); - LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}); + LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); CallEmitFn(fnImm, dst, operand, imm); __ PushRegister(kWasmS128, dst); } else { LiftoffRegister count = __ PopToRegister(); LiftoffRegister operand = __ PopToRegister(); - LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}); + LiftoffRegister dst = __ GetUnusedRegister(result_rc, {operand}, {}); CallEmitFn(fn, dst, operand, count); __ PushRegister(kWasmS128, dst); @@ -2689,8 +2693,8 @@ class LiftoffCompiler { static constexpr RegClass result_rc = reg_class_for(result_type); LiftoffRegister lhs = __ PopToRegister(); LiftoffRegister dst = src_rc == result_rc - ? __ GetUnusedRegister(result_rc, {lhs}) - : __ GetUnusedRegister(result_rc); + ? __ GetUnusedRegister(result_rc, {lhs}, {}) + : __ GetUnusedRegister(result_rc, {}); fn(dst, lhs, imm.lane); __ PushRegister(ValueType(result_type), dst); } @@ -2716,7 +2720,7 @@ class LiftoffCompiler { (src2_rc == result_rc || pin_src2) ? __ GetUnusedRegister(result_rc, {src1}, LiftoffRegList::ForRegs(src2)) - : __ GetUnusedRegister(result_rc, {src1}); + : __ GetUnusedRegister(result_rc, {src1}, {}); fn(dst, src1, src2, imm.lane); __ PushRegister(kWasmS128, dst); } @@ -2879,9 +2883,38 @@ class LiftoffCompiler { void AtomicCompareExchange(FullDecoder* decoder, StoreType type, const MemoryAccessImmediate& imm) { #ifdef V8_TARGET_ARCH_IA32 - // With the current implementation we do not have enough registers on ia32 - // to even get to the platform-specific code. Therefore we bailout early. - unsupported(decoder, kAtomics, "AtomicCompareExchange"); + // On ia32 we don't have enough registers to first pop all the values off + // the stack and then start with the code generation. Instead we do the + // complete address calculation first, so that the address only needs a + // single register. Afterwards we load all remaining values into the + // other registers. 
+ LiftoffRegList pinned; + Register index_reg = pinned.set(__ PeekToRegister(2, pinned)).gp(); + if (BoundsCheckMem(decoder, type.size(), imm.offset, index_reg, pinned, + kDoForceCheck)) { + return; + } + AlignmentCheckMem(decoder, type.size(), imm.offset, index_reg, pinned); + + uint32_t offset = imm.offset; + index_reg = AddMemoryMasking(index_reg, &offset, &pinned); + Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp(); + LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize); + __ emit_i32_add(addr, addr, index_reg); + pinned.clear(LiftoffRegister(index_reg)); + LiftoffRegister new_value = pinned.set(__ PopToRegister(pinned)); + LiftoffRegister expected = pinned.set(__ PopToRegister(pinned)); + + // Pop the index from the stack. + __ cache_state()->stack_state.pop_back(1); + + LiftoffRegister result = expected; + + // We already added the index to addr, so we can just pass no_reg to the + // assembler now. + __ AtomicCompareExchange(addr, no_reg, offset, expected, new_value, result, + type); + __ PushRegister(type.value_type(), result); return; #else ValueType result_type = type.value_type(); diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h index f24c95008c98c2..8937919c30094a 100644 --- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h +++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h @@ -603,7 +603,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueType type) { DCHECK_NE(dst_offset, src_offset); - LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); + LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {}); Fill(reg, src_offset, type); Spill(dst_offset, reg, type); } @@ -646,13 +646,13 @@ void LiftoffAssembler::Spill(int offset, WasmValue value) { MemOperand dst = liftoff::GetStackSlot(offset); switch (value.type().kind()) { case ValueType::kI32: { - LiftoffRegister tmp = GetUnusedRegister(kGpReg); + LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); sw(tmp.gp(), dst); break; } case ValueType::kI64: { - LiftoffRegister tmp = GetUnusedRegister(kGpRegPair); + LiftoffRegister tmp = GetUnusedRegister(kGpRegPair, {}); int32_t low_word = value.to_i64(); int32_t high_word = value.to_i64() >> 32; @@ -2251,7 +2251,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { } void LiftoffAssembler::CallTrapCallbackForTesting() { - PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp()); + PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); } diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h index 292f8032b8fc3d..e07ed7a393cf00 100644 --- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h +++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h @@ -532,7 +532,7 @@ void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, ValueType type) { DCHECK_NE(dst_offset, src_offset); - LiftoffRegister reg = GetUnusedRegister(reg_class_for(type)); + LiftoffRegister reg = GetUnusedRegister(reg_class_for(type), {}); Fill(reg, src_offset, type); Spill(dst_offset, reg, type); } @@ -582,13 +582,13 @@ void 
LiftoffAssembler::Spill(int offset, WasmValue value) { MemOperand dst = liftoff::GetStackSlot(offset); switch (value.type().kind()) { case ValueType::kI32: { - LiftoffRegister tmp = GetUnusedRegister(kGpReg); + LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); sw(tmp.gp(), dst); break; } case ValueType::kI64: { - LiftoffRegister tmp = GetUnusedRegister(kGpReg); + LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); TurboAssembler::li(tmp.gp(), value.to_i64()); sd(tmp.gp(), dst); break; @@ -2197,7 +2197,7 @@ void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { } void LiftoffAssembler::CallTrapCallbackForTesting() { - PrepareCallCFunction(0, GetUnusedRegister(kGpReg).gp()); + PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp()); CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0); } diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn index 2c9363130a385f..66e1473ed28d3a 100644 --- a/deps/v8/test/cctest/BUILD.gn +++ b/deps/v8/test/cctest/BUILD.gn @@ -196,6 +196,7 @@ v8_source_set("cctest_sources") { "test-code-pages.cc", "test-code-stub-assembler.cc", "test-compiler.cc", + "test-concurrent-descriptor-array.cc", "test-constantpool.cc", "test-conversions.cc", "test-cpu-profiler.cc", @@ -204,6 +205,7 @@ v8_source_set("cctest_sources") { "test-debug.cc", "test-decls.cc", "test-deoptimization.cc", + "test-descriptor-array.cc", "test-dictionary.cc", "test-diy-fp.cc", "test-double.cc", diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc index 09e390a6931630..3110681c1037d5 100644 --- a/deps/v8/test/cctest/cctest.cc +++ b/deps/v8/test/cctest/cctest.cc @@ -150,6 +150,18 @@ void CcTest::PreciseCollectAllGarbage(i::Isolate* isolate) { i::GarbageCollectionReason::kTesting); } +i::Handle CcTest::MakeString(const char* str) { + i::Isolate* isolate = CcTest::i_isolate(); + i::Factory* factory = isolate->factory(); + return factory->InternalizeUtf8String(str); +} + +i::Handle CcTest::MakeName(const char* str, int suffix) { + i::EmbeddedVector buffer; + SNPrintF(buffer, "%s%d", str, suffix); + return CcTest::MakeString(buffer.begin()); +} + v8::base::RandomNumberGenerator* CcTest::random_number_generator() { return InitIsolateOnce()->random_number_generator(); } diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h index 74dd9d18225a3e..7bb82294115c7b 100644 --- a/deps/v8/test/cctest/cctest.h +++ b/deps/v8/test/cctest/cctest.h @@ -141,6 +141,9 @@ class CcTest { static void CollectAllAvailableGarbage(i::Isolate* isolate = nullptr); static void PreciseCollectAllGarbage(i::Isolate* isolate = nullptr); + static i::Handle MakeString(const char* str); + static i::Handle MakeName(const char* str, int suffix); + static v8::base::RandomNumberGenerator* random_number_generator(); static v8::Local global(); diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc index a371325340d177..d0b91de7d2da1d 100644 --- a/deps/v8/test/cctest/test-code-stub-assembler.cc +++ b/deps/v8/test/cctest/test-code-stub-assembler.cc @@ -42,18 +42,6 @@ template using TVariable = TypedCodeAssemblerVariable; using PromiseResolvingFunctions = TorqueStructPromiseResolvingFunctions; -Handle MakeString(const char* str) { - Isolate* isolate = CcTest::i_isolate(); - Factory* factory = isolate->factory(); - return factory->InternalizeUtf8String(str); -} - -Handle MakeName(const char* str, int suffix) { - EmbeddedVector 
buffer; - SNPrintF(buffer, "%s%d", str, suffix); - return MakeString(buffer.begin()); -} - intptr_t sum10(intptr_t a0, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4, intptr_t a5, intptr_t a6, intptr_t a7, intptr_t a8, intptr_t a9) { @@ -1091,7 +1079,7 @@ TEST(TransitionLookup) { name = factory->NewSymbol(); } else { int random_key = rand_gen.NextInt(Smi::kMaxValue); - name = MakeName("p", random_key); + name = CcTest::MakeName("p", random_key); } keys[i] = name; @@ -3646,8 +3634,8 @@ TEST(TestCallBuiltinInlineTrampoline) { options.use_pc_relative_calls_and_jumps = false; options.isolate_independent_code = false; FunctionTester ft(asm_tester.GenerateCode(options), kNumParams); - MaybeHandle result = ft.Call(MakeString("abcdef")); - CHECK(String::Equals(isolate, MakeString("abcdefabcdef"), + MaybeHandle result = ft.Call(CcTest::MakeString("abcdef")); + CHECK(String::Equals(isolate, CcTest::MakeString("abcdefabcdef"), Handle::cast(result.ToHandleChecked()))); } @@ -3672,8 +3660,8 @@ DISABLED_TEST(TestCallBuiltinIndirectLoad) { options.use_pc_relative_calls_and_jumps = false; options.isolate_independent_code = true; FunctionTester ft(asm_tester.GenerateCode(options), kNumParams); - MaybeHandle result = ft.Call(MakeString("abcdef")); - CHECK(String::Equals(isolate, MakeString("abcdefabcdef"), + MaybeHandle result = ft.Call(CcTest::MakeString("abcdef")); + CHECK(String::Equals(isolate, CcTest::MakeString("abcdefabcdef"), Handle::cast(result.ToHandleChecked()))); } diff --git a/deps/v8/test/cctest/test-concurrent-descriptor-array.cc b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc new file mode 100644 index 00000000000000..43c686d4e0d4ec --- /dev/null +++ b/deps/v8/test/cctest/test-concurrent-descriptor-array.cc @@ -0,0 +1,128 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/api/api.h" +#include "src/base/platform/semaphore.h" +#include "src/handles/handles-inl.h" +#include "src/handles/local-handles-inl.h" +#include "src/handles/persistent-handles.h" +#include "src/heap/heap.h" +#include "src/heap/local-heap.h" +#include "test/cctest/cctest.h" +#include "test/cctest/heap/heap-utils.h" + +namespace v8 { +namespace internal { + +static constexpr int kNumHandles = kHandleBlockSize * 2 + kHandleBlockSize / 2; + +namespace { + +class PersistentHandlesThread final : public v8::base::Thread { + public: + PersistentHandlesThread(Heap* heap, std::vector> handles, + std::unique_ptr ph, + Handle name, base::Semaphore* sema_started) + : v8::base::Thread(base::Thread::Options("ThreadWithLocalHeap")), + heap_(heap), + handles_(std::move(handles)), + ph_(std::move(ph)), + name_(name), + sema_started_(sema_started) {} + + void Run() override { + LocalHeap local_heap(heap_, std::move(ph_)); + LocalHandleScope scope(&local_heap); + Address object = handles_[0]->ptr(); + + for (int i = 0; i < kNumHandles; i++) { + handles_.push_back( + Handle::cast(local_heap.NewPersistentHandle(object))); + } + + sema_started_->Signal(); + + for (Handle handle : handles_) { + // Lookup the named property on the {map}. 
+ CHECK(name_->IsUniqueName()); + Handle map(handle->map(), &local_heap); + + Handle descriptors( + map->synchronized_instance_descriptors(), &local_heap); + bool is_background_thread = true; + InternalIndex const number = + descriptors->Search(*name_, *map, is_background_thread); + CHECK(number.is_found()); + } + + CHECK_EQ(handles_.size(), kNumHandles * 2); + + CHECK(!ph_); + ph_ = local_heap.DetachPersistentHandles(); + } + + Heap* heap_; + std::vector> handles_; + std::unique_ptr ph_; + Handle name_; + base::Semaphore* sema_started_; +}; + +// Uses linear search on a flat object, with up to 8 elements. +TEST(LinearSearchFlatObject) { + CcTest::InitializeVM(); + FLAG_local_heaps = true; + Isolate* isolate = CcTest::i_isolate(); + + std::unique_ptr ph = isolate->NewPersistentHandles(); + std::vector> handles; + + auto factory = isolate->factory(); + HandleScope handle_scope(isolate); + + Handle function = + factory->NewFunctionForTest(factory->empty_string()); + Handle js_object = factory->NewJSObject(function); + Handle name = CcTest::MakeString("property"); + Handle value = CcTest::MakeString("dummy_value"); + // For the default constructor function no in-object properties are reserved + // hence adding a single property will initialize the property-array. + JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, name, value, + NONE) + .Check(); + + Address object = js_object->ptr(); + for (int i = 0; i < kNumHandles; i++) { + handles.push_back(Handle::cast(ph->NewHandle(object))); + } + + Handle persistent_name = Handle::cast(ph->NewHandle(name->ptr())); + + base::Semaphore sema_started(0); + + // Pass persistent handles to background thread. + std::unique_ptr thread(new PersistentHandlesThread( + isolate->heap(), std::move(handles), std::move(ph), persistent_name, + &sema_started)); + CHECK(thread->Start()); + + sema_started.Wait(); + + // Exercise descriptor in main thread too. + for (int i = 0; i < 7; ++i) { + Handle filler_name = CcTest::MakeName("filler_property_", i); + Handle filler_value = CcTest::MakeString("dummy_value"); + JSObject::DefinePropertyOrElementIgnoreAttributes(js_object, filler_name, + filler_value, NONE) + .Check(); + } + CHECK_EQ(js_object->map().NumberOfOwnDescriptors(), 8); + + thread->Join(); +} + +} // anonymous namespace + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/test/cctest/test-descriptor-array.cc b/deps/v8/test/cctest/test-descriptor-array.cc new file mode 100644 index 00000000000000..9a7b9bc1295b46 --- /dev/null +++ b/deps/v8/test/cctest/test-descriptor-array.cc @@ -0,0 +1,424 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/logging.h" +#include "src/codegen/code-stub-assembler.h" +#include "src/common/globals.h" +#include "src/objects/descriptor-array.h" +#include "src/objects/property-details.h" +#include "src/objects/string-inl.h" +#include "src/objects/transitions-inl.h" +#include "test/cctest/cctest.h" +#include "test/cctest/compiler/code-assembler-tester.h" +#include "test/cctest/compiler/function-tester.h" +#include "test/cctest/test-transitions.h" + +namespace v8 { +namespace internal { + +namespace { + +using Label = compiler::CodeAssemblerLabel; +template +using TVariable = compiler::TypedCodeAssemblerVariable; + +Handle NewNameWithHash(Isolate* isolate, const char* str, uint32_t hash, + bool is_integer) { + uint32_t hash_field = hash << Name::kHashShift; + + static_assert(Name::kNofHashBitFields == 2, "This test needs updating"); + static_assert(Name::kHashNotComputedMask == 1, "This test needs updating"); + static_assert(Name::kIsNotIntegerIndexMask == 2, "This test needs updating"); + + if (!is_integer) { + hash_field |= Name::kIsNotIntegerIndexMask; + } + Handle name = isolate->factory()->NewOneByteInternalizedString( + OneByteVector(str), hash_field); + name->set_hash_field(hash_field); + CHECK(name->IsUniqueName()); + return name; +} + +template +MaybeHandle Call(Isolate* isolate, Handle function, + Args... args) { + const int nof_args = sizeof...(Args); + Handle call_args[] = {args...}; + Handle receiver = isolate->factory()->undefined_value(); + return Execution::Call(isolate, function, receiver, nof_args, call_args); +} + +void CheckDescriptorArrayLookups(Isolate* isolate, Handle map, + std::vector>& names, + Handle csa_lookup) { + // Test C++ implementation. + { + DisallowHeapAllocation no_gc; + DescriptorArray descriptors = map->instance_descriptors(); + DCHECK(descriptors.IsSortedNoDuplicates()); + int nof_descriptors = descriptors.number_of_descriptors(); + + for (size_t i = 0; i < names.size(); ++i) { + Name name = *names[i]; + InternalIndex index = descriptors.Search(name, nof_descriptors, false); + CHECK(index.is_found()); + CHECK_EQ(i, index.as_uint32()); + } + } + + // Test CSA implementation. + if (!FLAG_jitless) { + for (size_t i = 0; i < names.size(); ++i) { + Handle name_index = + Call(isolate, csa_lookup, map, names[i]).ToHandleChecked(); + CHECK(name_index->IsSmi()); + CHECK_EQ(DescriptorArray::ToKeyIndex(static_cast(i)), + Smi::ToInt(*name_index)); + } + } +} + +void CheckTransitionArrayLookups(Isolate* isolate, + Handle transitions, + std::vector>& maps, + Handle csa_lookup) { + // Test C++ implementation. + { + DisallowHeapAllocation no_gc; + DCHECK(transitions->IsSortedNoDuplicates()); + + for (size_t i = 0; i < maps.size(); ++i) { + Map expected_map = *maps[i]; + Name name = + expected_map.instance_descriptors().GetKey(expected_map.LastAdded()); + + Map map = transitions->SearchAndGetTargetForTesting(PropertyKind::kData, + name, NONE); + CHECK(!map.is_null()); + CHECK_EQ(expected_map, map); + } + } + + // Test CSA implementation. + if (!FLAG_jitless) { + for (size_t i = 0; i < maps.size(); ++i) { + Handle expected_map = maps[i]; + Handle name(expected_map->instance_descriptors().GetKey( + expected_map->LastAdded()), + isolate); + + Handle transition_map = + Call(isolate, csa_lookup, transitions, name).ToHandleChecked(); + CHECK(transition_map->IsMap()); + CHECK_EQ(*expected_map, *transition_map); + } + } +} + +// Creates function with (Map, Name) arguments. 
Returns Smi with the index of +// the name value of the found descriptor (DescriptorArray::ToKeyIndex()) +// or null otherwise. +Handle CreateCsaDescriptorArrayLookup(Isolate* isolate) { + // We are not allowed to generate code in jitless mode. + if (FLAG_jitless) return Handle(); + + // Preallocate handle for the result in the current handle scope. + Handle result_function(JSFunction{}, isolate); + + const int kNumParams = 2; + + compiler::CodeAssemblerTester asm_tester( + isolate, kNumParams + 1, // +1 to include receiver. + Code::STUB); + { + CodeStubAssembler m(asm_tester.state()); + + TNode map = m.CAST(m.Parameter(1)); + TNode unique_name = m.CAST(m.Parameter(2)); + + Label passed(&m), failed(&m); + Label if_found(&m), if_not_found(&m); + TVariable var_name_index(&m); + + TNode bit_field3 = m.LoadMapBitField3(map); + TNode descriptors = m.LoadMapDescriptors(map); + + m.DescriptorLookup(unique_name, descriptors, bit_field3, &if_found, + &var_name_index, &if_not_found); + + m.BIND(&if_found); + m.Return(m.SmiTag(var_name_index.value())); + + m.BIND(&if_not_found); + m.Return(m.NullConstant()); + } + + { + compiler::FunctionTester ft(asm_tester.GenerateCode(), kNumParams); + // Copy function value to a handle created in the outer handle scope. + *(result_function.location()) = ft.function->ptr(); + } + + return result_function; +} + +// Creates function with (TransitionArray, Name) arguments. Returns transition +// map if transition is found or null otherwise. +Handle CreateCsaTransitionArrayLookup(Isolate* isolate) { + // We are not allowed to generate code in jitless mode. + if (FLAG_jitless) return Handle(); + + // Preallocate handle for the result in the current handle scope. + Handle result_function(JSFunction{}, isolate); + + const int kNumParams = 2; + compiler::CodeAssemblerTester asm_tester( + isolate, kNumParams + 1, // +1 to include receiver. + Code::STUB); + { + CodeStubAssembler m(asm_tester.state()); + + TNode transitions = m.CAST(m.Parameter(1)); + TNode unique_name = m.CAST(m.Parameter(2)); + + Label passed(&m), failed(&m); + Label if_found(&m), if_not_found(&m); + TVariable var_name_index(&m); + + m.TransitionLookup(unique_name, transitions, &if_found, &var_name_index, + &if_not_found); + + m.BIND(&if_found); + { + STATIC_ASSERT(kData == 0); + STATIC_ASSERT(NONE == 0); + const int kKeyToTargetOffset = (TransitionArray::kEntryTargetIndex - + TransitionArray::kEntryKeyIndex) * + kTaggedSize; + TNode transition_map = m.CAST(m.GetHeapObjectAssumeWeak( + m.LoadArrayElement(transitions, WeakFixedArray::kHeaderSize, + var_name_index.value(), kKeyToTargetOffset))); + m.Return(transition_map); + } + + m.BIND(&if_not_found); + m.Return(m.NullConstant()); + } + + { + compiler::FunctionTester ft(asm_tester.GenerateCode(), kNumParams); + // Copy function value to a handle created in the outer handle scope. + *(result_function.location()) = ft.function->ptr(); + } + + return result_function; +} + +} // namespace + +TEST(DescriptorArrayHashCollisionMassive) { + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + HandleScope handle_scope(isolate); + + static_assert(Name::kNofHashBitFields == 2, "This test needs updating"); + + std::vector> names; + + // Use the same hash value for all names. + uint32_t hash = + static_cast(isolate->GenerateIdentityHash(Name::kHashBitMask)); + + for (int i = 0; i < kMaxNumberOfDescriptors / 2; ++i) { + // Add pairs of names having the same base hash value but having different + // values of is_integer bit. 
+ bool first_is_integer = (i & 1) != 0; + bool second_is_integer = (i & 2) != 0; + + names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer)); + names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer)); + } + + // Create descriptor array with the created names by appending fields to some + // map. DescriptorArray marking relies on the fact that it's attached to an + // owning map. + Handle map = Map::Create(isolate, 0); + + Handle any_type = FieldType::Any(isolate); + + for (size_t i = 0; i < names.size(); ++i) { + map = Map::CopyWithField(isolate, map, names[i], any_type, NONE, + PropertyConstness::kMutable, + Representation::Tagged(), OMIT_TRANSITION) + .ToHandleChecked(); + } + + Handle csa_lookup = CreateCsaDescriptorArrayLookup(isolate); + + CheckDescriptorArrayLookups(isolate, map, names, csa_lookup); + + // Sort descriptor array and check it again. + map->instance_descriptors().Sort(); + CheckDescriptorArrayLookups(isolate, map, names, csa_lookup); +} + +TEST(DescriptorArrayHashCollision) { + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + HandleScope handle_scope(isolate); + + static_assert(Name::kNofHashBitFields == 2, "This test needs updating"); + + std::vector> names; + uint32_t hash = 0; + + for (int i = 0; i < kMaxNumberOfDescriptors / 2; ++i) { + if (i % 2 == 0) { + // Change hash value for every pair of names. + hash = static_cast( + isolate->GenerateIdentityHash(Name::kHashBitMask)); + } + + // Add pairs of names having the same base hash value but having different + // values of is_integer bit. + bool first_is_integer = (i & 1) != 0; + bool second_is_integer = (i & 2) != 0; + + names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer)); + names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer)); + } + + // Create descriptor array with the created names by appending fields to some + // map. DescriptorArray marking relies on the fact that it's attached to an + // owning map. + Handle map = Map::Create(isolate, 0); + + Handle any_type = FieldType::Any(isolate); + + for (size_t i = 0; i < names.size(); ++i) { + map = Map::CopyWithField(isolate, map, names[i], any_type, NONE, + PropertyConstness::kMutable, + Representation::Tagged(), OMIT_TRANSITION) + .ToHandleChecked(); + } + + Handle csa_lookup = CreateCsaDescriptorArrayLookup(isolate); + + CheckDescriptorArrayLookups(isolate, map, names, csa_lookup); + + // Sort descriptor array and check it again. + map->instance_descriptors().Sort(); + CheckDescriptorArrayLookups(isolate, map, names, csa_lookup); +} + +TEST(TransitionArrayHashCollisionMassive) { + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + HandleScope handle_scope(isolate); + + static_assert(Name::kNofHashBitFields == 2, "This test needs updating"); + + std::vector> names; + + // Use the same hash value for all names. + uint32_t hash = + static_cast(isolate->GenerateIdentityHash(Name::kHashBitMask)); + + for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) { + // Add pairs of names having the same base hash value but having different + // values of is_integer bit. + bool first_is_integer = (i & 1) != 0; + bool second_is_integer = (i & 2) != 0; + + names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer)); + names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer)); + } + + // Create transitions for each name. 
+ Handle root_map = Map::Create(isolate, 0); + + std::vector> maps; + + Handle any_type = FieldType::Any(isolate); + + for (size_t i = 0; i < names.size(); ++i) { + Handle map = + Map::CopyWithField(isolate, root_map, names[i], any_type, NONE, + PropertyConstness::kMutable, + Representation::Tagged(), INSERT_TRANSITION) + .ToHandleChecked(); + maps.push_back(map); + } + + Handle csa_lookup = CreateCsaTransitionArrayLookup(isolate); + + Handle transition_array( + TestTransitionsAccessor(isolate, root_map).transitions(), isolate); + + CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup); + + // Sort transition array and check it again. + transition_array->Sort(); + CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup); +} + +TEST(TransitionArrayHashCollision) { + CcTest::InitializeVM(); + Isolate* isolate = CcTest::i_isolate(); + HandleScope handle_scope(isolate); + + static_assert(Name::kNofHashBitFields == 2, "This test needs updating"); + + std::vector> names; + + // Use the same hash value for all names. + uint32_t hash = + static_cast(isolate->GenerateIdentityHash(Name::kHashBitMask)); + + for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions / 2; ++i) { + if (i % 2 == 0) { + // Change hash value for every pair of names. + hash = static_cast( + isolate->GenerateIdentityHash(Name::kHashBitMask)); + } + // Add pairs of names having the same base hash value but having different + // values of is_integer bit. + bool first_is_integer = (i & 1) != 0; + bool second_is_integer = (i & 2) != 0; + + names.push_back(NewNameWithHash(isolate, "a", hash, first_is_integer)); + names.push_back(NewNameWithHash(isolate, "b", hash, second_is_integer)); + } + + // Create transitions for each name. + Handle root_map = Map::Create(isolate, 0); + + std::vector> maps; + + Handle any_type = FieldType::Any(isolate); + + for (size_t i = 0; i < names.size(); ++i) { + Handle map = + Map::CopyWithField(isolate, root_map, names[i], any_type, NONE, + PropertyConstness::kMutable, + Representation::Tagged(), INSERT_TRANSITION) + .ToHandleChecked(); + maps.push_back(map); + } + + Handle csa_lookup = CreateCsaTransitionArrayLookup(isolate); + + Handle transition_array( + TestTransitionsAccessor(isolate, root_map).transitions(), isolate); + + CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup); + + // Sort transition array and check it again. 
+ transition_array->Sort(); + CheckTransitionArrayLookups(isolate, transition_array, maps, csa_lookup); +} + +} // namespace internal +} // namespace v8 diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc index 496fa666844783..39ecc460619d17 100644 --- a/deps/v8/test/cctest/test-disasm-ia32.cc +++ b/deps/v8/test/cctest/test-disasm-ia32.cc @@ -473,6 +473,7 @@ TEST(DisasmIa320) { __ movapd(xmm0, xmm1); __ movapd(xmm0, Operand(edx, 4)); + __ movupd(xmm0, Operand(edx, 4)); __ movd(xmm0, edi); __ movd(xmm0, Operand(ebx, ecx, times_4, 10000)); @@ -689,6 +690,7 @@ TEST(DisasmIa320) { __ vmovaps(xmm0, xmm1); __ vmovapd(xmm0, xmm1); __ vmovapd(xmm0, Operand(ebx, ecx, times_4, 10000)); + __ vmovupd(xmm0, Operand(ebx, ecx, times_4, 10000)); __ vshufps(xmm0, xmm1, xmm2, 3); __ vshufps(xmm0, xmm1, Operand(edx, 4), 3); __ vhaddps(xmm0, xmm1, xmm2); diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc index 8e9eadca25e94b..290a57653a68a7 100644 --- a/deps/v8/test/cctest/test-disasm-x64.cc +++ b/deps/v8/test/cctest/test-disasm-x64.cc @@ -813,6 +813,7 @@ TEST(DisasmX64) { __ vpblendw(xmm1, xmm2, xmm3, 23); __ vpblendw(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 23); __ vpalignr(xmm1, xmm2, xmm3, 4); + __ vpalignr(xmm1, xmm2, Operand(rbx, rcx, times_4, 10000), 4); __ vblendvpd(xmm1, xmm2, xmm3, xmm4); diff --git a/deps/v8/test/cctest/test-elements-kind.cc b/deps/v8/test/cctest/test-elements-kind.cc index 2f6ec6c164bd88..a9460a89f97f37 100644 --- a/deps/v8/test/cctest/test-elements-kind.cc +++ b/deps/v8/test/cctest/test-elements-kind.cc @@ -27,19 +27,6 @@ namespace test_elements_kind { namespace { -Handle MakeString(const char* str) { - Isolate* isolate = CcTest::i_isolate(); - Factory* factory = isolate->factory(); - return factory->InternalizeUtf8String(str); -} - - -Handle MakeName(const char* str, int suffix) { - EmbeddedVector buffer; - SNPrintF(buffer, "%s%d", str, suffix); - return MakeString(buffer.begin()); -} - template bool EQUALS(Isolate* isolate, Handle left, Handle right) { if (*left == *right) return true; @@ -127,7 +114,7 @@ TEST(JSObjectAddingProperties) { // for the default constructor function no in-object properties are reserved // hence adding a single property will initialize the property-array - Handle name = MakeName("property", 0); + Handle name = CcTest::MakeName("property", 0); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); CHECK_NE(object->map(), *previous_map); @@ -162,7 +149,7 @@ TEST(JSObjectInObjectAddingProperties) { // we have reserved space for in-object properties, hence adding up to // |nof_inobject_properties| will not create a property store for (int i = 0; i < nof_inobject_properties; i++) { - Handle name = MakeName("property", i); + Handle name = CcTest::MakeName("property", i); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); } @@ -174,7 +161,7 @@ TEST(JSObjectInObjectAddingProperties) { // adding one more property will not fit in the in-object properties, thus // creating a property store int index = nof_inobject_properties + 1; - Handle name = MakeName("property", index); + Handle name = CcTest::MakeName("property", index); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); CHECK_NE(object->map(), *previous_map); @@ -205,7 +192,7 @@ TEST(JSObjectAddingElements) { CHECK(EQUALS(isolate, object->elements(), empty_fixed_array)); // Adding an indexed element initializes the 
elements array - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); // no change in elements_kind => no map transition @@ -217,7 +204,7 @@ TEST(JSObjectAddingElements) { // Adding more consecutive elements without a change in the backing store int non_dict_backing_store_limit = 100; for (int i = 1; i < non_dict_backing_store_limit; i++) { - name = MakeName("", i); + name = CcTest::MakeName("", i); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); } @@ -229,7 +216,7 @@ TEST(JSObjectAddingElements) { // Adding an element at an very large index causes a change to // DICTIONARY_ELEMENTS - name = MakeString("100000000"); + name = CcTest::MakeString("100000000"); JSObject::DefinePropertyOrElementIgnoreAttributes(object, name, value, NONE) .Check(); // change in elements_kind => map transition @@ -260,7 +247,7 @@ TEST(JSArrayAddingProperties) { // for the default constructor function no in-object properties are reserved // hence adding a single property will initialize the property-array - Handle name = MakeName("property", 0); + Handle name = CcTest::MakeName("property", 0); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value, NONE) .Check(); // No change in elements_kind but added property => new map @@ -292,7 +279,7 @@ TEST(JSArrayAddingElements) { CHECK_EQ(0, Smi::ToInt(array->length())); // Adding an indexed element initializes the elements array - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value, NONE) .Check(); // no change in elements_kind => no map transition @@ -305,7 +292,7 @@ TEST(JSArrayAddingElements) { // Adding more consecutive elements without a change in the backing store int non_dict_backing_store_limit = 100; for (int i = 1; i < non_dict_backing_store_limit; i++) { - name = MakeName("", i); + name = CcTest::MakeName("", i); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value, NONE) .Check(); } @@ -319,7 +306,7 @@ TEST(JSArrayAddingElements) { // Adding an element at an very large index causes a change to // DICTIONARY_ELEMENTS int index = 100000000; - name = MakeName("", index); + name = CcTest::MakeName("", index); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value, NONE) .Check(); // change in elements_kind => map transition @@ -340,7 +327,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { Handle name; Handle value_smi(Smi::FromInt(42), isolate); - Handle value_string(MakeString("value")); + Handle value_string(CcTest::MakeString("value")); Handle value_double = factory->NewNumber(3.1415); Handle array = @@ -350,7 +337,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { CHECK_EQ(0, Smi::ToInt(array->length())); // `array[0] = smi_value` doesn't change the elements_kind - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); @@ -360,7 +347,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { CHECK_EQ(1, Smi::ToInt(array->length())); // `delete array[0]` does not alter length, but changes the elments_kind - name = MakeString("0"); + name = CcTest::MakeString("0"); CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false)); CHECK_NE(array->map(), *previous_map); CHECK_EQ(HOLEY_SMI_ELEMENTS, array->map().elements_kind()); @@ -368,11 +355,11 @@ 
TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { previous_map = handle(array->map(), isolate); // add a couple of elements again - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); - name = MakeString("1"); + name = CcTest::MakeString("1"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); @@ -381,7 +368,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { CHECK_EQ(2, Smi::ToInt(array->length())); // Adding a string to the array changes from FAST_HOLEY_SMI to FAST_HOLEY - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_string, NONE) .Check(); @@ -391,14 +378,14 @@ TEST(JSArrayAddingElementsGeneralizingiFastSmiElements) { previous_map = handle(array->map(), isolate); // We don't transition back to FAST_SMI even if we remove the string - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); CHECK_EQ(array->map(), *previous_map); // Adding a double doesn't change the map either - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_double, NONE) .Check(); @@ -414,7 +401,7 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) { Handle name; Handle value_smi(Smi::FromInt(42), isolate); - Handle value_string(MakeString("value")); + Handle value_string(CcTest::MakeString("value")); Handle array = factory->NewJSArray(ElementsKind::PACKED_ELEMENTS, 0, 0); @@ -423,7 +410,7 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) { CHECK_EQ(0, Smi::ToInt(array->length())); // `array[0] = smi_value` doesn't change the elements_kind - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); @@ -433,7 +420,7 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) { CHECK_EQ(1, Smi::ToInt(array->length())); // `delete array[0]` does not alter length, but changes the elments_kind - name = MakeString("0"); + name = CcTest::MakeString("0"); CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false)); CHECK_NE(array->map(), *previous_map); CHECK_EQ(HOLEY_ELEMENTS, array->map().elements_kind()); @@ -441,11 +428,11 @@ TEST(JSArrayAddingElementsGeneralizingFastElements) { previous_map = handle(array->map(), isolate); // add a couple of elements, elements_kind stays HOLEY - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_string, NONE) .Check(); - name = MakeString("1"); + name = CcTest::MakeString("1"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); @@ -463,7 +450,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { Handle name; Handle value_smi(Smi::FromInt(42), isolate); - Handle value_string(MakeString("value")); + Handle value_string(CcTest::MakeString("value")); Handle value_double = factory->NewNumber(3.1415); Handle array = @@ -471,7 +458,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { Handle previous_map(array->map(), isolate); // `array[0] = value_double` changes |elements_kind| to PACKED_DOUBLE_ELEMENTS - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_double, NONE) .Check(); @@ 
-481,7 +468,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { previous_map = handle(array->map(), isolate); // `array[1] = value_smi` doesn't alter the |elements_kind| - name = MakeString("1"); + name = CcTest::MakeString("1"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_smi, NONE) .Check(); @@ -490,7 +477,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { CHECK_EQ(2, Smi::ToInt(array->length())); // `delete array[0]` does not alter length, but changes the elments_kind - name = MakeString("0"); + name = CcTest::MakeString("0"); CHECK(JSReceiver::DeletePropertyOrElement(array, name).FromMaybe(false)); CHECK_NE(array->map(), *previous_map); CHECK_EQ(HOLEY_DOUBLE_ELEMENTS, array->map().elements_kind()); @@ -498,7 +485,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { previous_map = handle(array->map(), isolate); // filling the hole `array[0] = value_smi` again doesn't transition back - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_double, NONE) .Check(); @@ -507,7 +494,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { CHECK_EQ(2, Smi::ToInt(array->length())); // Adding a string to the array changes to elements_kind PACKED_ELEMENTS - name = MakeString("1"); + name = CcTest::MakeString("1"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_string, NONE) .Check(); @@ -517,7 +504,7 @@ TEST(JSArrayAddingElementsGeneralizingiFastDoubleElements) { previous_map = handle(array->map(), isolate); // Adding a double doesn't change the map - name = MakeString("0"); + name = CcTest::MakeString("0"); JSObject::DefinePropertyOrElementIgnoreAttributes(array, name, value_double, NONE) .Check(); diff --git a/deps/v8/test/cctest/test-field-type-tracking.cc b/deps/v8/test/cctest/test-field-type-tracking.cc index d7b672a345c279..740ae05c1eaf81 100644 --- a/deps/v8/test/cctest/test-field-type-tracking.cc +++ b/deps/v8/test/cctest/test-field-type-tracking.cc @@ -43,20 +43,6 @@ const int kPropCount = 7; // Helper functions. 
// -static Handle MakeString(const char* str) { - Isolate* isolate = CcTest::i_isolate(); - Factory* factory = isolate->factory(); - return factory->InternalizeUtf8String(str); -} - - -static Handle MakeName(const char* str, int suffix) { - EmbeddedVector buffer; - SNPrintF(buffer, "%s%d", str, suffix); - return MakeString(buffer.begin()); -} - - static Handle CreateAccessorPair(bool with_getter, bool with_setter) { Isolate* isolate = CcTest::i_isolate(); @@ -339,7 +325,7 @@ class Expectations { SetDataField(property_index, attributes, constness, representation, field_type); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); return Map::CopyWithField(isolate_, map, name, field_type, attributes, constness, representation, INSERT_TRANSITION) .ToHandleChecked(); @@ -351,7 +337,7 @@ class Expectations { int property_index = number_of_properties_++; SetDataConstant(property_index, attributes, value); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); return Map::CopyWithConstant(isolate_, map, name, value, attributes, INSERT_TRANSITION) .ToHandleChecked(); @@ -368,7 +354,7 @@ class Expectations { SetDataField(property_index, attributes, constness, representation, heap_type); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); return Map::TransitionToDataProperty(isolate_, map, name, value, attributes, constness, StoreOrigin::kNamed); } @@ -380,7 +366,7 @@ class Expectations { int property_index = number_of_properties_++; SetDataConstant(property_index, attributes, value); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); return Map::TransitionToDataProperty(isolate_, map, name, value, attributes, PropertyConstness::kConst, StoreOrigin::kNamed); @@ -396,7 +382,7 @@ class Expectations { SetDataField(property_index, attributes, constness, representation, heap_type); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); Map target = TransitionsAccessor(isolate_, map) .SearchTransition(*name, kData, attributes); CHECK(!target.is_null()); @@ -410,7 +396,7 @@ class Expectations { int property_index = number_of_properties_++; SetAccessorConstant(property_index, attributes, pair); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); Descriptor d = Descriptor::AccessorConstant(name, pair, attributes); return Map::CopyInsertDescriptor(isolate_, map, &d, INSERT_TRANSITION); @@ -424,7 +410,7 @@ class Expectations { int property_index = number_of_properties_++; SetAccessorConstant(property_index, attributes, getter, setter); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); CHECK(!getter->IsNull(isolate_) || !setter->IsNull(isolate_)); Factory* factory = isolate_->factory(); @@ -451,7 +437,7 @@ class Expectations { int property_index = number_of_properties_++; SetAccessorConstant(property_index, attributes, pair); - Handle name = MakeName("prop", property_index); + Handle name = CcTest::MakeName("prop", property_index); Isolate* isolate = CcTest::i_isolate(); Handle getter(pair->getter(), isolate); @@ -1052,7 +1038,8 @@ namespace { // where "p2A" and "p2B" differ only in the attributes. 
// void TestReconfigureDataFieldAttribute_GeneralizeField( - const CRFTData& from, const CRFTData& to, const CRFTData& expected) { + const CRFTData& from, const CRFTData& to, const CRFTData& expected, + bool expected_deprecation) { Isolate* isolate = CcTest::i_isolate(); Expectations expectations(isolate); @@ -1121,24 +1108,29 @@ void TestReconfigureDataFieldAttribute_GeneralizeField( CHECK_NE(*map2, *new_map); CHECK(expectations2.Check(*map2)); - // |map| should be deprecated and |new_map| should match new expectations. for (int i = kSplitProp; i < kPropCount; i++) { expectations.SetDataField(i, expected.constness, expected.representation, expected.type); } - CHECK(map->is_deprecated()); - CHECK(!code_field_type->marked_for_deoptimization()); - CHECK(!code_field_repr->marked_for_deoptimization()); - CHECK(!code_field_const->marked_for_deoptimization()); - CHECK_NE(*map, *new_map); + if (expected_deprecation) { + // |map| should be deprecated and |new_map| should match new expectations. + CHECK(map->is_deprecated()); + CHECK(!code_field_type->marked_for_deoptimization()); + CHECK(!code_field_repr->marked_for_deoptimization()); + CHECK(!code_field_const->marked_for_deoptimization()); + CHECK_NE(*map, *new_map); - CHECK(!new_map->is_deprecated()); - CHECK(expectations.Check(*new_map)); + CHECK(!new_map->is_deprecated()); + CHECK(expectations.Check(*new_map)); - // Update deprecated |map|, it should become |new_map|. - Handle updated_map = Map::Update(isolate, map); - CHECK_EQ(*new_map, *updated_map); - CheckMigrationTarget(isolate, *map, *updated_map); + // Update deprecated |map|, it should become |new_map|. + Handle updated_map = Map::Update(isolate, map); + CHECK_EQ(*new_map, *updated_map); + CheckMigrationTarget(isolate, *map, *updated_map); + } else { + CHECK(!map->is_deprecated()); + CHECK(expectations.Check(*map)); + } } // This test ensures that trivial field generalization (from HeapObject to @@ -1254,22 +1246,22 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeSmiFieldToDouble) { TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kConst, Representation::Smi(), any_type}, {PropertyConstness::kConst, Representation::Double(), any_type}, - {PropertyConstness::kConst, Representation::Double(), any_type}); + {PropertyConstness::kConst, Representation::Double(), any_type}, true); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kConst, Representation::Smi(), any_type}, {PropertyConstness::kMutable, Representation::Double(), any_type}, - {PropertyConstness::kMutable, Representation::Double(), any_type}); + {PropertyConstness::kMutable, Representation::Double(), any_type}, true); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Smi(), any_type}, {PropertyConstness::kConst, Representation::Double(), any_type}, - {PropertyConstness::kMutable, Representation::Double(), any_type}); + {PropertyConstness::kMutable, Representation::Double(), any_type}, true); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Smi(), any_type}, {PropertyConstness::kMutable, Representation::Double(), any_type}, - {PropertyConstness::kMutable, Representation::Double(), any_type}); + {PropertyConstness::kMutable, Representation::Double(), any_type}, true); } TEST(ReconfigureDataFieldAttribute_GeneralizeSmiFieldToTagged) { @@ -1284,22 +1276,26 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeSmiFieldToTagged) { TestReconfigureDataFieldAttribute_GeneralizeField( 
{PropertyConstness::kConst, Representation::Smi(), any_type}, {PropertyConstness::kConst, Representation::HeapObject(), value_type}, - {PropertyConstness::kConst, Representation::Tagged(), any_type}); + {PropertyConstness::kConst, Representation::Tagged(), any_type}, + !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kConst, Representation::Smi(), any_type}, {PropertyConstness::kMutable, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Smi(), any_type}, {PropertyConstness::kConst, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Smi(), any_type}, {PropertyConstness::kMutable, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + !FLAG_modify_field_representation_inplace); } TEST(ReconfigureDataFieldAttribute_GeneralizeDoubleFieldToTagged) { @@ -1314,22 +1310,26 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeDoubleFieldToTagged) { TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kConst, Representation::Double(), any_type}, {PropertyConstness::kConst, Representation::HeapObject(), value_type}, - {PropertyConstness::kConst, Representation::Tagged(), any_type}); + {PropertyConstness::kConst, Representation::Tagged(), any_type}, + FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kConst, Representation::Double(), any_type}, {PropertyConstness::kMutable, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Double(), any_type}, {PropertyConstness::kConst, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace); TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::Double(), any_type}, {PropertyConstness::kMutable, Representation::HeapObject(), value_type}, - {PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + FLAG_unbox_double_fields || !FLAG_modify_field_representation_inplace); } TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjFieldToHeapObj) { @@ -1415,7 +1415,8 @@ TEST(ReconfigureDataFieldAttribute_GeneralizeHeapObjectFieldToTagged) { TestReconfigureDataFieldAttribute_GeneralizeField( {PropertyConstness::kMutable, Representation::HeapObject(), value_type}, {PropertyConstness::kMutable, Representation::Smi(), any_type}, - 
{PropertyConstness::kMutable, Representation::Tagged(), any_type}); + {PropertyConstness::kMutable, Representation::Tagged(), any_type}, + !FLAG_modify_field_representation_inplace); } // Checks that given |map| is deprecated and that it updates to given |new_map| @@ -2121,7 +2122,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) { split_map = map2; } - Handle name = MakeName("prop", i); + Handle name = CcTest::MakeName("prop", i); Map target = TransitionsAccessor(isolate, map2) .SearchTransition(*name, kData, NONE); CHECK(!target.is_null()); @@ -2148,7 +2149,7 @@ TEST(ReconfigurePropertySplitMapTransitionsOverflow) { // Fill in transition tree of |map2| so that it can't have more transitions. for (int i = 0; i < TransitionsAccessor::kMaxNumberOfTransitions; i++) { CHECK(TransitionsAccessor(isolate, map2).CanHaveMoreTransitions()); - Handle name = MakeName("foo", i); + Handle name = CcTest::MakeName("foo", i); Map::CopyWithField(isolate, map2, name, any_type, NONE, PropertyConstness::kMutable, Representation::Smi(), INSERT_TRANSITION) @@ -2367,9 +2368,9 @@ TEST(ElementsKindTransitionFromMapNotOwningDescriptor) { // Add one more transition to |map| in order to prevent descriptors // ownership. CHECK(map->owns_descriptors()); - Map::CopyWithField(isolate, map, MakeString("foo"), any_type, NONE, - PropertyConstness::kMutable, Representation::Smi(), - INSERT_TRANSITION) + Map::CopyWithField(isolate, map, CcTest::MakeString("foo"), any_type, + NONE, PropertyConstness::kMutable, + Representation::Smi(), INSERT_TRANSITION) .ToHandleChecked(); CHECK(!map->owns_descriptors()); @@ -2480,9 +2481,9 @@ TEST(PrototypeTransitionFromMapNotOwningDescriptor) { // Add one more transition to |map| in order to prevent descriptors // ownership. CHECK(map->owns_descriptors()); - Map::CopyWithField(isolate, map, MakeString("foo"), any_type, NONE, - PropertyConstness::kMutable, Representation::Smi(), - INSERT_TRANSITION) + Map::CopyWithField(isolate, map, CcTest::MakeString("foo"), any_type, + NONE, PropertyConstness::kMutable, + Representation::Smi(), INSERT_TRANSITION) .ToHandleChecked(); CHECK(!map->owns_descriptors()); diff --git a/deps/v8/test/cctest/test-local-handles.cc b/deps/v8/test/cctest/test-local-handles.cc index 0e4fc5c7d18237..f4fdaebd0d9c27 100644 --- a/deps/v8/test/cctest/test-local-handles.cc +++ b/deps/v8/test/cctest/test-local-handles.cc @@ -20,6 +20,8 @@ namespace v8 { namespace internal { +namespace { + class LocalHandlesThread final : public v8::base::Thread { public: LocalHandlesThread(Heap* heap, Address object, base::Semaphore* sema_started, @@ -92,5 +94,7 @@ TEST(CreateLocalHandles) { thread->Join(); } +} // anonymous namespace + } // namespace internal } // namespace v8 diff --git a/deps/v8/test/cctest/test-persistent-handles.cc b/deps/v8/test/cctest/test-persistent-handles.cc index 0bb2990d178cf3..b40b3eb0443df3 100644 --- a/deps/v8/test/cctest/test-persistent-handles.cc +++ b/deps/v8/test/cctest/test-persistent-handles.cc @@ -23,6 +23,8 @@ namespace internal { static constexpr int kNumHandles = kHandleBlockSize * 2 + kHandleBlockSize / 2; +namespace { + class PersistentHandlesThread final : public v8::base::Thread { public: PersistentHandlesThread(Heap* heap, std::vector> handles, @@ -110,5 +112,7 @@ TEST(CreatePersistentHandles) { ph->NewHandle(number->ptr()); } +} // anonymous namespace + } // namespace internal } // namespace v8 diff --git a/deps/v8/test/cctest/test-transitions.h b/deps/v8/test/cctest/test-transitions.h index 724eb3d3c544b5..66bbbfa76dd7c2 100644 --- 
a/deps/v8/test/cctest/test-transitions.h +++ b/deps/v8/test/cctest/test-transitions.h @@ -24,6 +24,8 @@ class TestTransitionsAccessor : public TransitionsAccessor { bool IsFullTransitionArrayEncoding() { return encoding() == kFullTransitionArray; } + + TransitionArray transitions() { return TransitionsAccessor::transitions(); } }; } // namespace internal diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc index ebeb05597e4da5..525a73a788caec 100644 --- a/deps/v8/test/cctest/test-unboxed-doubles.cc +++ b/deps/v8/test/cctest/test-unboxed-doubles.cc @@ -45,20 +45,6 @@ static void InitializeVerifiedMapDescriptors( CHECK(layout_descriptor.IsConsistentWithMap(map, true)); } -static Handle MakeString(const char* str) { - Isolate* isolate = CcTest::i_isolate(); - Factory* factory = isolate->factory(); - return factory->InternalizeUtf8String(str); -} - - -static Handle MakeName(const char* str, int suffix) { - EmbeddedVector buffer; - SNPrintF(buffer, "%s%d", str, suffix); - return MakeString(buffer.begin()); -} - - Handle GetObject(const char* name) { return Handle::cast( v8::Utils::OpenHandle(*v8::Local::Cast( @@ -995,13 +981,14 @@ TEST(DescriptorArrayTrimming) { Handle any_type = FieldType::Any(isolate); Handle map = Map::Create(isolate, kFieldCount); for (int i = 0; i < kSplitFieldIndex; i++) { - map = Map::CopyWithField(isolate, map, MakeName("prop", i), any_type, NONE, - PropertyConstness::kMutable, Representation::Smi(), - INSERT_TRANSITION) + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", i), + any_type, NONE, PropertyConstness::kMutable, + Representation::Smi(), INSERT_TRANSITION) .ToHandleChecked(); } - map = Map::CopyWithField(isolate, map, MakeName("dbl", kSplitFieldIndex), - any_type, NONE, PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, + CcTest::MakeName("dbl", kSplitFieldIndex), any_type, + NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); CHECK(map->layout_descriptor().IsConsistentWithMap(*map, true)); @@ -1015,7 +1002,7 @@ TEST(DescriptorArrayTrimming) { Handle tmp_map = map; for (int i = kSplitFieldIndex + 1; i < kFieldCount; i++) { - tmp_map = Map::CopyWithField(isolate, tmp_map, MakeName("dbl", i), + tmp_map = Map::CopyWithField(isolate, tmp_map, CcTest::MakeName("dbl", i), any_type, NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); @@ -1055,14 +1042,15 @@ TEST(DescriptorArrayTrimming) { Handle tmp_map = map; for (int i = kSplitFieldIndex + 1; i < kFieldCount - 1; i++) { - tmp_map = Map::CopyWithField(isolate, tmp_map, MakeName("tagged", i), - any_type, NONE, PropertyConstness::kMutable, - Representation::Tagged(), INSERT_TRANSITION) - .ToHandleChecked(); + tmp_map = + Map::CopyWithField(isolate, tmp_map, CcTest::MakeName("tagged", i), + any_type, NONE, PropertyConstness::kMutable, + Representation::Tagged(), INSERT_TRANSITION) + .ToHandleChecked(); CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true)); } - tmp_map = Map::CopyWithField(isolate, tmp_map, MakeString("dbl"), any_type, - NONE, PropertyConstness::kMutable, + tmp_map = Map::CopyWithField(isolate, tmp_map, CcTest::MakeString("dbl"), + any_type, NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); CHECK(tmp_map->layout_descriptor().IsConsistentWithMap(*tmp_map, true)); @@ -1087,8 +1075,8 @@ TEST(DoScavenge) { Handle any_type = FieldType::Any(isolate); Handle map = 
Map::Create(isolate, 10); - map = Map::CopyWithField(isolate, map, MakeName("prop", 0), any_type, NONE, - PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 0), any_type, + NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); @@ -1153,12 +1141,12 @@ TEST(DoScavengeWithIncrementalWriteBarrier) { Handle any_type = FieldType::Any(isolate); Handle map = Map::Create(isolate, 10); - map = Map::CopyWithField(isolate, map, MakeName("prop", 0), any_type, NONE, - PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 0), any_type, + NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); - map = Map::CopyWithField(isolate, map, MakeName("prop", 1), any_type, NONE, - PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 1), any_type, + NONE, PropertyConstness::kMutable, Representation::Tagged(), INSERT_TRANSITION) .ToHandleChecked(); @@ -1390,14 +1378,14 @@ TEST(LayoutDescriptorSharing) { { Handle map = Map::Create(isolate, 64); for (int i = 0; i < 32; i++) { - Handle name = MakeName("prop", i); + Handle name = CcTest::MakeName("prop", i); map = Map::CopyWithField(isolate, map, name, any_type, NONE, PropertyConstness::kMutable, Representation::Smi(), INSERT_TRANSITION) .ToHandleChecked(); } - split_map = Map::CopyWithField(isolate, map, MakeString("dbl"), any_type, - NONE, PropertyConstness::kMutable, + split_map = Map::CopyWithField(isolate, map, CcTest::MakeString("dbl"), + any_type, NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); } @@ -1408,9 +1396,9 @@ TEST(LayoutDescriptorSharing) { CHECK(split_map->owns_descriptors()); Handle map1 = - Map::CopyWithField(isolate, split_map, MakeString("foo"), any_type, NONE, - PropertyConstness::kMutable, Representation::Double(), - INSERT_TRANSITION) + Map::CopyWithField(isolate, split_map, CcTest::MakeString("foo"), + any_type, NONE, PropertyConstness::kMutable, + Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); CHECK(!split_map->owns_descriptors()); CHECK_EQ(*split_layout_descriptor, split_map->layout_descriptor()); @@ -1421,9 +1409,9 @@ TEST(LayoutDescriptorSharing) { CHECK(map1->layout_descriptor().IsConsistentWithMap(*map1, true)); Handle map2 = - Map::CopyWithField(isolate, split_map, MakeString("bar"), any_type, NONE, - PropertyConstness::kMutable, Representation::Tagged(), - INSERT_TRANSITION) + Map::CopyWithField(isolate, split_map, CcTest::MakeString("bar"), + any_type, NONE, PropertyConstness::kMutable, + Representation::Tagged(), INSERT_TRANSITION) .ToHandleChecked(); // Layout descriptors should not be shared with |split_map|. 
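// Note on the CcTest::MakeString / CcTest::MakeName calls used throughout these
// hunks: they are assumed to mirror the file-local static helpers removed earlier
// in this patch (the shared declaration in cctest.h is not shown here). A minimal
// sketch of that assumed behavior:
//
//   Handle<String> MakeString(const char* str) {
//     // Internalize the C string in the current isolate's factory.
//     return CcTest::i_isolate()->factory()->InternalizeUtf8String(str);
//   }
//   Handle<String> MakeName(const char* str, int suffix) {
//     EmbeddedVector<char, 128> buffer;       // buffer size is an assumption
//     SNPrintF(buffer, "%s%d", str, suffix);  // e.g. "prop0", "dbl7"
//     return MakeString(buffer.begin());
//   }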
@@ -1595,15 +1583,15 @@ static void TestWriteBarrierObjectShiftFieldsRight( Handle func = GetObject("func"); Handle map = Map::Create(isolate, 10); - map = Map::CopyWithConstant(isolate, map, MakeName("prop", 0), func, NONE, - INSERT_TRANSITION) + map = Map::CopyWithConstant(isolate, map, CcTest::MakeName("prop", 0), func, + NONE, INSERT_TRANSITION) .ToHandleChecked(); - map = Map::CopyWithField(isolate, map, MakeName("prop", 1), any_type, NONE, - PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 1), any_type, + NONE, PropertyConstness::kMutable, Representation::Double(), INSERT_TRANSITION) .ToHandleChecked(); - map = Map::CopyWithField(isolate, map, MakeName("prop", 2), any_type, NONE, - PropertyConstness::kMutable, + map = Map::CopyWithField(isolate, map, CcTest::MakeName("prop", 2), any_type, + NONE, PropertyConstness::kMutable, Representation::Tagged(), INSERT_TRANSITION) .ToHandleChecked(); diff --git a/deps/v8/test/mjsunit/compiler/regress-1126249.js b/deps/v8/test/mjsunit/compiler/regress-1126249.js new file mode 100644 index 00000000000000..87f4885305da3c --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1126249.js @@ -0,0 +1,22 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +function foo(b) { + var x = -0; + var y = -0x80000000; + + if (b) { + x = -1; + y = 1; + } + + return (x - y) == -0x80000000; +} + +%PrepareFunctionForOptimization(foo); +assertFalse(foo(true)); +%OptimizeFunctionOnNextCall(foo); +assertFalse(foo(false)); diff --git a/deps/v8/test/mjsunit/compiler/regress-1127319.js b/deps/v8/test/mjsunit/compiler/regress-1127319.js new file mode 100644 index 00000000000000..74d577bdd83e54 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1127319.js @@ -0,0 +1,40 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --interrupt-budget=1024 + +function v1() { + const v4 = Reflect; + const v8 = [11.11]; + const v10 = {__proto__:1111, a:-1, c:RegExp, f:v8, d:1111, e:-1}; + const v12 = [11.11]; + function v13() {} + const v16 = {a:v13, b:v13, c:v13, d:v13, e:v13, f:v13, g:v13, h:v13, i:v13, j:v13}; +} + +function foo() { + let v22 = Number; + v22 = v1; + const v23 = false; + if (v23) { + v22 = Number; + } else { + function v24() { + const v28 = ".Cactus"[0]; + for (let v32 = 0; v32 < 7; v32++) {} + } + new Promise(v24); + try { + for (const v37 of v36) { + const v58 = [cactus,cactus,[] = cactus] = v117; + } + } catch(v119) { + } + } + v22(); +} + +for (let i = 0; i < 10; i++) { + foo(); +} diff --git a/deps/v8/test/mjsunit/compiler/regress-1150649.js b/deps/v8/test/mjsunit/compiler/regress-1150649.js new file mode 100644 index 00000000000000..a193481a3a20dc --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1150649.js @@ -0,0 +1,24 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +function foo(a) { + var y = 0x7fffffff; // 2^31 - 1 + + // Widen the static type of y (this condition never holds). + if (a == NaN) y = NaN; + + // The next condition holds only in the warmup run. It leads to Smi + // (SignedSmall) feedback being collected for the addition below. 
+ if (a) y = -1; + + const z = (y + 1)|0; + return z < 0; +} + +%PrepareFunctionForOptimization(foo); +assertFalse(foo(true)); +%OptimizeFunctionOnNextCall(foo); +assertTrue(foo(false)); diff --git a/deps/v8/test/mjsunit/compiler/regress-1182647.js b/deps/v8/test/mjsunit/compiler/regress-1182647.js new file mode 100644 index 00000000000000..e0582f7cbfb4f1 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1182647.js @@ -0,0 +1,25 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax --verify-heap + +function foo() { + const arr = Array(1000); + + function bar() { + try { ({a: p4nda, b: arr.length}); } catch(e) {} + } + + for (var i = 0; i < 25; i++) bar(); + + /p4nda/.test({}); // Deopt here. + + arr.shift(); +} + +%PrepareFunctionForOptimization(foo); +foo(); +foo(); +%OptimizeFunctionOnNextCall(foo); +foo(); diff --git a/deps/v8/test/mjsunit/compiler/regress-1195777.js b/deps/v8/test/mjsunit/compiler/regress-1195777.js new file mode 100644 index 00000000000000..b122f4f0169af5 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1195777.js @@ -0,0 +1,62 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + + +(function() { + function foo(b) { + let y = (new Date(42)).getMilliseconds(); + let x = -1; + if (b) x = 0xFFFF_FFFF; + return y < Math.max(1 << y, x, 1 + y); + } + assertTrue(foo(true)); + %PrepareFunctionForOptimization(foo); + assertTrue(foo(false)); + %OptimizeFunctionOnNextCall(foo); + assertTrue(foo(true)); +})(); + + +(function() { + function foo(b) { + let x = 0; + if (b) x = -1; + return x == Math.max(-1, x >>> Infinity); + } + assertFalse(foo(true)); + %PrepareFunctionForOptimization(foo); + assertTrue(foo(false)); + %OptimizeFunctionOnNextCall(foo); + assertFalse(foo(true)); +})(); + + +(function() { + function foo(b) { + let x = -1; + if (b) x = 0xFFFF_FFFF; + return -1 < Math.max(0, x, -1); + } + assertTrue(foo(true)); + %PrepareFunctionForOptimization(foo); + assertTrue(foo(false)); + %OptimizeFunctionOnNextCall(foo); + assertTrue(foo(true)); +})(); + + +(function() { + function foo(b) { + let x = 0x7FFF_FFFF; + if (b) x = 0; + return 0 < (Math.max(-5 >>> x, -5) % -5); + } + assertTrue(foo(true)); + %PrepareFunctionForOptimization(foo); + assertTrue(foo(false)); + %OptimizeFunctionOnNextCall(foo); + assertTrue(foo(true)); +})(); diff --git a/deps/v8/test/mjsunit/compiler/regress-1196683.js b/deps/v8/test/mjsunit/compiler/regress-1196683.js new file mode 100644 index 00000000000000..abd7d6b2f8da45 --- /dev/null +++ b/deps/v8/test/mjsunit/compiler/regress-1196683.js @@ -0,0 +1,56 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + + +(function() { + const arr = new Uint32Array([2**31]); + function foo() { + return (arr[0] ^ 0) + 1; + } + %PrepareFunctionForOptimization(foo); + assertEquals(-(2**31) + 1, foo()); + %OptimizeFunctionOnNextCall(foo); + assertEquals(-(2**31) + 1, foo()); +}); + + +// The remaining tests already passed without the bugfix. 
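// Background for the assertions in this file: `x ^ 0` coerces via ToInt32, so the
// Uint32Array element 2**31 in the first case wraps to -(2**31), while the
// Uint16Array/Uint8Array values below already fit in int32 and pass through
// unchanged; `x >>> 0` coerces via ToUint32, so the Int32Array element -(2**31)
// in the last case becomes 2**31. Illustrative expressions (plain JS, outside the
// test harness):
//   (new Uint32Array([2**31])[0] ^ 0) === -(2**31)    // true
//   (new Int32Array([-(2**31)])[0] >>> 0) === 2**31   // true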
+ + +(function() { + const arr = new Uint16Array([2**15]); + function foo() { + return (arr[0] ^ 0) + 1; + } + %PrepareFunctionForOptimization(foo); + assertEquals(2**15 + 1, foo()); + %OptimizeFunctionOnNextCall(foo); + assertEquals(2**15 + 1, foo()); +})(); + + +(function() { + const arr = new Uint8Array([2**7]); + function foo() { + return (arr[0] ^ 0) + 1; + } + %PrepareFunctionForOptimization(foo); + assertEquals(2**7 + 1, foo()); + %OptimizeFunctionOnNextCall(foo); + assertEquals(2**7 + 1, foo()); +})(); + + +(function() { + const arr = new Int32Array([-(2**31)]); + function foo() { + return (arr[0] >>> 0) + 1; + } + %PrepareFunctionForOptimization(foo); + assertEquals(2**31 + 1, foo()); + %OptimizeFunctionOnNextCall(foo); + assertEquals(2**31 + 1, foo()); +})(); diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status index 42f0b970d3644f..dd34390b81ff1e 100644 --- a/deps/v8/test/mjsunit/mjsunit.status +++ b/deps/v8/test/mjsunit/mjsunit.status @@ -38,6 +38,15 @@ # All tests in the bug directory are expected to fail. 'bugs/*': [FAIL], + ############################################################################## + # LTS failing tests. + # Introduced in https://chromium-review.googlesource.com/c/v8/v8/+/2584242 + 'regress/wasm/regress-1146861': [SKIP], + # Introduced in https://chromium-review.googlesource.com/c/v8/v8/+/2656263 + 'regress/wasm/regress-1153442': [SKIP], + # Introduced in https://chromium-review.googlesource.com/c/v8/v8/+/2649176 + 'regress/wasm/regress-1161654': [SKIP], + ############################################################################## # Open bugs. @@ -73,6 +82,9 @@ # Enable once multi-byte prefixed opcodes are correctly handled 'regress/wasm/regress-1065599': [SKIP], + # https://crbug.com/1166138 + 'regress/regress-1166138': SKIP, + ############################################################################## # Tests where variants make no sense. 'd8/enable-tracing': [PASS, NO_VARIANTS], diff --git a/deps/v8/test/mjsunit/regress/regress-1132111.js b/deps/v8/test/mjsunit/regress/regress-1132111.js new file mode 100644 index 00000000000000..1dd1b58806862a --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1132111.js @@ -0,0 +1,23 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Public function field with computed name +eval(` + buggy = ((bug = new class { [0] = x => 1337.0; }) => bug); +`); + +// Public method with computed name +eval(` + buggy = ((bug = new class { [0](x) { return 1337.0}; }) => bug); +`); + +// Private function field with computed name +eval(` + buggy = ((bug = new class { #foo = x => 1337.0; }) => bug); +`); + +// Private method with computed name +eval(` + buggy = ((bug = new class { #foo(x) { return 1337.0; } }) => bug); +`); diff --git a/deps/v8/test/mjsunit/regress/regress-1143772.js b/deps/v8/test/mjsunit/regress/regress-1143772.js new file mode 100644 index 00000000000000..40bc494d458afe --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1143772.js @@ -0,0 +1,71 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --allow-natives-syntax + +(function() { + // Only run this test if doubles are transitioned in-place to tagged. 
+ let x = {}; + x.a = 0.1; + let y = {}; + y.a = {}; + if (!%HaveSameMap(x, y)) return; + + // m1: {} + let m1 = {}; + + // m2: {a:d} + let m2 = {}; + assertTrue(%HaveSameMap(m2, m1)); + m2.a = 13.37; + + // m3: {a:d, b:s} + let m3 = {}; + m3.a = 13.37; + assertTrue(%HaveSameMap(m3, m2)); + m3.b = 1; + + // m4: {a:d, b:s, c:h} + let m4 = {}; + m4.a = 13.37; + m4.b = 1; + assertTrue(%HaveSameMap(m4, m3)); + m4.c = {}; + + // m4_2 == m4 + let m4_2 = {}; + m4_2.a = 13.37; + m4_2.b = 1; + m4_2.c = {}; + assertTrue(%HaveSameMap(m4_2, m4)); + + // m5: {a:d, b:d} + let m5 = {}; + m5.a = 13.37; + assertTrue(%HaveSameMap(m5, m2)); + m5.b = 13.37; + assertFalse(%HaveSameMap(m5, m3)); + + // At this point, Map3 and Map4 are both deprecated. Map2 transitions to + // Map5. Map5 is the migration target for Map3. + assertFalse(%HaveSameMap(m5, m3)); + + // m6: {a:d, b:d, c:d} + let m6 = {}; + m6.a = 13.37; + assertTrue(%HaveSameMap(m6, m2)); + m6.b = 13.37; + assertTrue(%HaveSameMap(m6, m5)); + m6.c = 13.37 + + // Make m7: {a:d, b:d, c:t} + let m7 = m4_2; + assertTrue(%HaveSameMap(m7, m4)); + // Map4 is deprecated, so this property access triggers a Map migration. + // With in-place map updates and no double unboxing, this should end up + // migrating to Map6, and updating it in-place. + m7.c; + assertFalse(%HaveSameMap(m7, m4)); + assertTrue(%HaveSameMap(m6, m7)); +})(); diff --git a/deps/v8/test/mjsunit/regress/regress-1166138.js b/deps/v8/test/mjsunit/regress/regress-1166138.js new file mode 100644 index 00000000000000..f3e4bde83e2769 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-1166138.js @@ -0,0 +1,7 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +let badregexp = "(?:" + " ".repeat(32768*2)+ ")*"; +reg = RegExp(badregexp); +assertThrows(() => reg.test(), SyntaxError); diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js b/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js index 0362f69bcda3ad..3a84066b837d51 100644 --- a/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js +++ b/deps/v8/test/mjsunit/regress/regress-crbug-1038178.js @@ -15,7 +15,7 @@ function opt(){ (((function(){})())?.v)() } %PrepareFunctionForOptimization(opt) -assertThrows(opt()); -assertThrows(opt()); +assertThrows(() => opt()); +assertThrows(() => opt()); %OptimizeFunctionOnNextCall(opt) -assertThrows(opt()); +assertThrows(() => opt()); diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js b/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js new file mode 100644 index 00000000000000..70a3d6bbf06500 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-crbug-1151890.js @@ -0,0 +1,11 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --allow-natives-syntax + +for (let i = 0, j = 0; i < 10; ++i) { + let x = (-0xffffffffffffffff_ffffffffffffffffn >> 0x40n); + assertEquals(-0x10000000000000000n, x); + %SimulateNewspaceFull(); +} diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js b/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js new file mode 100644 index 00000000000000..94fbb329bc47b4 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/regress-crbug-1171954.js @@ -0,0 +1,19 @@ +// Copyright 2021 the V8 project authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --always-opt + +// This causes the register used by the call in the later try-catch block to be +// used by the ToName conversion for null which causes a DCHECK fail when +// compiling. If register allocation changes, this test may no longer reproduce +// the crash but it is not easy write a proper test because it is linked to +// register allocation. This test should always work, so shouldn't cause any +// flakes. +try { + var { [null]: __v_12, } = {}; +} catch (e) {} + +try { + assertEquals((__v_40?.o?.m)().p); +} catch (e) {} diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1137608.js b/deps/v8/test/mjsunit/regress/wasm/regress-1137608.js new file mode 100644 index 00000000000000..5011dced2f70ff --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-1137608.js @@ -0,0 +1,46 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --no-liftoff --experimental-wasm-return-call --experimental-wasm-threads + +load("test/mjsunit/wasm/wasm-module-builder.js"); + +(function Regress1137608() { + print(arguments.callee.name); + let builder = new WasmModuleBuilder(); + let sig0 = builder.addType(kSig_i_iii); + let sig1 = builder.addType(makeSig([kWasmF64, kWasmF64, kWasmI32, + kWasmI32, kWasmI32, kWasmF32, kWasmI32, kWasmF64, kWasmI32, kWasmF32, + kWasmI32, kWasmF32, kWasmI32, kWasmF64, kWasmI32], [kWasmI32])); + let main = builder.addFunction("main", sig0) + .addBody([ + kExprI64Const, 0, + kExprF64UConvertI64, + kExprF64Const, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x00, 0x00, + kExprF64Const, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, + kExprF64Mul, + kExprI32Const, 0, + kExprF64Const, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + kExprF64StoreMem, 0x00, 0xb0, 0xe0, 0xc0, 0x81, 0x03, + kExprI32Const, 0, + kExprI32Const, 0, + kExprI32Const, 0, + kExprF32Const, 0x00, 0x00, 0x00, 0x00, + kExprI32Const, 0, + kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + kExprI32Const, 0, + kExprF32Const, 0x00, 0x00, 0x00, 0x00, + kExprI32Const, 0, + kExprF32Const, 0x00, 0x00, 0x00, 0x00, + kExprI32Const, 0, + kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + kExprI32Const, 0, + kExprI32Const, 2, + kExprReturnCallIndirect, sig1, kTableZero]).exportFunc(); + builder.addFunction("f", sig1).addBody([kExprI32Const, 0]); + builder.addTable(kWasmAnyFunc, 4, 4); + builder.addMemory(16, 32, false, true); + let module = new WebAssembly.Module(builder.toBuffer()); + let instance = new WebAssembly.Instance(module); +})(); diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1140549.js b/deps/v8/test/mjsunit/regress/wasm/regress-1140549.js new file mode 100644 index 00000000000000..65191e1962373c --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-1140549.js @@ -0,0 +1,25 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +// Flags: --wasm-staging + +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addMemory(16, 32, false, true); +builder.addType(makeSig([], [])); +builder.addFunction(undefined, 0 /* sig */) + .addBodyWithEnd([ +// signature: v_v +// body: +kExprI32Const, 0x00, +kExprI32Const, 0x00, +kExprI32Const, 0x00, +kAtomicPrefix, kExprI32AtomicCompareExchange8U, 0x00, 0xc3, 0x01, +kExprDrop, +kExprEnd, // end @193 +]); +builder.addExport('main', 0); +const instance = builder.instantiate(); +print(instance.exports.main(1, 2, 3)); diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js new file mode 100644 index 00000000000000..d9d80e58ccc1a1 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-1146861.js @@ -0,0 +1,56 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addGlobal(kWasmI32, 1); +builder.addType(makeSig([], [kWasmF64])); +// Generate function 1 (out of 1). +builder.addFunction(undefined, 0 /* sig */) + .addLocals(kWasmI32, 8).addLocals(kWasmI64, 3) + .addBodyWithEnd([ +// signature: d_v +// body: +kExprGlobalGet, 0x00, // global.get +kExprLocalSet, 0x00, // local.set +kExprI32Const, 0x00, // i32.const +kExprI32Eqz, // i32.eqz +kExprLocalSet, 0x01, // local.set +kExprGlobalGet, 0x00, // global.get +kExprLocalSet, 0x02, // local.set +kExprI32Const, 0x01, // i32.const +kExprI32Const, 0x01, // i32.const +kExprI32Sub, // i32.sub +kExprLocalSet, 0x03, // local.set +kExprGlobalGet, 0x00, // global.get +kExprLocalSet, 0x04, // local.set +kExprI32Const, 0x00, // i32.const +kExprI32Eqz, // i32.eqz +kExprLocalSet, 0x05, // local.set +kExprGlobalGet, 0x00, // global.get +kExprLocalSet, 0x06, // local.set +kExprI32Const, 0x00, // i32.const +kExprI32Const, 0x01, // i32.const +kExprI32Sub, // i32.sub +kExprLocalSet, 0x07, // local.set +kExprBlock, kWasmStmt, // block @45 + kExprI32Const, 0x00, // i32.const + kExprIf, kWasmStmt, // if @49 + kExprLocalGet, 0x0a, // local.get + kExprLocalSet, 0x08, // local.set + kExprElse, // else @55 + kExprNop, // nop + kExprEnd, // end @57 + kExprLocalGet, 0x08, // local.get + kExprLocalSet, 0x09, // local.set + kExprLocalGet, 0x09, // local.get + kExprI64Const, 0xff, 0x01, // i64.const + kExprI64Add, // i64.add + kExprDrop, // drop + kExprEnd, // end @69 +kExprF64Const, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f, // f64.const +kExprEnd, // end @79 +]); +builder.instantiate(); diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js new file mode 100644 index 00000000000000..989da11a25b85f --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-1153442.js @@ -0,0 +1,61 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --experimental-wasm-threads + +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addMemory(1, 1, false, true); +builder.addGlobal(kWasmI32, 1); +builder.addGlobal(kWasmI32, 1); +builder.addType(makeSig([kWasmI32, kWasmI64, kWasmI32], [])); +// Generate function 1 (out of 1). 
+builder.addFunction(undefined, 0 /* sig */) + .addLocals(kWasmI32, 10) + .addBodyWithEnd([ +// signature: v_ili +// body: +kExprI32Const, 0x00, // i32.const +kExprLocalSet, 0x04, // local.set +kExprI32Const, 0x01, // i32.const +kExprLocalSet, 0x05, // local.set +kExprBlock, kWasmStmt, // block @11 + kExprBr, 0x00, // br depth=0 + kExprEnd, // end @15 +kExprGlobalGet, 0x01, // global.get +kExprLocalSet, 0x03, // local.set +kExprLocalGet, 0x03, // local.get +kExprI32Const, 0x01, // i32.const +kExprI32Sub, // i32.sub +kExprLocalSet, 0x06, // local.set +kExprI64Const, 0x01, // i64.const +kExprLocalSet, 0x01, // local.set +kExprI32Const, 0x00, // i32.const +kExprI32Eqz, // i32.eqz +kExprLocalSet, 0x07, // local.set +kExprBlock, kWasmStmt, // block @36 + kExprBr, 0x00, // br depth=0 + kExprEnd, // end @40 +kExprGlobalGet, 0x01, // global.get +kExprLocalSet, 0x08, // local.set +kExprI32Const, 0x01, // i32.const +kExprI32Const, 0x01, // i32.const +kExprI32Sub, // i32.sub +kExprLocalSet, 0x09, // local.set +kExprLocalGet, 0x00, // local.get +kExprLocalSet, 0x0a, // local.set +kExprGlobalGet, 0x00, // global.get +kExprLocalSet, 0x0b, // local.set +kExprI32Const, 0x00, // i32.const +kExprI32Const, 0x0f, // i32.const +kExprI32And, // i32.and +kExprLocalSet, 0x0c, // local.set +kExprI32Const, 0x00, // i32.const +kAtomicPrefix, kExprI64AtomicLoad, 0x03, 0x04, // i64.atomic.load64 +kExprDrop, // drop +kExprUnreachable, // unreachable +kExprEnd, // end @75 +]); +builder.toModule(); diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js new file mode 100644 index 00000000000000..93f2c3b556fc55 --- /dev/null +++ b/deps/v8/test/mjsunit/regress/wasm/regress-1161654.js @@ -0,0 +1,56 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +// Flags: --wasm-staging + +// This is a fuzzer-generated test case that exposed a bug in Liftoff that only +// affects ARM, where the fp register aliasing is different from other archs. +// We were inncorrectly clearing the the high fp register in a LiftoffRegList +// indicating registers to load, hitting a DCHECK. +load('test/mjsunit/wasm/wasm-module-builder.js'); + +const builder = new WasmModuleBuilder(); +builder.addMemory(19, 32, false); +builder.addGlobal(kWasmI32, 0); +builder.addType(makeSig([], [])); +builder.addType(makeSig([kWasmI64, kWasmS128, kWasmF32], [kWasmI32])); +// Generate function 1 (out of 5). +builder.addFunction(undefined, 0 /* sig */) + .addBodyWithEnd([ +// signature: v_v +// body: +kExprI32Const, 0x05, // i32.const +kExprReturn, // return +kExprUnreachable, // unreachable +kExprEnd, // end @5 +]); +// Generate function 4 (out of 5). 
+builder.addFunction(undefined, 1 /* sig */) + .addBodyWithEnd([ +// signature: i_lsf +// body: +kExprLocalGet, 0x01, // local.get +kExprLocalGet, 0x01, // local.get +kExprGlobalGet, 0x00, // global.get +kExprDrop, // drop +kExprLoop, kWasmStmt, // loop @8 + kExprLoop, 0x00, // loop @10 + kExprI32Const, 0x01, // i32.const + kExprMemoryGrow, 0x00, // memory.grow + kExprI64LoadMem8U, 0x00, 0x70, // i64.load8_u + kExprLoop, 0x00, // loop @19 + kExprCallFunction, 0x00, // call function #0: v_v + kExprEnd, // end @23 + kExprI64Const, 0xf1, 0x24, // i64.const + kExprGlobalGet, 0x00, // global.get + kExprDrop, // drop + kExprBr, 0x00, // br depth=0 + kExprEnd, // end @32 + kExprEnd, // end @33 +kExprI32Const, 0x5b, // i32.const +kExprReturn, // return +kExprEnd, // end @37 +]); +// Instantiation is enough to cause a crash. +const instance = builder.instantiate(); diff --git a/deps/v8/tools/gcmole/gcmole.lua b/deps/v8/tools/gcmole/gcmole.lua index a09c3b61ad5161..5ccf2e1b4c9d4d 100644 --- a/deps/v8/tools/gcmole/gcmole.lua +++ b/deps/v8/tools/gcmole/gcmole.lua @@ -116,7 +116,7 @@ local function MakeClangCommandLine( .. " -DV8_INTL_SUPPORT" .. " -I./" .. " -Iinclude/" - .. " -Iout/Release/gen" + .. " -Iout/build/gen" .. " -Ithird_party/icu/source/common" .. " -Ithird_party/icu/source/i18n" .. " " .. arch_options diff --git a/deps/v8/tools/gcmole/run-gcmole.py b/deps/v8/tools/gcmole/run-gcmole.py index 6f2a091c3c7e62..40e2be9699b706 100755 --- a/deps/v8/tools/gcmole/run-gcmole.py +++ b/deps/v8/tools/gcmole/run-gcmole.py @@ -21,9 +21,9 @@ assert len(sys.argv) == 2 -if not os.path.isfile("out/Release/gen/torque-generated/builtin-definitions-tq.h"): - print("Expected generated headers in out/Release/gen.") - print("Either build v8 in out/Release or change gcmole.lua:115") +if not os.path.isfile("out/build/gen/torque-generated/builtin-definitions-tq.h"): + print("Expected generated headers in out/build/gen.") + print("Either build v8 in out/build or change gcmole.lua:115") sys.exit(-1) proc = subprocess.Popen( diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py index d7255a94d35b94..80ea1f956592bd 100644 --- a/deps/v8/tools/run_perf.py +++ b/deps/v8/tools/run_perf.py @@ -575,6 +575,32 @@ def FlattenRunnables(node, node_cb): raise Exception('Invalid suite configuration.') +def find_build_directory(base_path, arch): + """Returns the location of d8 or node in the build output directory. + + This supports a seamless transition between legacy build location + (out/Release) and new build location (out/build). + """ + def is_build(path): + # We support d8 or node as executables. We don't support testing on + # Windows. + return (os.path.isfile(os.path.join(path, 'd8')) or + os.path.isfile(os.path.join(path, 'node'))) + possible_paths = [ + # Location developer wrapper scripts is using. + '%s.release' % arch, + # Current build location on bots. + 'build', + # Legacy build location on bots. + 'Release', + ] + possible_paths = [os.path.join(base_path, p) for p in possible_paths] + actual_paths = filter(is_build, possible_paths) + assert actual_paths, 'No build directory found.' + assert len(actual_paths) == 1, 'Found ambiguous build directories.' 
+ return actual_paths[0] + + class Platform(object): def __init__(self, args): self.shell_dir = args.shell_dir @@ -881,8 +907,7 @@ def Main(argv): 'to auto-detect.', default='x64', choices=SUPPORTED_ARCHS + ['auto']) parser.add_argument('--buildbot', - help='Adapt to path structure used on buildbots and adds ' - 'timestamps/level to all logged status messages', + help='Deprecated', default=False, action='store_true') parser.add_argument('-d', '--device', help='The device ID to run Android tests on. If not ' @@ -978,13 +1003,9 @@ def Main(argv): workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) - if args.buildbot: - build_config = 'Release' - else: - build_config = '%s.release' % args.arch - if args.binary_override_path == None: - args.shell_dir = os.path.join(workspace, args.outdir, build_config) + args.shell_dir = find_build_directory( + os.path.join(workspace, args.outdir), args.arch) default_binary_name = 'd8' else: if not os.path.isfile(args.binary_override_path): @@ -998,8 +1019,8 @@ def Main(argv): default_binary_name = os.path.basename(args.binary_override_path) if args.outdir_secondary: - args.shell_dir_secondary = os.path.join( - workspace, args.outdir_secondary, build_config) + args.shell_dir_secondary = find_build_directory( + os.path.join(workspace, args.outdir_secondary), args.arch) else: args.shell_dir_secondary = None diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py index c4036bb918e3a3..cc731189f73243 100644 --- a/deps/v8/tools/testrunner/base_runner.py +++ b/deps/v8/tools/testrunner/base_runner.py @@ -6,7 +6,7 @@ from __future__ import print_function from functools import reduce -from collections import OrderedDict +from collections import OrderedDict, namedtuple import json import multiprocessing import optparse @@ -115,52 +115,35 @@ ] -class ModeConfig(object): - def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode): - self.flags = flags - self.timeout_scalefactor = timeout_scalefactor - self.status_mode = status_mode - self.execution_mode = execution_mode - +ModeConfig = namedtuple( + 'ModeConfig', 'label flags timeout_scalefactor status_mode') DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"] RELEASE_FLAGS = ["--nohard-abort"] -MODES = { - "debug": ModeConfig( - flags=DEBUG_FLAGS, - timeout_scalefactor=4, - status_mode="debug", - execution_mode="debug", - ), - "optdebug": ModeConfig( + +DEBUG_MODE = ModeConfig( + label='debug', flags=DEBUG_FLAGS, timeout_scalefactor=4, status_mode="debug", - execution_mode="debug", - ), - "release": ModeConfig( +) + +RELEASE_MODE = ModeConfig( + label='release', flags=RELEASE_FLAGS, timeout_scalefactor=1, status_mode="release", - execution_mode="release", - ), - # Normal trybot release configuration. There, dchecks are always on which - # implies debug is set. Hence, the status file needs to assume debug-like - # behavior/timeouts. - "tryrelease": ModeConfig( +) + +# Normal trybot release configuration. There, dchecks are always on which +# implies debug is set. Hence, the status file needs to assume debug-like +# behavior/timeouts. +TRY_RELEASE_MODE = ModeConfig( + label='release+dchecks', flags=RELEASE_FLAGS, - timeout_scalefactor=1, - status_mode="debug", - execution_mode="release", - ), - # This mode requires v8 to be compiled with dchecks and slow dchecks. 
- "slowrelease": ModeConfig( - flags=RELEASE_FLAGS + ["--enable-slow-asserts"], - timeout_scalefactor=2, + timeout_scalefactor=4, status_mode="debug", - execution_mode="release", - ), -} +) PROGRESS_INDICATORS = { 'verbose': progress.VerboseProgressIndicator, @@ -240,12 +223,29 @@ def __str__(self): return '\n'.join(detected_options) +def _do_load_build_config(outdir, verbose=False): + build_config_path = os.path.join(outdir, "v8_build_config.json") + if not os.path.exists(build_config_path): + if verbose: + print("Didn't find build config: %s" % build_config_path) + raise TestRunnerError() + + with open(build_config_path) as f: + try: + build_config_json = json.load(f) + except Exception: # pragma: no cover + print("%s exists but contains invalid json. Is your build up-to-date?" + % build_config_path) + raise TestRunnerError() + + return BuildConfig(build_config_json) + + class BaseTestRunner(object): def __init__(self, basedir=None): self.basedir = basedir or BASE_DIR self.outdir = None self.build_config = None - self.mode_name = None self.mode_options = None self.target_os = None @@ -279,7 +279,7 @@ def execute(self, sys_args=None): tests = self._load_testsuite_generators(args, options) self._setup_env() print(">>> Running tests for %s.%s" % (self.build_config.arch, - self.mode_name)) + self.mode_options.label)) exit_code = self._do_execute(tests, args, options) if exit_code == utils.EXIT_CODE_FAILURES and options.json_test_results: print("Force exit code 0 after failures. Json test results file " @@ -313,9 +313,6 @@ def _add_parser_default_options(self, parser): default="out") parser.add_option("--arch", help="The architecture to run tests for") - parser.add_option("-m", "--mode", - help="The test mode in which to run (uppercase for builds" - " in CI): %s" % MODES.keys()) parser.add_option("--shell-dir", help="DEPRECATED! Executables from build " "directory will be used") parser.add_option("--test-root", help="Root directory of the test suites", @@ -400,9 +397,8 @@ def _add_parser_options(self, parser): def _parse_args(self, parser, sys_args): options, args = parser.parse_args(sys_args) - if any(map(lambda v: v and ',' in v, - [options.arch, options.mode])): # pragma: no cover - print('Multiple arch/mode are deprecated') + if options.arch and ',' in options.arch: # pragma: no cover + print('Multiple architectures are deprecated') raise TestRunnerError() return options, args @@ -410,7 +406,12 @@ def _parse_args(self, parser, sys_args): def _load_build_config(self, options): for outdir in self._possible_outdirs(options): try: - self.build_config = self._do_load_build_config(outdir, options.verbose) + self.build_config = _do_load_build_config(outdir, options.verbose) + + # In auto-detect mode the outdir is always where we found the build config. + # This ensures that we'll also take the build products from there. + self.outdir = outdir + break except TestRunnerError: pass @@ -433,8 +434,7 @@ def _load_build_config(self, options): # Returns possible build paths in order: # gn # outdir - # outdir/arch.mode - # Each path is provided in two versions: and /mode for bots. 
+ # outdir on bots def _possible_outdirs(self, options): def outdirs(): if options.gn: @@ -442,17 +442,13 @@ def outdirs(): return yield options.outdir - if options.arch and options.mode: - yield os.path.join(options.outdir, - '%s.%s' % (options.arch, options.mode)) + + if os.path.basename(options.outdir) != 'build': + yield os.path.join(options.outdir, 'build') for outdir in outdirs(): yield os.path.join(self.basedir, outdir) - # bot option - if options.mode: - yield os.path.join(self.basedir, outdir, options.mode) - def _get_gn_outdir(self): gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN) latest_timestamp = -1 @@ -468,51 +464,13 @@ def _get_gn_outdir(self): print(">>> Latest GN build found: %s" % latest_config) return os.path.join(DEFAULT_OUT_GN, latest_config) - def _do_load_build_config(self, outdir, verbose=False): - build_config_path = os.path.join(outdir, "v8_build_config.json") - if not os.path.exists(build_config_path): - if verbose: - print("Didn't find build config: %s" % build_config_path) - raise TestRunnerError() - - with open(build_config_path) as f: - try: - build_config_json = json.load(f) - except Exception: # pragma: no cover - print("%s exists but contains invalid json. Is your build up-to-date?" - % build_config_path) - raise TestRunnerError() - - # In auto-detect mode the outdir is always where we found the build config. - # This ensures that we'll also take the build products from there. - self.outdir = os.path.dirname(build_config_path) - - return BuildConfig(build_config_json) - def _process_default_options(self, options): - # We don't use the mode for more path-magic. - # Therefore transform the bot mode here to fix build_config value. - if options.mode: - options.mode = self._bot_to_v8_mode(options.mode) - - build_config_mode = 'debug' if self.build_config.is_debug else 'release' - if options.mode: - if options.mode not in MODES: # pragma: no cover - print('%s mode is invalid' % options.mode) - raise TestRunnerError() - if MODES[options.mode].execution_mode != build_config_mode: - print ('execution mode (%s) for %s is inconsistent with build config ' - '(%s)' % ( - MODES[options.mode].execution_mode, - options.mode, - build_config_mode)) - raise TestRunnerError() - - self.mode_name = options.mode + if self.build_config.is_debug: + self.mode_options = DEBUG_MODE + elif self.build_config.dcheck_always_on: + self.mode_options = TRY_RELEASE_MODE else: - self.mode_name = build_config_mode - - self.mode_options = MODES[self.mode_name] + self.mode_options = RELEASE_MODE if options.arch and options.arch != self.build_config.arch: print('--arch value (%s) inconsistent with build config (%s).' % ( @@ -533,15 +491,6 @@ def _process_default_options(self, options): options.command_prefix = shlex.split(options.command_prefix) options.extra_flags = sum(map(shlex.split, options.extra_flags), []) - def _bot_to_v8_mode(self, config): - """Convert build configs from bots to configs understood by the v8 runner. - - V8 configs are always lower case and without the additional _x64 suffix - for 64 bit builds on windows with ninja. 
- """ - mode = config[:-4] if config.endswith('_x64') else config - return mode.lower() - def _process_options(self, options): pass @@ -689,9 +638,7 @@ def _get_statusfile_variables(self, options): "is_clang": self.build_config.is_clang, "is_full_debug": self.build_config.is_full_debug, "mips_arch_variant": mips_arch_variant, - "mode": self.mode_options.status_mode - if not self.build_config.dcheck_always_on - else "debug", + "mode": self.mode_options.status_mode, "msan": self.build_config.msan, "no_harness": options.no_harness, "no_i18n": self.build_config.no_i18n, @@ -804,10 +751,7 @@ def _create_progress_indicators(self, test_count, options): procs.append(progress.JUnitTestProgressIndicator(options.junitout, options.junittestsuite)) if options.json_test_results: - procs.append(progress.JsonTestProgressIndicator( - self.framework_name, - self.build_config.arch, - self.mode_options.execution_mode)) + procs.append(progress.JsonTestProgressIndicator(self.framework_name)) for proc in procs: proc.configure(options) diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py index 10545fa5f2417b..f6c61466ba00eb 100755 --- a/deps/v8/tools/testrunner/standard_runner.py +++ b/deps/v8/tools/testrunner/standard_runner.py @@ -379,10 +379,8 @@ def _duration_results_text(test): ] assert os.path.exists(options.json_test_results) - complete_results = [] with open(options.json_test_results, "r") as f: - complete_results = json.loads(f.read()) - output = complete_results[0] + output = json.load(f) lines = [] for test in output['slowest_tests']: suffix = '' diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py index a993fc18a372cb..1c1b163e37cf13 100644 --- a/deps/v8/tools/testrunner/testproc/progress.py +++ b/deps/v8/tools/testrunner/testproc/progress.py @@ -358,7 +358,7 @@ def finished(self): class JsonTestProgressIndicator(ProgressIndicator): - def __init__(self, framework_name, arch, mode): + def __init__(self, framework_name): super(JsonTestProgressIndicator, self).__init__() # We want to drop stdout/err for all passed tests on the first try, but we # need to get outputs for all runs after the first one. To accommodate that, @@ -367,8 +367,6 @@ def __init__(self, framework_name, arch, mode): self._requirement = base.DROP_PASS_STDOUT self.framework_name = framework_name - self.arch = arch - self.mode = mode self.results = [] self.duration_sum = 0 self.test_count = 0 @@ -438,24 +436,16 @@ def _test_record(self, test, result, output, run): } def finished(self): - complete_results = [] - if os.path.exists(self.options.json_test_results): - with open(self.options.json_test_results, "r") as f: - # On bots we might start out with an empty file. 
- complete_results = json.loads(f.read() or "[]") - duration_mean = None if self.test_count: duration_mean = self.duration_sum / self.test_count - complete_results.append({ - "arch": self.arch, - "mode": self.mode, + result = { "results": self.results, "slowest_tests": self.tests.as_list(), "duration_mean": duration_mean, "test_total": self.test_count, - }) + } with open(self.options.json_test_results, "w") as f: - f.write(json.dumps(complete_results)) + json.dump(result, f) diff --git a/deps/v8/tools/unittests/run_perf_test.py b/deps/v8/tools/unittests/run_perf_test.py index 6cd63ac2b66798..28f71b2b339115 100755 --- a/deps/v8/tools/unittests/run_perf_test.py +++ b/deps/v8/tools/unittests/run_perf_test.py @@ -90,6 +90,21 @@ 'units': 'ms', } + +class UnitTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + sys.path.insert(0, BASE_DIR) + import run_perf + global run_perf + + def testBuildDirectory(self): + base_path = os.path.join(TEST_DATA, 'builddirs', 'dir1', 'out') + expected_path = os.path.join(base_path, 'build') + self.assertEquals( + expected_path, run_perf.find_build_directory(base_path, 'x64')) + + class PerfTest(unittest.TestCase): @classmethod def setUpClass(cls): @@ -125,6 +140,7 @@ def _WriteTestInput(self, json_content): f.write(json.dumps(json_content)) def _MockCommand(self, *args, **kwargs): + on_bots = kwargs.pop('on_bots', False) # Fake output for each test run. test_outputs = [Output(stdout=arg, timed_out=kwargs.get('timed_out', False), @@ -142,6 +158,16 @@ def execute(*args, **kwargs): run_perf.command, 'PosixCommand', mock.MagicMock(side_effect=create_cmd)).start() + build_dir = 'Release' if on_bots else 'x64.release' + out_dirs = ['out', 'out-secondary'] + return_values = [ + os.path.join(os.path.dirname(BASE_DIR), out, build_dir) + for out in out_dirs + ] + mock.patch.object( + run_perf, 'find_build_directory', + mock.MagicMock(side_effect=return_values)).start() + # Check that d8 is called from the correct cwd for each test run. 
dirs = [os.path.join(TEST_WORKSPACE, arg) for arg in args[0]] def chdir(*args, **kwargs): @@ -394,11 +420,12 @@ def testTwoRunsStdDevRegExp(self): def testBuildbot(self): self._WriteTestInput(V8_JSON) - self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n']) + self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(0, self._CallMain('--buildbot')) + self.assertEqual(0, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'Richards', 'results': [1.234], 'stddev': ''}, {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''}, @@ -410,11 +437,12 @@ def testBuildbotWithTotal(self): test_input = dict(V8_JSON) test_input['total'] = True self._WriteTestInput(test_input) - self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n']) + self._MockCommand(['.'], ['Richards: 1.234\nDeltaBlue: 10657567\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(0, self._CallMain('--buildbot')) + self.assertEqual(0, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'Richards', 'results': [1.234], 'stddev': ''}, {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''}, @@ -427,11 +455,12 @@ def testBuildbotWithTotalAndErrors(self): test_input = dict(V8_JSON) test_input['total'] = True self._WriteTestInput(test_input) - self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n']) + self._MockCommand(['.'], ['x\nRichards: bla\nDeltaBlue: 10657567\ny\n'], + on_bots=True) mock.patch.object( run_perf.Platform, 'ReadBuildConfig', mock.MagicMock(return_value={'is_android': False})).start() - self.assertEqual(1, self._CallMain('--buildbot')) + self.assertEqual(1, self._CallMain()) self._VerifyResults('test', 'score', [ {'name': 'DeltaBlue', 'results': [10657567.0], 'stddev': ''}, ]) @@ -484,6 +513,7 @@ def testAndroid(self): mock.patch('run_perf.AndroidPlatform.PreExecution').start() mock.patch('run_perf.AndroidPlatform.PostExecution').start() mock.patch('run_perf.AndroidPlatform.PreTests').start() + mock.patch('run_perf.find_build_directory').start() mock.patch( 'run_perf.AndroidPlatform.Run', return_value=(Output(stdout='Richards: 1.234\nDeltaBlue: 10657567\n'), diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py index 3fc91b8e90f023..8b3275172d55a4 100755 --- a/deps/v8/tools/unittests/run_tests_test.py +++ b/deps/v8/tools/unittests/run_tests_test.py @@ -67,7 +67,7 @@ def temp_base(baseroot='testroot1'): """ basedir = os.path.join(TEST_DATA_ROOT, baseroot) with temp_dir() as tempbase: - builddir = os.path.join(tempbase, 'out', 'Release') + builddir = os.path.join(tempbase, 'out', 'build') testroot = os.path.join(tempbase, 'test') os.makedirs(builddir) shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir) @@ -112,7 +112,7 @@ def run_tests(basedir, *args, **kwargs): def override_build_config(basedir, **kwargs): """Override the build config with new values provided as kwargs.""" - path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json') + path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json') with open(path) as f: config = json.load(f) config.update(kwargs) @@ -171,7 +171,6 @@ def testPass(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', 
'--variants=default,stress', '--time', @@ -189,7 +188,6 @@ def testShardedProc(self): for shard in [1, 2]: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', '--shard-count=2', @@ -220,7 +218,6 @@ def testSharded(self): for shard in [1, 2]: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', '--shard-count=2', @@ -239,7 +236,6 @@ def testFail(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', 'sweet/strawberries', @@ -252,7 +248,7 @@ def check_cleaned_json_output( self, expected_results_name, actual_json, basedir): # Check relevant properties of the json output. with open(actual_json) as f: - json_output = json.load(f)[0] + json_output = json.load(f) # Replace duration in actual output as it's non-deterministic. Also # replace the python executable prefix as it has a different absolute @@ -285,7 +281,6 @@ def testFailWithRerunAndJSON(self): json_path = os.path.join(basedir, 'out.json') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--rerun-failures-count=2', @@ -314,7 +309,6 @@ def testFlakeWithRerunAndJSON(self): json_path = os.path.join(basedir, 'out.json') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--rerun-failures-count=2', @@ -346,7 +340,6 @@ def testAutoDetect(self): v8_enable_pointer_compression=False) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -371,7 +364,6 @@ def testSkips(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=nooptimization', 'sweet/strawberries', @@ -385,7 +377,6 @@ def testRunSkips(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=nooptimization', '--run-skipped', @@ -402,7 +393,6 @@ def testDefault(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', infra_staging=False, ) self.assertIn('0 tests ran', result.stdout, result) @@ -410,24 +400,15 @@ def testDefault(self): def testNoBuildConfig(self): """Test failing run when build config is not found.""" - with temp_base() as basedir: + with temp_dir() as basedir: result = run_tests(basedir) self.assertIn('Failed to load build config', result.stdout, result) self.assertEqual(5, result.returncode, result) - def testInconsistentMode(self): - """Test failing run when attempting to wrongly override the mode.""" - with temp_base() as basedir: - override_build_config(basedir, is_debug=True) - result = run_tests(basedir, '--mode=Release') - self.assertIn('execution mode (release) for release is inconsistent ' - 'with build config (debug)', result.stdout, result) - self.assertEqual(5, result.returncode, result) - def testInconsistentArch(self): """Test failing run when attempting to wrongly override the arch.""" with temp_base() as basedir: - result = run_tests(basedir, '--mode=Release', '--arch=ia32') + result = run_tests(basedir, '--arch=ia32') self.assertIn( '--arch value (ia32) inconsistent with build config (x64).', result.stdout, result) @@ -436,13 +417,13 @@ def testInconsistentArch(self): def testWrongVariant(self): """Test using a bogus variant.""" with temp_base() as basedir: - result = run_tests(basedir, '--mode=Release', '--variants=meh') + result = run_tests(basedir, '--variants=meh') self.assertEqual(5, 
result.returncode, result) def testModeFromBuildConfig(self): """Test auto-detection of mode from build config.""" with temp_base() as basedir: - result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas') + result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas') self.assertIn('Running tests for x64.release', result.stdout, result) self.assertEqual(0, result.returncode, result) @@ -455,7 +436,6 @@ def testReport(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--variants=default', 'sweet', '--report', @@ -471,7 +451,6 @@ def testWarnUnusedRules(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--variants=default,nooptimization', 'sweet', '--warn-unused', @@ -486,7 +465,6 @@ def testCatNoSources(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--variants=default', 'sweet/bananas', '--cat', @@ -505,7 +483,6 @@ def testPredictable(self): override_build_config(basedir, v8_enable_verify_predictable=True) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -524,7 +501,6 @@ def testSlowArch(self): override_build_config(basedir, v8_target_cpu='arm64') result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', 'sweet/bananas', @@ -538,7 +514,6 @@ def testRandomSeedStressWithDefault(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed-stress-count=2', @@ -553,7 +528,6 @@ def testRandomSeedStressWithSeed(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed-stress-count=2', @@ -577,7 +551,6 @@ def testSpecificVariants(self): override_build_config(basedir, is_asan=True) result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default,stress', 'sweet/bananas', @@ -599,7 +572,6 @@ def testDotsProgress(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=dots', 'sweet/cherries', 'sweet/bananas', @@ -620,7 +592,6 @@ def _testCompactProgress(self, name): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=%s' % name, 'sweet/cherries', 'sweet/bananas', @@ -641,7 +612,6 @@ def testExitAfterNFailures(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--exit-after-n-failures=2', '-j1', @@ -660,7 +630,7 @@ def testExitAfterNFailures(self): self.assertEqual(1, result.returncode, result) def testNumFuzzer(self): - sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/Release'] + sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build'] with temp_base() as basedir: with capture() as (stdout, stderr): @@ -674,7 +644,6 @@ def testRunnerFlags(self): with temp_base() as basedir: result = run_tests( basedir, - '--mode=Release', '--progress=verbose', '--variants=default', '--random-seed=42', diff --git a/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 new file mode 100644 index 00000000000000..9daeafb9864cf4 --- /dev/null +++ b/deps/v8/tools/unittests/testdata/builddirs/dir1/out/build/d8 @@ -0,0 +1 @@ +test diff --git a/deps/v8/tools/unittests/testdata/expected_test_results1.json b/deps/v8/tools/unittests/testdata/expected_test_results1.json index 
d1fdb49525d8d8..08ac623cd734b2 100644 --- a/deps/v8/tools/unittests/testdata/expected_test_results1.json +++ b/deps/v8/tools/unittests/testdata/expected_test_results1.json @@ -1,10 +1,8 @@ { - "arch": "x64", "duration_mean": 1, - "mode": "release", "results": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -29,7 +27,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -54,7 +52,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -81,7 +79,7 @@ ], "slowest_tests": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -105,7 +103,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -129,7 +127,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ diff --git a/deps/v8/tools/unittests/testdata/expected_test_results2.json b/deps/v8/tools/unittests/testdata/expected_test_results2.json index ac9ab9cc595845..dc353f687553e5 100644 --- a/deps/v8/tools/unittests/testdata/expected_test_results2.json +++ b/deps/v8/tools/unittests/testdata/expected_test_results2.json @@ -1,10 +1,8 @@ { - "arch": "x64", "duration_mean": 1, - "mode": "release", "results": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [ @@ -28,7 +26,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 0, "expected": [ @@ -54,7 +52,7 @@ ], "slowest_tests": [ { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort 
--testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 0, "expected": [ @@ -77,7 +75,7 @@ "variant_flags": [] }, { - "command": "/usr/bin/python out/Release/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", + "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner", "duration": 1, "exit_code": 1, "expected": [