deps,src: patch V8 to be API/ABI compatible with 7.4 (from 7.8)
deps: revert 2ac8bb719 from upstream V8

Original commit message:

    profiler: Allow querying SnapshotObjectId for native objects

    - Adds regular native heap entries to the HeapObjectsMap.
    - Adds a side map for keeping a mapping of native objects to their canonical
      heap entry that they have been merged into.

    Change-Id: Ida00628126ded1948ceb2a0cbe14da817af7f361
    Bug: chromium:988350
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1720810
    Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
    Reviewed-by: Alexei Filippov <alph@chromium.org>
    Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#63140}

Refs: v8/v8@2ac8bb7

[The `SuppressMicrotaskExecutionScope` hack only works because
the constructor that allows specifying an alternative microtask
queue was never actually implemented.]
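
[A standalone sketch of why that is link-safe (stand-in names, not real
V8 code): a constructor that is declared but never defined cannot be
referenced by any successfully linked embedder binary, so deleting the
declaration cannot break an existing embedder.]

    // sketch.cc: illustrative only; Scope stands in for
    // v8::Isolate::SuppressMicrotaskExecutionScope.
    struct Scope {
      explicit Scope(int i) : value_(i) {}  // declared and defined: callable
      explicit Scope(void* alt);            // declared but never defined: any
                                            // call compiles, then fails to link
      int value_;
    };

    int main() {
      Scope ok(42);           // fine
      // Scope bad(nullptr);  // would be a link-time error
      return 0;
    }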

deps: revert fb698cec37 from upstream V8

Original commit message:

    [isolate-data] Move hot fields closer to isolate_root

    In generated code, we access fields inside IsolateData through the
    root-register. On some platforms it is significantly cheaper to access
    things that are close to the root-register value than things that are
    located far away. The motivation for this CL was a 5% difference in
    Octane/Mandreel scores between

    // Part of the stack check.
    cmpq rsp,[r13+0x9ea8]

    and

    cmpq rsp,[r13-0x30]  // Mandreel score improved by 5%.

    This moves the StackGuard up to fix Mandreel. As a drive-by, also move
    two more fields up that are accessed by each CallCFunction.

    Tbr: yangguo@chromium.org
    Bug: v8:9534,chromium:993264
    Change-Id: I5418b63d40274a138e285fa3c99b96e33a814fb1
    Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1751345
    Reviewed-by: Jakob Gruber <jgruber@chromium.org>
    Reviewed-by: Yang Guo <yangguo@chromium.org>
    Auto-Submit: Jakob Gruber <jgruber@chromium.org>
    Commit-Queue: Yang Guo <yangguo@chromium.org>
    Cr-Commit-Position: refs/heads/master@{#63187}

Refs: v8/v8@fb698ce
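
[Context for the asm comparison above: on x86-64, a memory operand whose
displacement fits in a sign-extended 8-bit immediate (-128..127) gets a
shorter encoding than one needing a full 32-bit displacement, so
cmpq rsp,[r13-0x30] encodes several bytes smaller than
cmpq rsp,[r13+0x9ea8]. A toy check of which offsets qualify; this is an
illustration, not V8 code:]

    #include <cstdio>

    // True when an offset fits x86-64's short disp8 addressing form.
    constexpr bool FitsDisp8(long offset) {
      return offset >= -128 && offset <= 127;
    }

    int main() {
      std::printf("-0x30   fits disp8: %d\n", FitsDisp8(-0x30));   // 1
      std::printf("+0x9ea8 fits disp8: %d\n", FitsDisp8(0x9ea8));  // 0
      return 0;
    }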

src: re-add flags removed in V8 7.8 as no-ops

PR-URL: #30109
Reviewed-By: Myles Borins <myles.borins@gmail.com>
addaleax authored and BethGriggs committed Feb 6, 2020
1 parent b335529 commit 8d336ff
Showing 20 changed files with 113 additions and 337 deletions.
2 changes: 1 addition & 1 deletion common.gypi
@@ -38,7 +38,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.20',
+    'v8_embedder_string': '-node.21',
 
     ##### V8 defaults for Node.js #####
 
11 changes: 2 additions & 9 deletions deps/v8/include/v8-internal.h
@@ -152,22 +152,15 @@ class Internals {
 
   static const uint32_t kNumIsolateDataSlots = 4;
 
   // IsolateData layout guarantees.
   static const int kIsolateEmbedderDataOffset = 0;
   static const int kExternalMemoryOffset =
       kNumIsolateDataSlots * kApiSystemPointerSize;
   static const int kExternalMemoryLimitOffset =
       kExternalMemoryOffset + kApiInt64Size;
   static const int kExternalMemoryAtLastMarkCompactOffset =
       kExternalMemoryLimitOffset + kApiInt64Size;
-  static const int kIsolateFastCCallCallerFpOffset =
-      kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
-  static const int kIsolateFastCCallCallerPcOffset =
-      kIsolateFastCCallCallerFpOffset + kApiSystemPointerSize;
-  static const int kIsolateStackGuardOffset =
-      kIsolateFastCCallCallerPcOffset + kApiSystemPointerSize;
   static const int kIsolateRootsOffset =
-      kIsolateStackGuardOffset + 7 * kApiSystemPointerSize;
+      kExternalMemoryAtLastMarkCompactOffset + kApiInt64Size;
 
   static const int kUndefinedValueRootIndex = 4;
   static const int kTheHoleValueRootIndex = 5;
@@ -186,7 +179,7 @@
 
   static const int kFirstNonstringType = 0x40;
   static const int kOddballType = 0x43;
-  static const int kForeignType = 0x46;
+  static const int kForeignType = 0x47;
   static const int kJSSpecialApiObjectType = 0x410;
   static const int kJSApiObjectType = 0x420;
   static const int kJSObjectType = 0x421;
8 changes: 8 additions & 0 deletions deps/v8/include/v8-platform.h
@@ -439,6 +439,14 @@
    */
   virtual void DumpWithoutCrashing() {}
 
+  /**
+   * Lets the embedder to add crash keys.
+   */
+  virtual void AddCrashKey(int id, const char* name, uintptr_t value) {
+    // "noop" is a valid implementation if the embedder doesn't care to log
+    // additional data for crashes.
+  }
+
  protected:
   /**
    * Default implementation of current wall-clock time in milliseconds
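
[A sketch of how an embedder might pick up the new hook. To stay
self-contained it does not include the real V8 header; PlatformLike
merely mimics the shape of v8::Platform as patched above, and the
fprintf sink is a placeholder.]

    #include <cstdint>
    #include <cstdio>

    class PlatformLike {
     public:
      virtual ~PlatformLike() = default;
      // No-op by default, matching the patched v8-platform.h.
      virtual void AddCrashKey(int id, const char* name, uintptr_t value) {}
    };

    class LoggingPlatform : public PlatformLike {
     public:
      void AddCrashKey(int id, const char* name, uintptr_t value) override {
        std::fprintf(stderr, "crash key %d (%s) = 0x%llx\n", id, name,
                     static_cast<unsigned long long>(value));
      }
    };

    int main() {
      LoggingPlatform platform;
      platform.AddCrashKey(1, "isolate", 0x1234);
      return 0;
    }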
29 changes: 9 additions & 20 deletions deps/v8/include/v8-profiler.h
@@ -18,8 +18,8 @@ namespace v8 {
 class HeapGraphNode;
 struct HeapStatsUpdate;
 
-using NativeObject = void*;
-using SnapshotObjectId = uint32_t;
+typedef uint32_t SnapshotObjectId;
+
 
 struct CpuProfileDeoptFrame {
   int script_id;
@@ -272,10 +272,12 @@ class V8_EXPORT CpuProfilingOptions {
    * zero, the sampling interval will be equal to
    * the profiler's sampling interval.
    */
-  CpuProfilingOptions(
-      CpuProfilingMode mode = kLeafNodeLineNumbers,
-      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
-      MaybeLocal<Context> filter_context = MaybeLocal<Context>());
+  CpuProfilingOptions(CpuProfilingMode mode = kLeafNodeLineNumbers,
+                      unsigned max_samples = kNoSampleLimit,
+                      int sampling_interval_us = 0)
+      : mode_(mode),
+        max_samples_(max_samples),
+        sampling_interval_us_(sampling_interval_us) {}
 
   CpuProfilingMode mode() const { return mode_; }
   unsigned max_samples() const { return max_samples_; }
@@ -284,13 +286,12 @@
  private:
   friend class internal::CpuProfile;
 
-  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
+  bool has_filter_context() const;
   void* raw_filter_context() const;
 
   CpuProfilingMode mode_;
   unsigned max_samples_;
   int sampling_interval_us_;
-  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
 };
 
 /**
@@ -752,12 +753,6 @@ class V8_EXPORT EmbedderGraph {
    */
   virtual const char* NamePrefix() { return nullptr; }
 
-  /**
-   * Returns the NativeObject that can be used for querying the
-   * |HeapSnapshot|.
-   */
-  virtual NativeObject GetNativeObject() { return nullptr; }
-
   Node(const Node&) = delete;
   Node& operator=(const Node&) = delete;
 };
@@ -820,12 +815,6 @@ class V8_EXPORT HeapProfiler {
    */
   SnapshotObjectId GetObjectId(Local<Value> value);
 
-  /**
-   * Returns SnapshotObjectId for a native object referenced by |value| if it
-   * has been seen by the heap profiler, kUnknownObjectId otherwise.
-   */
-  SnapshotObjectId GetObjectId(NativeObject value);
-
   /**
    * Returns heap object with given SnapshotObjectId if the object is alive,
    * otherwise empty handle is returned.
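
[The point of restoring this constructor signature is that 7.4-era call
sites keep compiling unchanged. A sketch of such a call site, assuming
the patched header above and the CpuProfiler::StartProfiling overload
that takes CpuProfilingOptions:]

    #include "v8-profiler.h"

    // Hypothetical embedder helper: all three parameters are optional,
    // and there is no filter-context argument in this signature.
    void StartSampledProfile(v8::CpuProfiler* profiler,
                             v8::Local<v8::String> title) {
      v8::CpuProfilingOptions options(v8::kCallerLineNumbers,
                                      /*max_samples=*/1000,
                                      /*sampling_interval_us=*/500);
      profiler->StartProfiling(title, options);
    }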
14 changes: 8 additions & 6 deletions deps/v8/include/v8.h
@@ -2094,7 +2094,6 @@ struct SampleInfo {
   StateTag vm_state;              // Current VM state.
   void* external_callback_entry;  // External callback address if VM is
                                   // executing an external callback.
-  void* top_context;              // Incumbent native context address.
 };
 
 struct MemoryRange {
@@ -7561,9 +7560,8 @@ class V8_EXPORT EmbedderHeapTracer {
    * overriden to fill a |TraceSummary| that is used by V8 to schedule future
    * garbage collections.
    */
-  V8_DEPRECATE_SOON("Use version with parameter.",
-                    virtual void TraceEpilogue()) {}
-  virtual void TraceEpilogue(TraceSummary* trace_summary);
+  virtual void TraceEpilogue() {}
+  virtual void TraceEpilogue(TraceSummary* trace_summary) { TraceEpilogue(); }
 
   /**
    * Called upon entering the final marking pause. No more incremental marking
@@ -7835,7 +7833,6 @@ class V8_EXPORT Isolate {
   class V8_EXPORT SuppressMicrotaskExecutionScope {
    public:
     explicit SuppressMicrotaskExecutionScope(Isolate* isolate);
-    explicit SuppressMicrotaskExecutionScope(MicrotaskQueue* microtask_queue);
     ~SuppressMicrotaskExecutionScope();
 
     // Prevent copying of Scope objects.
@@ -7846,8 +7843,13 @@
 
    private:
     internal::Isolate* const isolate_;
-    internal::MicrotaskQueue* const microtask_queue_;
+    internal::Address previous_stack_height_;
+    static_assert(sizeof(internal::Address) ==
+                      sizeof(internal::MicrotaskQueue*) &&
+                  alignof(internal::Address) ==
+                      alignof(internal::MicrotaskQueue*),
+                  "The previous_stack_height_ field can replace the "
+                  "microtask_queue_ field ABI-wise");
 
     friend class internal::ThreadLocalTop;
   };
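
[The static_assert above is the ABI argument in miniature: one
pointer-sized member is swapped for another, and the compiler proves
that object size and alignment cannot have changed. A standalone
version of the pattern with stand-in types:]

    #include <cstdint>

    struct MicrotaskQueueStub;  // stands in for internal::MicrotaskQueue
    using Address = uintptr_t;  // stands in for internal::Address

    struct ScopeLayout {
      void* isolate_;
      Address previous_stack_height_;  // occupies the old pointer's slot
      static_assert(sizeof(Address) == sizeof(MicrotaskQueueStub*) &&
                        alignof(Address) == alignof(MicrotaskQueueStub*),
                    "swapped field must not change size or alignment");
    };

    int main() {
      // Layout is identical to the old {pointer, pointer} version.
      return sizeof(ScopeLayout) == 2 * sizeof(void*) ? 0 : 1;
    }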
41 changes: 6 additions & 35 deletions deps/v8/src/api/api.cc
@@ -8183,14 +8183,13 @@ Isolate::AllowJavascriptExecutionScope::~AllowJavascriptExecutionScope() {
 
 Isolate::SuppressMicrotaskExecutionScope::SuppressMicrotaskExecutionScope(
     Isolate* isolate)
-    : isolate_(reinterpret_cast<i::Isolate*>(isolate)),
-      microtask_queue_(isolate_->default_microtask_queue()) {
+    : isolate_(reinterpret_cast<i::Isolate*>(isolate)) {
   isolate_->thread_local_top()->IncrementCallDepth(this);
-  microtask_queue_->IncrementMicrotasksSuppressions();
+  isolate_->default_microtask_queue()->IncrementMicrotasksSuppressions();
 }
 
 Isolate::SuppressMicrotaskExecutionScope::~SuppressMicrotaskExecutionScope() {
-  microtask_queue_->DecrementMicrotasksSuppressions();
+  isolate_->default_microtask_queue()->DecrementMicrotasksSuppressions();
   isolate_->thread_local_top()->DecrementCallDepth(this);
 }
 
@@ -9975,25 +9974,12 @@ CpuProfiler* CpuProfiler::New(Isolate* isolate, CpuProfilingNamingMode mode) {
   return New(isolate, mode, kLazyLogging);
 }
 
-CpuProfilingOptions::CpuProfilingOptions(CpuProfilingMode mode,
-                                         unsigned max_samples,
-                                         int sampling_interval_us,
-                                         MaybeLocal<Context> filter_context)
-    : mode_(mode),
-      max_samples_(max_samples),
-      sampling_interval_us_(sampling_interval_us) {
-  if (!filter_context.IsEmpty()) {
-    Local<Context> local_filter_context = filter_context.ToLocalChecked();
-    filter_context_.Reset(local_filter_context->GetIsolate(),
-                          local_filter_context);
-  }
+bool CpuProfilingOptions::has_filter_context() const {
+  return false;
 }
 
 void* CpuProfilingOptions::raw_filter_context() const {
-  return reinterpret_cast<void*>(
-      i::Context::cast(*Utils::OpenPersistent(filter_context_))
-          .native_context()
-          .address());
+  return nullptr;
 }
 
@@ -10265,10 +10251,6 @@ SnapshotObjectId HeapProfiler::GetObjectId(Local<Value> value) {
   return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(obj);
 }
 
-SnapshotObjectId HeapProfiler::GetObjectId(NativeObject value) {
-  return reinterpret_cast<i::HeapProfiler*>(this)->GetSnapshotObjectId(value);
-}
-
 Local<Value> HeapProfiler::FindObjectById(SnapshotObjectId id) {
   i::Handle<i::Object> obj =
       reinterpret_cast<i::HeapProfiler*>(this)->FindHeapObjectById(id);
@@ -10401,17 +10383,6 @@ void EmbedderHeapTracer::TracePrologue(TraceFlags flags) {
 #endif
 }
 
-void EmbedderHeapTracer::TraceEpilogue(TraceSummary* trace_summary) {
-#if __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wdeprecated"
-#endif
-  TraceEpilogue();
-#if __clang__
-#pragma clang diagnostic pop
-#endif
-}
-
 void EmbedderHeapTracer::FinalizeTracing() {
   if (isolate_) {
     i::Isolate* isolate = reinterpret_cast<i::Isolate*>(isolate_);
36 changes: 15 additions & 21 deletions deps/v8/src/execution/isolate-data.h
@@ -111,27 +111,21 @@ class IsolateData final {
   Address* builtins() { return builtins_; }
 
  private:
-  // Static layout definition.
-  //
-  // Note: The location of fields within IsolateData is significant. The
-  // closer they are to the value of kRootRegister (i.e.: isolate_root()), the
-  // cheaper it is to access them. See also: https://crbug.com/993264.
-  // The recommend guideline is to put frequently-accessed fields close to the
-  // beginning of IsolateData.
+  // Static layout definition.
 #define FIELDS(V)                                                             \
   V(kEmbedderDataOffset, Internals::kNumIsolateDataSlots* kSystemPointerSize) \
   V(kExternalMemoryOffset, kInt64Size)                                        \
   V(kExternalMemoryLlimitOffset, kInt64Size)                                  \
   V(kExternalMemoryAtLastMarkCompactOffset, kInt64Size)                       \
-  V(kFastCCallCallerFPOffset, kSystemPointerSize)                             \
-  V(kFastCCallCallerPCOffset, kSystemPointerSize)                             \
-  V(kStackGuardOffset, StackGuard::kSizeInBytes)                              \
   V(kRootsTableOffset, RootsTable::kEntriesCount* kSystemPointerSize)         \
   V(kExternalReferenceTableOffset, ExternalReferenceTable::kSizeInBytes)      \
   V(kThreadLocalTopOffset, ThreadLocalTop::kSizeInBytes)                      \
   V(kBuiltinEntryTableOffset, Builtins::builtin_count* kSystemPointerSize)    \
   V(kBuiltinsTableOffset, Builtins::builtin_count* kSystemPointerSize)        \
   V(kVirtualCallTargetRegisterOffset, kSystemPointerSize)                     \
+  V(kFastCCallCallerFPOffset, kSystemPointerSize)                             \
+  V(kFastCCallCallerPCOffset, kSystemPointerSize)                             \
+  V(kStackGuardOffset, StackGuard::kSizeInBytes)                              \
   V(kStackIsIterableOffset, kUInt8Size)                                       \
   /* This padding aligns IsolateData size by 8 bytes. */                      \
   V(kPaddingOffset,                                                           \
@@ -159,17 +153,6 @@ class IsolateData final {
   // Caches the amount of external memory registered at the last MC.
   int64_t external_memory_at_last_mark_compact_ = 0;
 
-  // Stores the state of the caller for TurboAssembler::CallCFunction so that
-  // the sampling CPU profiler can iterate the stack during such calls. These
-  // are stored on IsolateData so that they can be stored to with only one move
-  // instruction in compiled code.
-  Address fast_c_call_caller_fp_ = kNullAddress;
-  Address fast_c_call_caller_pc_ = kNullAddress;
-
-  // Fields related to the system and JS stack. In particular, this contains the
-  // stack limit used by stack checks in generated code.
-  StackGuard stack_guard_;
-
   RootsTable roots_;
 
   ExternalReferenceTable external_reference_table_;
@@ -189,6 +172,17 @@
   // ia32 (otherwise the arguments adaptor call runs out of registers).
   void* virtual_call_target_register_ = nullptr;
 
+  // Stores the state of the caller for TurboAssembler::CallCFunction so that
+  // the sampling CPU profiler can iterate the stack during such calls. These
+  // are stored on IsolateData so that they can be stored to with only one move
+  // instruction in compiled code.
+  Address fast_c_call_caller_fp_ = kNullAddress;
+  Address fast_c_call_caller_pc_ = kNullAddress;
+
+  // Fields related to the system and JS stack. In particular, this contains the
+  // stack limit used by stack checks in generated code.
+  StackGuard stack_guard_;
+
   // Whether the SafeStackFrameIterator can successfully iterate the current
   // stack. Only valid values are 0 or 1.
   uint8_t stack_is_iterable_ = 1;
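
[Why moving three V(...) lines is an ABI change at all: the FIELDS
X-macro expands into a running offset, so each entry's offset depends on
everything listed before it. A self-contained sketch of that expansion
technique, using made-up fields and sizes rather than the real list:]

    #include <cstdio>

    constexpr int kSystemPointerSize = 8;
    constexpr int kInt64Size = 8;

    // Made-up field list in the style of FIELDS(V) above.
    #define SKETCH_FIELDS(V)                         \
      V(kEmbedderDataOffset, 4 * kSystemPointerSize) \
      V(kExternalMemoryOffset, kInt64Size)           \
      V(kStackGuardOffset, 7 * kSystemPointerSize)

    // Each entry starts where the previous one ended, so reordering
    // entries changes the offset of everything that follows.
    enum : int {
    #define SKETCH_FIELD(Name, Size) Name, Name##End = Name + (Size)-1,
      SKETCH_FIELDS(SKETCH_FIELD)
    #undef SKETCH_FIELD
    };

    int main() {
      std::printf("kExternalMemoryOffset = %d\n",
                  static_cast<int>(kExternalMemoryOffset));
      std::printf("kStackGuardOffset     = %d\n",
                  static_cast<int>(kStackGuardOffset));
      return 0;
    }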
8 changes: 0 additions & 8 deletions deps/v8/src/execution/isolate.cc
@@ -2924,14 +2924,6 @@ void Isolate::CheckIsolateLayout() {
   CHECK_EQ(OFFSET_OF(Isolate, isolate_data_), 0);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.embedder_data_)),
            Internals::kIsolateEmbedderDataOffset);
-  CHECK_EQ(static_cast<int>(
-               OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_fp_)),
-           Internals::kIsolateFastCCallCallerFpOffset);
-  CHECK_EQ(static_cast<int>(
-               OFFSET_OF(Isolate, isolate_data_.fast_c_call_caller_pc_)),
-           Internals::kIsolateFastCCallCallerPcOffset);
-  CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.stack_guard_)),
-           Internals::kIsolateStackGuardOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, isolate_data_.roots_)),
            Internals::kIsolateRootsOffset);
   CHECK_EQ(Internals::kExternalMemoryOffset % 8, 0);
2 changes: 1 addition & 1 deletion deps/v8/src/objects/instance-type.h
@@ -133,8 +133,8 @@ enum InstanceType : uint16_t {
 
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
-  FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
+  FOREIGN_TYPE,
   BYTECODE_ARRAY_TYPE,
   FREE_SPACE_TYPE,
   FIXED_DOUBLE_ARRAY_TYPE,
2 changes: 1 addition & 1 deletion deps/v8/src/objects/objects-definitions.h
@@ -67,8 +67,8 @@ namespace internal {
                          \
   V(MAP_TYPE)            \
   V(CODE_TYPE)           \
-  V(FOREIGN_TYPE)        \
   V(BYTE_ARRAY_TYPE)     \
+  V(FOREIGN_TYPE)        \
   V(BYTECODE_ARRAY_TYPE) \
   V(FREE_SPACE_TYPE)     \
                          \
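
[Both instance-type changes exist for one reason: enum InstanceType and
the V(...) type list assign consecutive values by position, so swapping
two entries swaps their numeric values. That is what moves FOREIGN_TYPE
back to 0x47, the value hard-coded in Internals::kForeignType above. A
toy version of the effect:]

    // Toy enum, not the real InstanceType list.
    enum TypeSketch : unsigned short {
      kFirstDataType = 0x46,
      BYTE_ARRAY_LIKE = kFirstDataType,  // 0x46
      FOREIGN_LIKE,                      // 0x47: position determines value
    };

    static_assert(FOREIGN_LIKE == 0x47,
                  "matches the restored Internals::kForeignType");

    int main() { return 0; }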
11 changes: 0 additions & 11 deletions deps/v8/src/profiler/heap-profiler.cc
@@ -151,17 +151,6 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Object> obj) {
   return ids_->FindEntry(HeapObject::cast(*obj).address());
 }
 
-SnapshotObjectId HeapProfiler::GetSnapshotObjectId(NativeObject obj) {
-  // Try to find id of regular native node first.
-  SnapshotObjectId id = ids_->FindEntry(reinterpret_cast<Address>(obj));
-  // In case no id has been found, check whether there exists an entry where the
-  // native objects has been merged into a V8 entry.
-  if (id == v8::HeapProfiler::kUnknownObjectId) {
-    id = ids_->FindMergedNativeEntry(obj);
-  }
-  return id;
-}
-
 void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
   base::MutexGuard guard(&profiler_mutex_);
   bool known_object = ids_->MoveObject(from, to, size);
1 change: 0 additions & 1 deletion deps/v8/src/profiler/heap-profiler.h
@@ -52,7 +52,6 @@ class HeapProfiler : public HeapObjectAllocationTracker {
   int GetSnapshotsCount();
   HeapSnapshot* GetSnapshot(int index);
   SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
-  SnapshotObjectId GetSnapshotObjectId(NativeObject obj);
   void DeleteAllSnapshots();
   void RemoveSnapshot(HeapSnapshot* snapshot);
 
