diff --git a/node.gyp b/node.gyp
index 8f131acf661b8e..b4ac0b16965dbe 100644
--- a/node.gyp
+++ b/node.gyp
@@ -478,6 +478,7 @@
       'src/api/utils.cc',
       'src/async_wrap.cc',
       'src/cares_wrap.cc',
+      'src/cleanup_queue.cc',
       'src/connect_wrap.cc',
       'src/connection_wrap.cc',
       'src/debug_utils.cc',
@@ -577,6 +578,8 @@
       'src/base64-inl.h',
       'src/callback_queue.h',
       'src/callback_queue-inl.h',
+      'src/cleanup_queue.h',
+      'src/cleanup_queue-inl.h',
       'src/connect_wrap.h',
       'src/connection_wrap.h',
       'src/debug_utils.h',
diff --git a/src/base_object.h b/src/base_object.h
index 842f763a56d75c..a17879be5b452a 100644
--- a/src/base_object.h
+++ b/src/base_object.h
@@ -173,7 +173,7 @@ class BaseObject : public MemoryRetainer {
   // position of members in memory are predictable. For more information please
   // refer to `doc/contributing/node-postmortem-support.md`
   friend int GenDebugSymbols();
-  friend class CleanupHookCallback;
+  friend class CleanupQueue;
 
   template <typename T, bool kIsWeak>
   friend class BaseObjectPtrImpl;
diff --git a/src/cleanup_queue-inl.h b/src/cleanup_queue-inl.h
new file mode 100644
index 00000000000000..5d9a56e6b07956
--- /dev/null
+++ b/src/cleanup_queue-inl.h
@@ -0,0 +1,60 @@
+#ifndef SRC_CLEANUP_QUEUE_INL_H_
+#define SRC_CLEANUP_QUEUE_INL_H_
+
+#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#include "base_object.h"
+#include "cleanup_queue.h"
+#include "memory_tracker-inl.h"
+#include "util.h"
+
+namespace node {
+
+inline void CleanupQueue::MemoryInfo(MemoryTracker* tracker) const {
+  ForEachBaseObject([&](BaseObject* obj) {
+    if (obj->IsDoneInitializing()) tracker->Track(obj);
+  });
+}
+
+inline size_t CleanupQueue::SelfSize() const {
+  return sizeof(CleanupQueue) +
+         cleanup_hooks_.size() * sizeof(CleanupHookCallback);
+}
+
+bool CleanupQueue::empty() const {
+  return cleanup_hooks_.empty();
+}
+
+void CleanupQueue::Add(Callback cb, void* arg) {
+  auto insertion_info = cleanup_hooks_.emplace(
+      CleanupHookCallback{cb, arg, cleanup_hook_counter_++});
+  // Make sure there was no existing element with these values.
+  CHECK_EQ(insertion_info.second, true);
+}
+
+void CleanupQueue::Remove(Callback cb, void* arg) {
+  CleanupHookCallback search{cb, arg, 0};
+  cleanup_hooks_.erase(search);
+}
+
+template <typename T>
+void CleanupQueue::ForEachBaseObject(T&& iterator) const {
+  for (const auto& hook : cleanup_hooks_) {
+    BaseObject* obj = GetBaseObject(hook);
+    if (obj != nullptr) iterator(obj);
+  }
+}
+
+BaseObject* CleanupQueue::GetBaseObject(
+    const CleanupHookCallback& callback) const {
+  if (callback.fn_ == BaseObject::DeleteMe)
+    return static_cast<BaseObject*>(callback.arg_);
+  else
+    return nullptr;
+}
+
+}  // namespace node
+
+#endif  // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#endif  // SRC_CLEANUP_QUEUE_INL_H_
diff --git a/src/cleanup_queue.cc b/src/cleanup_queue.cc
new file mode 100644
index 00000000000000..5235513f16c257
--- /dev/null
+++ b/src/cleanup_queue.cc
@@ -0,0 +1,44 @@
+#include "cleanup_queue.h"  // NOLINT(build/include_inline)
+#include <algorithm>
+#include "cleanup_queue-inl.h"
+
+namespace node {
+
+void CleanupQueue::Drain() {
+  // Copy into a vector, since we can't sort an unordered_set in-place.
+  std::vector<CleanupHookCallback> callbacks(cleanup_hooks_.begin(),
+                                             cleanup_hooks_.end());
+  // We can't erase the copied elements from `cleanup_hooks_` yet, because we
+  // need to be able to check whether they were un-scheduled by another hook.
+
+  std::sort(callbacks.begin(),
+            callbacks.end(),
+            [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
+              // Sort in descending order so that the most recently inserted
+              // callbacks are run first.
+              return a.insertion_order_counter_ > b.insertion_order_counter_;
+            });
+
+  for (const CleanupHookCallback& cb : callbacks) {
+    if (cleanup_hooks_.count(cb) == 0) {
+      // This hook was removed from the `cleanup_hooks_` set during another
+      // hook that was run earlier. Nothing to do here.
+      continue;
+    }
+
+    cb.fn_(cb.arg_);
+    cleanup_hooks_.erase(cb);
+  }
+}
+
+size_t CleanupQueue::CleanupHookCallback::Hash::operator()(
+    const CleanupHookCallback& cb) const {
+  return std::hash<void*>()(cb.arg_);
+}
+
+bool CleanupQueue::CleanupHookCallback::Equal::operator()(
+    const CleanupHookCallback& a, const CleanupHookCallback& b) const {
+  return a.fn_ == b.fn_ && a.arg_ == b.arg_;
+}
+
+}  // namespace node
diff --git a/src/cleanup_queue.h b/src/cleanup_queue.h
new file mode 100644
index 00000000000000..64e04e1856a197
--- /dev/null
+++ b/src/cleanup_queue.h
@@ -0,0 +1,83 @@
+#ifndef SRC_CLEANUP_QUEUE_H_
+#define SRC_CLEANUP_QUEUE_H_
+
+#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#include <cstddef>
+#include <cstdint>
+#include <unordered_set>
+
+#include "memory_tracker.h"
+
+namespace node {
+
+class BaseObject;
+
+class CleanupQueue : public MemoryRetainer {
+ public:
+  typedef void (*Callback)(void*);
+
+  CleanupQueue() {}
+
+  // Not copyable.
+  CleanupQueue(const CleanupQueue&) = delete;
+
+  SET_MEMORY_INFO_NAME(CleanupQueue)
+  inline void MemoryInfo(node::MemoryTracker* tracker) const override;
+  inline size_t SelfSize() const override;
+
+  inline bool empty() const;
+
+  inline void Add(Callback cb, void* arg);
+  inline void Remove(Callback cb, void* arg);
+  void Drain();
+
+  template <typename T>
+  inline void ForEachBaseObject(T&& iterator) const;
+
+ private:
+  class CleanupHookCallback {
+   public:
+    CleanupHookCallback(Callback fn,
+                        void* arg,
+                        uint64_t insertion_order_counter)
+        : fn_(fn),
+          arg_(arg),
+          insertion_order_counter_(insertion_order_counter) {}
+
+    // Only hashes `arg_`, since that is usually enough to identify the hook.
+    struct Hash {
+      size_t operator()(const CleanupHookCallback& cb) const;
+    };
+
+    // Compares by `fn_` and `arg_` being equal.
+    struct Equal {
+      bool operator()(const CleanupHookCallback& a,
+                      const CleanupHookCallback& b) const;
+    };
+
+   private:
+    friend class CleanupQueue;
+    Callback fn_;
+    void* arg_;
+
+    // We keep track of the insertion order for these objects, so that we can
+    // call the callbacks in reverse order when we are cleaning up.
+    uint64_t insertion_order_counter_;
+  };
+
+  inline BaseObject* GetBaseObject(const CleanupHookCallback& callback) const;
+
+  // Use an unordered_set, so that we have efficient insertion and removal.
+  std::unordered_set<CleanupHookCallback,
+                     CleanupHookCallback::Hash,
+                     CleanupHookCallback::Equal>
+      cleanup_hooks_;
+  uint64_t cleanup_hook_counter_ = 0;
+};
+
+}  // namespace node
+
+#endif  // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS
+
+#endif  // SRC_CLEANUP_QUEUE_H_
diff --git a/src/env-inl.h b/src/env-inl.h
index 59dee7761d4ea5..afa6981a80a33d 100644
--- a/src/env-inl.h
+++ b/src/env-inl.h
@@ -787,43 +787,17 @@ inline void Environment::ThrowUVException(int errorno,
       UVException(isolate(), errorno, syscall, message, path, dest));
 }
 
-void Environment::AddCleanupHook(CleanupCallback fn, void* arg) {
-  auto insertion_info = cleanup_hooks_.emplace(CleanupHookCallback {
-    fn, arg, cleanup_hook_counter_++
-  });
-  // Make sure there was no existing element with these values.
-  CHECK_EQ(insertion_info.second, true);
-}
-
-void Environment::RemoveCleanupHook(CleanupCallback fn, void* arg) {
-  CleanupHookCallback search { fn, arg, 0 };
-  cleanup_hooks_.erase(search);
-}
-
-size_t CleanupHookCallback::Hash::operator()(
-    const CleanupHookCallback& cb) const {
-  return std::hash<void*>()(cb.arg_);
+void Environment::AddCleanupHook(CleanupQueue::Callback fn, void* arg) {
+  cleanup_queue_.Add(fn, arg);
 }
 
-bool CleanupHookCallback::Equal::operator()(
-    const CleanupHookCallback& a, const CleanupHookCallback& b) const {
-  return a.fn_ == b.fn_ && a.arg_ == b.arg_;
-}
-
-BaseObject* CleanupHookCallback::GetBaseObject() const {
-  if (fn_ == BaseObject::DeleteMe)
-    return static_cast<BaseObject*>(arg_);
-  else
-    return nullptr;
+void Environment::RemoveCleanupHook(CleanupQueue::Callback fn, void* arg) {
+  cleanup_queue_.Remove(fn, arg);
 }
 
 template <typename T>
 void Environment::ForEachBaseObject(T&& iterator) {
-  for (const auto& hook : cleanup_hooks_) {
-    BaseObject* obj = hook.GetBaseObject();
-    if (obj != nullptr)
-      iterator(obj);
-  }
+  cleanup_queue_.ForEachBaseObject(std::forward<T>(iterator));
 }
 
 template <typename T>
diff --git a/src/env.cc b/src/env.cc
index 18bb41b83bd03a..01908740e0cbc5 100644
--- a/src/env.cc
+++ b/src/env.cc
@@ -1022,33 +1022,10 @@ void Environment::RunCleanup() {
   bindings_.clear();
   CleanupHandles();
 
-  while (!cleanup_hooks_.empty() ||
-         native_immediates_.size() > 0 ||
+  while (!cleanup_queue_.empty() || native_immediates_.size() > 0 ||
          native_immediates_threadsafe_.size() > 0 ||
         native_immediates_interrupts_.size() > 0) {
-    // Copy into a vector, since we can't sort an unordered_set in-place.
-    std::vector<CleanupHookCallback> callbacks(
-        cleanup_hooks_.begin(), cleanup_hooks_.end());
-    // We can't erase the copied elements from `cleanup_hooks_` yet, because we
-    // need to be able to check whether they were un-scheduled by another hook.
-
-    std::sort(callbacks.begin(), callbacks.end(),
-              [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
-      // Sort in descending order so that the most recently inserted callbacks
-      // are run first.
-      return a.insertion_order_counter_ > b.insertion_order_counter_;
-    });
-
-    for (const CleanupHookCallback& cb : callbacks) {
-      if (cleanup_hooks_.count(cb) == 0) {
-        // This hook was removed from the `cleanup_hooks_` set during another
-        // hook that was run earlier. Nothing to do here.
-        continue;
-      }
-
-      cb.fn_(cb.arg_);
-      cleanup_hooks_.erase(cb);
-    }
+    cleanup_queue_.Drain();
 
     CleanupHandles();
   }
@@ -1847,10 +1824,6 @@ void Environment::BuildEmbedderGraph(Isolate* isolate,
   MemoryTracker tracker(isolate, graph);
   Environment* env = static_cast<Environment*>(data);
   tracker.Track(env);
-  env->ForEachBaseObject([&](BaseObject* obj) {
-    if (obj->IsDoneInitializing())
-      tracker.Track(obj);
-  });
 }
 
 size_t Environment::NearHeapLimitCallback(void* data,
@@ -1985,6 +1958,7 @@ inline size_t Environment::SelfSize() const {
   // this can be done for common types within the Track* calls automatically
   // if a certain scope is entered.
   size -= sizeof(async_hooks_);
+  size -= sizeof(cleanup_queue_);
   size -= sizeof(tick_info_);
   size -= sizeof(immediate_info_);
   return size;
@@ -2002,8 +1976,7 @@ void Environment::MemoryInfo(MemoryTracker* tracker) const {
   tracker->TrackField("should_abort_on_uncaught_toggle",
                       should_abort_on_uncaught_toggle_);
   tracker->TrackField("stream_base_state", stream_base_state_);
-  tracker->TrackFieldWithSize(
-      "cleanup_hooks", cleanup_hooks_.size() * sizeof(CleanupHookCallback));
+  tracker->TrackField("cleanup_queue", cleanup_queue_);
   tracker->TrackField("async_hooks", async_hooks_);
   tracker->TrackField("immediate_info", immediate_info_);
   tracker->TrackField("tick_info", tick_info_);
diff --git a/src/env.h b/src/env.h
index 6e909092f7a16c..280a6c958ea38f 100644
--- a/src/env.h
+++ b/src/env.h
@@ -30,6 +30,7 @@
 #include "inspector_profiler.h"
 #endif
 #include "callback_queue.h"
+#include "cleanup_queue-inl.h"
 #include "debug_utils.h"
 #include "handle_wrap.h"
 #include "node.h"
@@ -922,38 +923,6 @@ class ShouldNotAbortOnUncaughtScope {
   Environment* env_;
 };
 
-class CleanupHookCallback {
- public:
-  typedef void (*Callback)(void*);
-
-  CleanupHookCallback(Callback fn,
-                      void* arg,
-                      uint64_t insertion_order_counter)
-      : fn_(fn), arg_(arg), insertion_order_counter_(insertion_order_counter) {}
-
-  // Only hashes `arg_`, since that is usually enough to identify the hook.
-  struct Hash {
-    inline size_t operator()(const CleanupHookCallback& cb) const;
-  };
-
-  // Compares by `fn_` and `arg_` being equal.
-  struct Equal {
-    inline bool operator()(const CleanupHookCallback& a,
-                           const CleanupHookCallback& b) const;
-  };
-
-  inline BaseObject* GetBaseObject() const;
-
- private:
-  friend class Environment;
-  Callback fn_;
-  void* arg_;
-
-  // We keep track of the insertion order for these objects, so that we can
-  // call the callbacks in reverse order when we are cleaning up.
-  uint64_t insertion_order_counter_;
-};
-
 typedef void (*DeserializeRequestCallback)(v8::Local<v8::Context> context,
                                            v8::Local<v8::Object> holder,
                                            int index,
@@ -1388,9 +1357,8 @@ class Environment : public MemoryRetainer {
   void ScheduleTimer(int64_t duration);
   void ToggleTimerRef(bool ref);
 
-  using CleanupCallback = CleanupHookCallback::Callback;
-  inline void AddCleanupHook(CleanupCallback cb, void* arg);
-  inline void RemoveCleanupHook(CleanupCallback cb, void* arg);
+  inline void AddCleanupHook(CleanupQueue::Callback cb, void* arg);
+  inline void RemoveCleanupHook(CleanupQueue::Callback cb, void* arg);
   void RunCleanup();
 
   static size_t NearHeapLimitCallback(void* data,
@@ -1598,11 +1566,7 @@ class Environment : public MemoryRetainer {
 
   BindingDataStore bindings_;
 
-  // Use an unordered_set, so that we have efficient insertion and removal.
-  std::unordered_set<CleanupHookCallback,
-                     CleanupHookCallback::Hash,
-                     CleanupHookCallback::Equal> cleanup_hooks_;
-  uint64_t cleanup_hook_counter_ = 0;
+  CleanupQueue cleanup_queue_;
   bool started_cleanup_ = false;
 
   int64_t base_object_count_ = 0;
diff --git a/test/common/heap.js b/test/common/heap.js
index fa74e606f03ee0..2d1ea2c8179cd1 100644
--- a/test/common/heap.js
+++ b/test/common/heap.js
@@ -89,7 +89,7 @@ function inspectNode(snapshot) {
 }
 
 function isEdge(edge, { node_name, edge_name }) {
-  if (edge.name !== edge_name) {
+  if (edge_name !== undefined && edge.name !== edge_name) {
     return false;
   }
   // From our internal embedded graph
diff --git a/test/pummel/test-heapdump-env.js b/test/pummel/test-heapdump-env.js
index 7030271469c512..1d0b7fe824fd02 100644
--- a/test/pummel/test-heapdump-env.js
+++ b/test/pummel/test-heapdump-env.js
@@ -15,10 +15,16 @@ const context = require('vm').createScript('const foo = 123');
 
 validateSnapshotNodes('Node / Environment', [{
   children: [
-    { node_name: 'Node / cleanup_hooks', edge_name: 'cleanup_hooks' },
+    { node_name: 'Node / CleanupQueue', edge_name: 'cleanup_queue' },
     { node_name: 'process', edge_name: 'process_object' },
     { node_name: 'Node / IsolateData', edge_name: 'isolate_data' },
   ]
 }]);
 
+validateSnapshotNodes('Node / CleanupQueue', [{
+  children: [
+    { node_name: 'Node / ContextifyScript' },
+  ]
+}]);
+
 console.log(context);  // Make sure it's not GC'ed
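
Note on the behaviour this refactor preserves: Environment::RunCleanup() still drains hooks most-recently-added first, and a hook that an earlier hook already un-scheduled is skipped. The sketch below is not Node's CleanupQueue (that class is internal and only builds with NODE_WANT_INTERNALS); it is a minimal standalone model of the same Drain() contract, with invented names such as MiniCleanupQueue, so the ordering can be compiled and observed in isolation.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <unordered_map>
#include <utility>
#include <vector>

namespace demo {

// Minimal model of the queue's observable behaviour: hooks are keyed by
// (function, argument), remember their insertion order, and Drain() runs
// them most-recently-added first, skipping hooks removed in the meantime.
class MiniCleanupQueue {
 public:
  using Callback = void (*)(void*);

  void Add(Callback cb, void* arg) { hooks_.emplace(Key{cb, arg}, counter_++); }
  void Remove(Callback cb, void* arg) { hooks_.erase(Key{cb, arg}); }

  void Drain() {
    // Copy and sort by descending insertion order, as the real Drain() does.
    std::vector<std::pair<Key, uint64_t>> callbacks(hooks_.begin(),
                                                    hooks_.end());
    std::sort(callbacks.begin(), callbacks.end(),
              [](const auto& a, const auto& b) { return a.second > b.second; });
    for (const auto& entry : callbacks) {
      // Skip hooks that were un-scheduled by a hook that ran before them.
      if (hooks_.count(entry.first) == 0) continue;
      entry.first.fn(entry.first.arg);
      hooks_.erase(entry.first);
    }
  }

 private:
  struct Key {
    Callback fn;
    void* arg;
    bool operator==(const Key& other) const {
      return fn == other.fn && arg == other.arg;
    }
  };
  // Like the real Hash functor, only the argument pointer is hashed.
  struct KeyHash {
    std::size_t operator()(const Key& k) const {
      return std::hash<void*>()(k.arg);
    }
  };

  std::unordered_map<Key, uint64_t, KeyHash> hooks_;
  uint64_t counter_ = 0;
};

}  // namespace demo

static void HookA(void*) { std::cout << "A ran\n"; }
static void HookB(void*) { std::cout << "B ran\n"; }

int main() {
  demo::MiniCleanupQueue queue;
  queue.Add(HookA, nullptr);
  queue.Add(HookB, nullptr);
  queue.Drain();  // Prints "B ran" then "A ran" (reverse insertion order).
  return 0;
}

One design difference worth noting: the real queue stores the insertion counter inside the set element itself, with custom Hash/Equal functors, rather than as a map value; the map above is only to keep the standalone example short.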