src: consolidate environment cleanup queue
Each Realm tracks its own cleanup hooks and drains them when it is about to be destroyed. Move the implementation of the cleanup queue into its own class so that it can also be used in `node::Realm`.

PR-URL: https://github.com/nodejs/node/pull/44379
Refs: https://github.com/nodejs/node/pull/44348
Refs: https://github.com/nodejs/node/issues/42528
Reviewed-By: Joyee Cheung <joyeec9h3@gmail.com>
Parent: f0cf1005a3
Commit: 0b5b5edd86
10 changed files with 212 additions and 105 deletions
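For context, the registration pattern used by internal callers is unchanged by this commit: a `void (*)(void*)` callback plus an argument is handed to the Environment, which now forwards it to its CleanupQueue member. A minimal sketch of a hypothetical caller (the `MyNativeState` helpers below are illustrative and not part of this commit):

// Sketch only: hypothetical internal caller. After this commit,
// Environment::AddCleanupHook() simply forwards to its CleanupQueue member.
#include "env-inl.h"

namespace node {

struct MyNativeState {
  int dummy = 0;  // stand-in for some per-Environment native resource
};

static void DeleteMyNativeState(void* arg) {
  delete static_cast<MyNativeState*>(arg);
}

void SetupMyNativeState(Environment* env) {
  auto* state = new MyNativeState();
  // Hooks run during Environment::RunCleanup(), most recently added first.
  env->AddCleanupHook(DeleteMyNativeState, state);
}

}  // namespace node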
node.gyp (3 changed lines)

@@ -466,6 +466,7 @@
         'src/api/utils.cc',
         'src/async_wrap.cc',
         'src/cares_wrap.cc',
+        'src/cleanup_queue.cc',
         'src/connect_wrap.cc',
         'src/connection_wrap.cc',
         'src/debug_utils.cc',
@@ -567,6 +568,8 @@
         'src/base64-inl.h',
         'src/callback_queue.h',
         'src/callback_queue-inl.h',
+        'src/cleanup_queue.h',
+        'src/cleanup_queue-inl.h',
         'src/connect_wrap.h',
         'src/connection_wrap.h',
         'src/debug_utils.h',
src/base_object.h

@@ -178,7 +178,7 @@ class BaseObject : public MemoryRetainer {
   // position of members in memory are predictable. For more information please
   // refer to `doc/contributing/node-postmortem-support.md`
   friend int GenDebugSymbols();
-  friend class CleanupHookCallback;
+  friend class CleanupQueue;
   template <typename T, bool kIsWeak>
   friend class BaseObjectPtrImpl;
 
src/cleanup_queue-inl.h (new file, 60 lines)

#ifndef SRC_CLEANUP_QUEUE_INL_H_
#define SRC_CLEANUP_QUEUE_INL_H_

#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS

#include "base_object.h"
#include "cleanup_queue.h"
#include "memory_tracker-inl.h"
#include "util.h"

namespace node {

inline void CleanupQueue::MemoryInfo(MemoryTracker* tracker) const {
  ForEachBaseObject([&](BaseObject* obj) {
    if (obj->IsDoneInitializing()) tracker->Track(obj);
  });
}

inline size_t CleanupQueue::SelfSize() const {
  return sizeof(CleanupQueue) +
         cleanup_hooks_.size() * sizeof(CleanupHookCallback);
}

bool CleanupQueue::empty() const {
  return cleanup_hooks_.empty();
}

void CleanupQueue::Add(Callback cb, void* arg) {
  auto insertion_info = cleanup_hooks_.emplace(
      CleanupHookCallback{cb, arg, cleanup_hook_counter_++});
  // Make sure there was no existing element with these values.
  CHECK_EQ(insertion_info.second, true);
}

void CleanupQueue::Remove(Callback cb, void* arg) {
  CleanupHookCallback search{cb, arg, 0};
  cleanup_hooks_.erase(search);
}

template <typename T>
void CleanupQueue::ForEachBaseObject(T&& iterator) const {
  for (const auto& hook : cleanup_hooks_) {
    BaseObject* obj = GetBaseObject(hook);
    if (obj != nullptr) iterator(obj);
  }
}

BaseObject* CleanupQueue::GetBaseObject(
    const CleanupHookCallback& callback) const {
  if (callback.fn_ == BaseObject::DeleteMe)
    return static_cast<BaseObject*>(callback.arg_);
  else
    return nullptr;
}

}  // namespace node

#endif  // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS

#endif  // SRC_CLEANUP_QUEUE_INL_H_
src/cleanup_queue.cc (new file, 44 lines)

#include "cleanup_queue.h"  // NOLINT(build/include_inline)

#include <vector>

#include "cleanup_queue-inl.h"

namespace node {

void CleanupQueue::Drain() {
  // Copy into a vector, since we can't sort an unordered_set in-place.
  std::vector<CleanupHookCallback> callbacks(cleanup_hooks_.begin(),
                                             cleanup_hooks_.end());
  // We can't erase the copied elements from `cleanup_hooks_` yet, because we
  // need to be able to check whether they were un-scheduled by another hook.

  std::sort(callbacks.begin(),
            callbacks.end(),
            [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
              // Sort in descending order so that the most recently inserted
              // callbacks are run first.
              return a.insertion_order_counter_ > b.insertion_order_counter_;
            });

  for (const CleanupHookCallback& cb : callbacks) {
    if (cleanup_hooks_.count(cb) == 0) {
      // This hook was removed from the `cleanup_hooks_` set during another
      // hook that was run earlier. Nothing to do here.
      continue;
    }

    cb.fn_(cb.arg_);
    cleanup_hooks_.erase(cb);
  }
}

size_t CleanupQueue::CleanupHookCallback::Hash::operator()(
    const CleanupHookCallback& cb) const {
  return std::hash<void*>()(cb.arg_);
}

bool CleanupQueue::CleanupHookCallback::Equal::operator()(
    const CleanupHookCallback& a, const CleanupHookCallback& b) const {
  return a.fn_ == b.fn_ && a.arg_ == b.arg_;
}

}  // namespace node
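The drain order matters: hooks run newest-first, and a hook that was unscheduled by an earlier hook is skipped via the `count()` check. Below is a small self-contained model of those semantics built from standard-library types; it is an illustration, not Node's actual class.

// Illustrative model only -- mirrors the Drain() semantics above with
// plain standard-library types; it is not Node's implementation.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <unordered_set>
#include <vector>

using Callback = void (*)(void*);

struct Hook {
  Callback fn;
  void* arg;
  uint64_t order;  // insertion order counter
};
struct HookHash {
  std::size_t operator()(const Hook& h) const {
    return std::hash<void*>()(h.arg);
  }
};
struct HookEqual {
  bool operator()(const Hook& a, const Hook& b) const {
    return a.fn == b.fn && a.arg == b.arg;
  }
};

std::unordered_set<Hook, HookHash, HookEqual> hooks;
uint64_t counter = 0;

void Add(Callback fn, void* arg) { hooks.insert(Hook{fn, arg, counter++}); }
void Remove(Callback fn, void* arg) { hooks.erase(Hook{fn, arg, 0}); }

void Drain() {
  // Copy first: an unordered_set cannot be sorted in place.
  std::vector<Hook> sorted(hooks.begin(), hooks.end());
  std::sort(sorted.begin(), sorted.end(),
            [](const Hook& a, const Hook& b) { return a.order > b.order; });
  for (const Hook& h : sorted) {
    if (hooks.count(h) == 0) continue;  // unscheduled by an earlier hook
    h.fn(h.arg);
    hooks.erase(h);
  }
}

int main() {
  const char* names[] = {"first", "second", "third"};
  Callback print = [](void* arg) { std::puts(static_cast<const char*>(arg)); };
  for (const char* name : names) Add(print, const_cast<char*>(name));
  Drain();  // prints: third, second, first (newest hook runs first)
}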
src/cleanup_queue.h (new file, 83 lines)

#ifndef SRC_CLEANUP_QUEUE_H_
#define SRC_CLEANUP_QUEUE_H_

#if defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS

#include <cstddef>
#include <cstdint>
#include <unordered_set>

#include "memory_tracker.h"

namespace node {

class BaseObject;

class CleanupQueue : public MemoryRetainer {
 public:
  typedef void (*Callback)(void*);

  CleanupQueue() {}

  // Not copyable.
  CleanupQueue(const CleanupQueue&) = delete;

  SET_MEMORY_INFO_NAME(CleanupQueue)
  inline void MemoryInfo(node::MemoryTracker* tracker) const override;
  inline size_t SelfSize() const override;

  inline bool empty() const;

  inline void Add(Callback cb, void* arg);
  inline void Remove(Callback cb, void* arg);
  void Drain();

  template <typename T>
  inline void ForEachBaseObject(T&& iterator) const;

 private:
  class CleanupHookCallback {
   public:
    CleanupHookCallback(Callback fn,
                        void* arg,
                        uint64_t insertion_order_counter)
        : fn_(fn),
          arg_(arg),
          insertion_order_counter_(insertion_order_counter) {}

    // Only hashes `arg_`, since that is usually enough to identify the hook.
    struct Hash {
      size_t operator()(const CleanupHookCallback& cb) const;
    };

    // Compares by `fn_` and `arg_` being equal.
    struct Equal {
      bool operator()(const CleanupHookCallback& a,
                      const CleanupHookCallback& b) const;
    };

   private:
    friend class CleanupQueue;
    Callback fn_;
    void* arg_;

    // We keep track of the insertion order for these objects, so that we can
    // call the callbacks in reverse order when we are cleaning up.
    uint64_t insertion_order_counter_;
  };

  inline BaseObject* GetBaseObject(const CleanupHookCallback& callback) const;

  // Use an unordered_set, so that we have efficient insertion and removal.
  std::unordered_set<CleanupHookCallback,
                     CleanupHookCallback::Hash,
                     CleanupHookCallback::Equal>
      cleanup_hooks_;
  uint64_t cleanup_hook_counter_ = 0;
};

}  // namespace node

#endif  // defined(NODE_WANT_INTERNALS) && NODE_WANT_INTERNALS

#endif  // SRC_CLEANUP_QUEUE_H_
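Because the queue carries no Environment-specific state, any owner can embed one; that is what makes the follow-up use in `node::Realm` possible. A hypothetical sketch of a Realm-like owner driving the API declared above (the class name is illustrative and not from this diff):

// Hypothetical owner class, assuming only the CleanupQueue API shown above.
#include "cleanup_queue-inl.h"

namespace node {

class HypotheticalRealmLike {
 public:
  void AddCleanupHook(CleanupQueue::Callback cb, void* arg) {
    cleanup_queue_.Add(cb, arg);
  }

  void RemoveCleanupHook(CleanupQueue::Callback cb, void* arg) {
    cleanup_queue_.Remove(cb, arg);
  }

  // Called while tearing the owner down. Drain() runs hooks newest-first;
  // hooks added during draining are picked up by the next loop iteration.
  void RunCleanup() {
    while (!cleanup_queue_.empty()) cleanup_queue_.Drain();
  }

 private:
  CleanupQueue cleanup_queue_;
};

}  // namespace node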
src/env-inl.h

@@ -781,43 +781,17 @@ inline void Environment::ThrowUVException(int errorno,
       UVException(isolate(), errorno, syscall, message, path, dest));
 }
 
-void Environment::AddCleanupHook(CleanupCallback fn, void* arg) {
-  auto insertion_info = cleanup_hooks_.emplace(CleanupHookCallback {
-    fn, arg, cleanup_hook_counter_++
-  });
-  // Make sure there was no existing element with these values.
-  CHECK_EQ(insertion_info.second, true);
+void Environment::AddCleanupHook(CleanupQueue::Callback fn, void* arg) {
+  cleanup_queue_.Add(fn, arg);
 }
 
-void Environment::RemoveCleanupHook(CleanupCallback fn, void* arg) {
-  CleanupHookCallback search { fn, arg, 0 };
-  cleanup_hooks_.erase(search);
-}
-
-size_t CleanupHookCallback::Hash::operator()(
-    const CleanupHookCallback& cb) const {
-  return std::hash<void*>()(cb.arg_);
-}
-
-bool CleanupHookCallback::Equal::operator()(
-    const CleanupHookCallback& a, const CleanupHookCallback& b) const {
-  return a.fn_ == b.fn_ && a.arg_ == b.arg_;
-}
-
-BaseObject* CleanupHookCallback::GetBaseObject() const {
-  if (fn_ == BaseObject::DeleteMe)
-    return static_cast<BaseObject*>(arg_);
-  else
-    return nullptr;
+void Environment::RemoveCleanupHook(CleanupQueue::Callback fn, void* arg) {
+  cleanup_queue_.Remove(fn, arg);
 }
 
 template <typename T>
 void Environment::ForEachBaseObject(T&& iterator) {
-  for (const auto& hook : cleanup_hooks_) {
-    BaseObject* obj = hook.GetBaseObject();
-    if (obj != nullptr)
-      iterator(obj);
-  }
+  cleanup_queue_.ForEachBaseObject(std::forward<T>(iterator));
 }
 
 void Environment::modify_base_object_count(int64_t delta) {
src/env.cc (35 changed lines)

@@ -1000,33 +1000,10 @@ void Environment::RunCleanup() {
   bindings_.clear();
   CleanupHandles();
 
-  while (!cleanup_hooks_.empty() ||
-         native_immediates_.size() > 0 ||
+  while (!cleanup_queue_.empty() || native_immediates_.size() > 0 ||
          native_immediates_threadsafe_.size() > 0 ||
          native_immediates_interrupts_.size() > 0) {
-    // Copy into a vector, since we can't sort an unordered_set in-place.
-    std::vector<CleanupHookCallback> callbacks(
-        cleanup_hooks_.begin(), cleanup_hooks_.end());
-    // We can't erase the copied elements from `cleanup_hooks_` yet, because we
-    // need to be able to check whether they were un-scheduled by another hook.
-
-    std::sort(callbacks.begin(), callbacks.end(),
-              [](const CleanupHookCallback& a, const CleanupHookCallback& b) {
-      // Sort in descending order so that the most recently inserted callbacks
-      // are run first.
-      return a.insertion_order_counter_ > b.insertion_order_counter_;
-    });
-
-    for (const CleanupHookCallback& cb : callbacks) {
-      if (cleanup_hooks_.count(cb) == 0) {
-        // This hook was removed from the `cleanup_hooks_` set during another
-        // hook that was run earlier. Nothing to do here.
-        continue;
-      }
-
-      cb.fn_(cb.arg_);
-      cleanup_hooks_.erase(cb);
-    }
+    cleanup_queue_.Drain();
     CleanupHandles();
   }
 
@@ -1730,10 +1707,6 @@ void Environment::BuildEmbedderGraph(Isolate* isolate,
   MemoryTracker tracker(isolate, graph);
   Environment* env = static_cast<Environment*>(data);
   tracker.Track(env);
-  env->ForEachBaseObject([&](BaseObject* obj) {
-    if (obj->IsDoneInitializing())
-      tracker.Track(obj);
-  });
 }
 
 size_t Environment::NearHeapLimitCallback(void* data,
@@ -1868,6 +1841,7 @@ inline size_t Environment::SelfSize() const {
   // this can be done for common types within the Track* calls automatically
   // if a certain scope is entered.
   size -= sizeof(async_hooks_);
+  size -= sizeof(cleanup_queue_);
   size -= sizeof(tick_info_);
   size -= sizeof(immediate_info_);
   return size;
@@ -1885,8 +1859,7 @@ void Environment::MemoryInfo(MemoryTracker* tracker) const {
   tracker->TrackField("should_abort_on_uncaught_toggle",
                       should_abort_on_uncaught_toggle_);
   tracker->TrackField("stream_base_state", stream_base_state_);
-  tracker->TrackFieldWithSize(
-      "cleanup_hooks", cleanup_hooks_.size() * sizeof(CleanupHookCallback));
+  tracker->TrackField("cleanup_queue", cleanup_queue_);
   tracker->TrackField("async_hooks", async_hooks_);
   tracker->TrackField("immediate_info", immediate_info_);
   tracker->TrackField("tick_info", tick_info_);
src/env.h
44
src/env.h
|
@ -30,6 +30,7 @@
|
|||
#include "inspector_profiler.h"
|
||||
#endif
|
||||
#include "callback_queue.h"
|
||||
#include "cleanup_queue-inl.h"
|
||||
#include "debug_utils.h"
|
||||
#include "env_properties.h"
|
||||
#include "handle_wrap.h"
|
||||
|
@ -492,38 +493,6 @@ class ShouldNotAbortOnUncaughtScope {
|
|||
Environment* env_;
|
||||
};
|
||||
|
||||
class CleanupHookCallback {
|
||||
public:
|
||||
typedef void (*Callback)(void*);
|
||||
|
||||
CleanupHookCallback(Callback fn,
|
||||
void* arg,
|
||||
uint64_t insertion_order_counter)
|
||||
: fn_(fn), arg_(arg), insertion_order_counter_(insertion_order_counter) {}
|
||||
|
||||
// Only hashes `arg_`, since that is usually enough to identify the hook.
|
||||
struct Hash {
|
||||
inline size_t operator()(const CleanupHookCallback& cb) const;
|
||||
};
|
||||
|
||||
// Compares by `fn_` and `arg_` being equal.
|
||||
struct Equal {
|
||||
inline bool operator()(const CleanupHookCallback& a,
|
||||
const CleanupHookCallback& b) const;
|
||||
};
|
||||
|
||||
inline BaseObject* GetBaseObject() const;
|
||||
|
||||
private:
|
||||
friend class Environment;
|
||||
Callback fn_;
|
||||
void* arg_;
|
||||
|
||||
// We keep track of the insertion order for these objects, so that we can
|
||||
// call the callbacks in reverse order when we are cleaning up.
|
||||
uint64_t insertion_order_counter_;
|
||||
};
|
||||
|
||||
typedef void (*DeserializeRequestCallback)(v8::Local<v8::Context> context,
|
||||
v8::Local<v8::Object> holder,
|
||||
int index,
|
||||
|
@ -990,9 +959,8 @@ class Environment : public MemoryRetainer {
|
|||
void ScheduleTimer(int64_t duration);
|
||||
void ToggleTimerRef(bool ref);
|
||||
|
||||
using CleanupCallback = CleanupHookCallback::Callback;
|
||||
inline void AddCleanupHook(CleanupCallback cb, void* arg);
|
||||
inline void RemoveCleanupHook(CleanupCallback cb, void* arg);
|
||||
inline void AddCleanupHook(CleanupQueue::Callback cb, void* arg);
|
||||
inline void RemoveCleanupHook(CleanupQueue::Callback cb, void* arg);
|
||||
void RunCleanup();
|
||||
|
||||
static size_t NearHeapLimitCallback(void* data,
|
||||
|
@ -1208,11 +1176,7 @@ class Environment : public MemoryRetainer {
|
|||
|
||||
BindingDataStore bindings_;
|
||||
|
||||
// Use an unordered_set, so that we have efficient insertion and removal.
|
||||
std::unordered_set<CleanupHookCallback,
|
||||
CleanupHookCallback::Hash,
|
||||
CleanupHookCallback::Equal> cleanup_hooks_;
|
||||
uint64_t cleanup_hook_counter_ = 0;
|
||||
CleanupQueue cleanup_queue_;
|
||||
bool started_cleanup_ = false;
|
||||
|
||||
int64_t base_object_count_ = 0;
|
||||
|
test/common/heap.js

@@ -89,7 +89,7 @@ function inspectNode(snapshot) {
 }
 
 function isEdge(edge, { node_name, edge_name }) {
-  if (edge.name !== edge_name) {
+  if (edge_name !== undefined && edge.name !== edge_name) {
     return false;
   }
   // From our internal embedded graph
test/pummel/test-heapdump-env.js

@@ -15,12 +15,18 @@ const context = require('vm').createScript('const foo = 123');
 
 validateSnapshotNodes('Node / Environment', [{
   children: [
-    { node_name: 'Node / cleanup_hooks', edge_name: 'cleanup_hooks' },
+    { node_name: 'Node / CleanupQueue', edge_name: 'cleanup_queue' },
     { node_name: 'Node / IsolateData', edge_name: 'isolate_data' },
     { node_name: 'Node / Realm', edge_name: 'principal_realm' },
   ]
 }]);
+
+validateSnapshotNodes('Node / CleanupQueue', [{
+  children: [
+    { node_name: 'Node / ContextifyScript' },
+  ]
+}]);
 
 validateSnapshotNodes('Node / Realm', [{
   children: [
     { node_name: 'process', edge_name: 'process_object' },