GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "env.h" |
||
2 |
#include "async_wrap.h" |
||
3 |
#include "base_object-inl.h" |
||
4 |
#include "debug_utils-inl.h" |
||
5 |
#include "diagnosticfilename-inl.h" |
||
6 |
#include "memory_tracker-inl.h" |
||
7 |
#include "node_buffer.h" |
||
8 |
#include "node_context_data.h" |
||
9 |
#include "node_contextify.h" |
||
10 |
#include "node_errors.h" |
||
11 |
#include "node_internals.h" |
||
12 |
#include "node_options-inl.h" |
||
13 |
#include "node_process-inl.h" |
||
14 |
#include "node_v8_platform-inl.h" |
||
15 |
#include "node_worker.h" |
||
16 |
#include "req_wrap-inl.h" |
||
17 |
#include "stream_base.h" |
||
18 |
#include "tracing/agent.h" |
||
19 |
#include "tracing/traced_value.h" |
||
20 |
#include "util-inl.h" |
||
21 |
#include "v8-profiler.h" |
||
22 |
|||
23 |
#include <algorithm> |
||
24 |
#include <atomic> |
||
25 |
#include <cinttypes> |
||
26 |
#include <cstdio> |
||
27 |
#include <iostream> |
||
28 |
#include <limits> |
||
29 |
#include <memory> |
||
30 |
|||
31 |
namespace node { |
||
32 |
|||
33 |
using errors::TryCatchScope; |
||
34 |
using v8::Array; |
||
35 |
using v8::Boolean; |
||
36 |
using v8::Context; |
||
37 |
using v8::EmbedderGraph; |
||
38 |
using v8::EscapableHandleScope; |
||
39 |
using v8::Function; |
||
40 |
using v8::FunctionCallbackInfo; |
||
41 |
using v8::FunctionTemplate; |
||
42 |
using v8::HandleScope; |
||
43 |
using v8::HeapSpaceStatistics; |
||
44 |
using v8::Integer; |
||
45 |
using v8::Isolate; |
||
46 |
using v8::Local; |
||
47 |
using v8::MaybeLocal; |
||
48 |
using v8::NewStringType; |
||
49 |
using v8::Number; |
||
50 |
using v8::Object; |
||
51 |
using v8::Private; |
||
52 |
using v8::Script; |
||
53 |
using v8::SnapshotCreator; |
||
54 |
using v8::StackTrace; |
||
55 |
using v8::String; |
||
56 |
using v8::Symbol; |
||
57 |
using v8::TracingController; |
||
58 |
using v8::TryCatch; |
||
59 |
using v8::Undefined; |
||
60 |
using v8::Value; |
||
61 |
using v8::WeakCallbackInfo; |
||
62 |
using v8::WeakCallbackType; |
||
63 |
using worker::Worker; |
||
64 |
|||
65 |
int const ContextEmbedderTag::kNodeContextTag = 0x6e6f64; |
||
66 |
void* const ContextEmbedderTag::kNodeContextTagPtr = const_cast<void*>( |
||
67 |
static_cast<const void*>(&ContextEmbedderTag::kNodeContextTag)); |
||
68 |
|||
69 |
16350 |
void AsyncHooks::SetJSPromiseHooks(Local<Function> init, |
|
70 |
Local<Function> before, |
||
71 |
Local<Function> after, |
||
72 |
Local<Function> resolve) { |
||
73 |
16350 |
js_promise_hooks_[0].Reset(env()->isolate(), init); |
|
74 |
16350 |
js_promise_hooks_[1].Reset(env()->isolate(), before); |
|
75 |
16350 |
js_promise_hooks_[2].Reset(env()->isolate(), after); |
|
76 |
16350 |
js_promise_hooks_[3].Reset(env()->isolate(), resolve); |
|
77 |
✓✓ | 33045 |
for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
78 |
✗✓ | 16695 |
if (it->IsEmpty()) { |
79 |
contexts_.erase(it--); |
||
80 |
continue; |
||
81 |
} |
||
82 |
33390 |
PersistentToLocal::Weak(env()->isolate(), *it) |
|
83 |
16695 |
->SetPromiseHooks(init, before, after, resolve); |
|
84 |
} |
||
85 |
16350 |
} |
|
86 |
|||
87 |
// Remember to keep this code aligned with pushAsyncContext() in JS. |
||
88 |
842024 |
void AsyncHooks::push_async_context(double async_id, |
|
89 |
double trigger_async_id, |
||
90 |
Local<Object> resource) { |
||
91 |
// Since async_hooks is experimental, do only perform the check |
||
92 |
// when async_hooks is enabled. |
||
93 |
✓✓ | 842024 |
if (fields_[kCheck] > 0) { |
94 |
✗✓ | 842020 |
CHECK_GE(async_id, -1); |
95 |
✗✓ | 842020 |
CHECK_GE(trigger_async_id, -1); |
96 |
} |
||
97 |
|||
98 |
842024 |
uint32_t offset = fields_[kStackLength]; |
|
99 |
✓✓ | 842024 |
if (offset * 2 >= async_ids_stack_.Length()) grow_async_ids_stack(); |
100 |
842024 |
async_ids_stack_[2 * offset] = async_id_fields_[kExecutionAsyncId]; |
|
101 |
842024 |
async_ids_stack_[2 * offset + 1] = async_id_fields_[kTriggerAsyncId]; |
|
102 |
842024 |
fields_[kStackLength] += 1; |
|
103 |
842024 |
async_id_fields_[kExecutionAsyncId] = async_id; |
|
104 |
842024 |
async_id_fields_[kTriggerAsyncId] = trigger_async_id; |
|
105 |
|||
106 |
#ifdef DEBUG |
||
107 |
for (uint32_t i = offset; i < native_execution_async_resources_.size(); i++) |
||
108 |
CHECK(native_execution_async_resources_[i].IsEmpty()); |
||
109 |
#endif |
||
110 |
|||
111 |
// When this call comes from JS (as a way of increasing the stack size), |
||
112 |
// `resource` will be empty, because JS caches these values anyway. |
||
113 |
✓✓ | 842024 |
if (!resource.IsEmpty()) { |
114 |
842020 |
native_execution_async_resources_.resize(offset + 1); |
|
115 |
// Caveat: This is a v8::Local<> assignment, we do not keep a v8::Global<>! |
||
116 |
842020 |
native_execution_async_resources_[offset] = resource; |
|
117 |
} |
||
118 |
842024 |
} |
|
119 |
|||
120 |
// Remember to keep this code aligned with popAsyncContext() in JS. |
||
121 |
841614 |
bool AsyncHooks::pop_async_context(double async_id) { |
|
122 |
// In case of an exception then this may have already been reset, if the |
||
123 |
// stack was multiple MakeCallback()'s deep. |
||
124 |
✓✓ | 841614 |
if (UNLIKELY(fields_[kStackLength] == 0)) return false; |
125 |
|||
126 |
// Ask for the async_id to be restored as a check that the stack |
||
127 |
// hasn't been corrupted. |
||
128 |
1681112 |
if (UNLIKELY(fields_[kCheck] > 0 && |
|
129 |
✓✓✓✓ ✓✓ |
1681112 |
async_id_fields_[kExecutionAsyncId] != async_id)) { |
130 |
4 |
FailWithCorruptedAsyncStack(async_id); |
|
131 |
} |
||
132 |
|||
133 |
840554 |
uint32_t offset = fields_[kStackLength] - 1; |
|
134 |
840554 |
async_id_fields_[kExecutionAsyncId] = async_ids_stack_[2 * offset]; |
|
135 |
840554 |
async_id_fields_[kTriggerAsyncId] = async_ids_stack_[2 * offset + 1]; |
|
136 |
840554 |
fields_[kStackLength] = offset; |
|
137 |
|||
138 |
1681108 |
if (LIKELY(offset < native_execution_async_resources_.size() && |
|
139 |
✓✗✓✗ ✓✗ |
1681108 |
!native_execution_async_resources_[offset].IsEmpty())) { |
140 |
#ifdef DEBUG |
||
141 |
for (uint32_t i = offset + 1; i < native_execution_async_resources_.size(); |
||
142 |
i++) { |
||
143 |
CHECK(native_execution_async_resources_[i].IsEmpty()); |
||
144 |
} |
||
145 |
#endif |
||
146 |
840554 |
native_execution_async_resources_.resize(offset); |
|
147 |
840554 |
if (native_execution_async_resources_.size() < |
|
148 |
✓✓✗✓ ✗✓ |
1102812 |
native_execution_async_resources_.capacity() / 2 && |
149 |
262258 |
native_execution_async_resources_.size() > 16) { |
|
150 |
native_execution_async_resources_.shrink_to_fit(); |
||
151 |
} |
||
152 |
} |
||
153 |
|||
154 |
✓✓ | 1681108 |
if (UNLIKELY(js_execution_async_resources()->Length() > offset)) { |
155 |
42059 |
HandleScope handle_scope(env()->isolate()); |
|
156 |
84118 |
USE(js_execution_async_resources()->Set( |
|
157 |
env()->context(), |
||
158 |
env()->length_string(), |
||
159 |
168236 |
Integer::NewFromUnsigned(env()->isolate(), offset))); |
|
160 |
} |
||
161 |
|||
162 |
840554 |
return fields_[kStackLength] > 0; |
|
163 |
} |
||
164 |
|||
165 |
2338 |
void AsyncHooks::clear_async_id_stack() { |
|
166 |
2338 |
Isolate* isolate = env()->isolate(); |
|
167 |
2338 |
HandleScope handle_scope(isolate); |
|
168 |
✓✓ | 2338 |
if (!js_execution_async_resources_.IsEmpty()) { |
169 |
3100 |
USE(PersistentToLocal::Strong(js_execution_async_resources_) |
|
170 |
3100 |
->Set(env()->context(), |
|
171 |
env()->length_string(), |
||
172 |
6200 |
Integer::NewFromUnsigned(isolate, 0))); |
|
173 |
} |
||
174 |
2338 |
native_execution_async_resources_.clear(); |
|
175 |
2338 |
native_execution_async_resources_.shrink_to_fit(); |
|
176 |
|||
177 |
2338 |
async_id_fields_[kExecutionAsyncId] = 0; |
|
178 |
2338 |
async_id_fields_[kTriggerAsyncId] = 0; |
|
179 |
2338 |
fields_[kStackLength] = 0; |
|
180 |
2338 |
} |
|
181 |
|||
182 |
6952 |
void AsyncHooks::AddContext(Local<Context> ctx) { |
|
183 |
✓✓ | 20856 |
ctx->SetPromiseHooks(js_promise_hooks_[0].IsEmpty() |
184 |
6952 |
? Local<Function>() |
|
185 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[0]), |
|
186 |
✓✓ | 6952 |
js_promise_hooks_[1].IsEmpty() |
187 |
6952 |
? Local<Function>() |
|
188 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[1]), |
|
189 |
✓✓ | 6952 |
js_promise_hooks_[2].IsEmpty() |
190 |
6952 |
? Local<Function>() |
|
191 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[2]), |
|
192 |
✓✗ | 6952 |
js_promise_hooks_[3].IsEmpty() |
193 |
6952 |
? Local<Function>() |
|
194 |
: PersistentToLocal::Strong(js_promise_hooks_[3])); |
||
195 |
|||
196 |
6952 |
size_t id = contexts_.size(); |
|
197 |
6952 |
contexts_.resize(id + 1); |
|
198 |
6952 |
contexts_[id].Reset(env()->isolate(), ctx); |
|
199 |
6952 |
contexts_[id].SetWeak(); |
|
200 |
6952 |
} |
|
201 |
|||
202 |
530 |
void AsyncHooks::RemoveContext(Local<Context> ctx) { |
|
203 |
530 |
Isolate* isolate = env()->isolate(); |
|
204 |
1060 |
HandleScope handle_scope(isolate); |
|
205 |
530 |
contexts_.erase(std::remove_if(contexts_.begin(), |
|
206 |
contexts_.end(), |
||
207 |
4762 |
[&](auto&& el) { return el.IsEmpty(); }), |
|
208 |
1060 |
contexts_.end()); |
|
209 |
✓✓ | 4700 |
for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
210 |
4170 |
Local<Context> saved_context = PersistentToLocal::Weak(isolate, *it); |
|
211 |
✗✓ | 4170 |
if (saved_context == ctx) { |
212 |
it->Reset(); |
||
213 |
contexts_.erase(it); |
||
214 |
break; |
||
215 |
} |
||
216 |
} |
||
217 |
530 |
} |
|
218 |
|||
219 |
240735 |
AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
|
220 |
240735 |
Environment* env, double default_trigger_async_id) |
|
221 |
240735 |
: async_hooks_(env->async_hooks()) { |
|
222 |
✓✗ | 240735 |
if (env->async_hooks()->fields()[AsyncHooks::kCheck] > 0) { |
223 |
✗✓ | 240735 |
CHECK_GE(default_trigger_async_id, 0); |
224 |
} |
||
225 |
|||
226 |
240735 |
old_default_trigger_async_id_ = |
|
227 |
240735 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId]; |
|
228 |
240735 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
|
229 |
240735 |
default_trigger_async_id; |
|
230 |
240735 |
} |
|
231 |
|||
232 |
481468 |
AsyncHooks::DefaultTriggerAsyncIdScope::~DefaultTriggerAsyncIdScope() { |
|
233 |
240734 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
|
234 |
240734 |
old_default_trigger_async_id_; |
|
235 |
240734 |
} |
|
236 |
|||
237 |
240735 |
AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
|
238 |
240735 |
AsyncWrap* async_wrap) |
|
239 |
: DefaultTriggerAsyncIdScope(async_wrap->env(), |
||
240 |
240735 |
async_wrap->get_async_id()) {} |
|
241 |
|||
242 |
12 |
std::ostream& operator<<(std::ostream& output, |
|
243 |
const std::vector<SnapshotIndex>& v) { |
||
244 |
12 |
output << "{ "; |
|
245 |
✓✓ | 2142 |
for (const SnapshotIndex i : v) { |
246 |
2130 |
output << i << ", "; |
|
247 |
} |
||
248 |
12 |
output << " }"; |
|
249 |
12 |
return output; |
|
250 |
} |
||
251 |
|||
252 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
253 |
const IsolateDataSerializeInfo& i) { |
||
254 |
output << "{\n" |
||
255 |
6 |
<< "// -- primitive begins --\n" |
|
256 |
6 |
<< i.primitive_values << ",\n" |
|
257 |
<< "// -- primitive ends --\n" |
||
258 |
6 |
<< "// -- template_values begins --\n" |
|
259 |
6 |
<< i.template_values << ",\n" |
|
260 |
<< "// -- template_values ends --\n" |
||
261 |
6 |
<< "}"; |
|
262 |
6 |
return output; |
|
263 |
} |
||
264 |
|||
265 |
6 |
std::ostream& operator<<(std::ostream& output, const SnapshotMetadata& i) { |
|
266 |
output << "{\n" |
||
267 |
<< " " |
||
268 |
6 |
<< (i.type == SnapshotMetadata::Type::kDefault |
|
269 |
? "SnapshotMetadata::Type::kDefault" |
||
270 |
: "SnapshotMetadata::Type::kFullyCustomized") |
||
271 |
<< ", // type\n" |
||
272 |
6 |
<< " \"" << i.node_version << "\", // node_version\n" |
|
273 |
6 |
<< " \"" << i.node_arch << "\", // node_arch\n" |
|
274 |
6 |
<< " \"" << i.node_platform << "\", // node_platform\n" |
|
275 |
✓✗ | 6 |
<< " " << i.v8_cache_version_tag << ", // v8_cache_version_tag\n" |
276 |
6 |
<< "}"; |
|
277 |
6 |
return output; |
|
278 |
} |
||
279 |
|||
280 |
6 |
IsolateDataSerializeInfo IsolateData::Serialize(SnapshotCreator* creator) { |
|
281 |
6 |
Isolate* isolate = creator->GetIsolate(); |
|
282 |
6 |
IsolateDataSerializeInfo info; |
|
283 |
12 |
HandleScope handle_scope(isolate); |
|
284 |
// XXX(joyeecheung): technically speaking, the indexes here should be |
||
285 |
// consecutive and we could just return a range instead of an array, |
||
286 |
// but that's not part of the V8 API contract so we use an array |
||
287 |
// just to be safe. |
||
288 |
|||
289 |
#define VP(PropertyName, StringValue) V(Private, PropertyName) |
||
290 |
#define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
||
291 |
#define VS(PropertyName, StringValue) V(String, PropertyName) |
||
292 |
#define V(TypeName, PropertyName) \ |
||
293 |
info.primitive_values.push_back( \ |
||
294 |
creator->AddData(PropertyName##_.Get(isolate))); |
||
295 |
60 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
|
296 |
78 |
PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
|
297 |
1662 |
PER_ISOLATE_STRING_PROPERTIES(VS) |
|
298 |
#undef V |
||
299 |
#undef VY |
||
300 |
#undef VS |
||
301 |
#undef VP |
||
302 |
|||
303 |
✓✓ | 354 |
for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++) |
304 |
696 |
info.primitive_values.push_back(creator->AddData(async_wrap_provider(i))); |
|
305 |
|||
306 |
6 |
uint32_t id = 0; |
|
307 |
#define V(PropertyName, TypeName) \ |
||
308 |
do { \ |
||
309 |
Local<TypeName> field = PropertyName(); \ |
||
310 |
if (!field.IsEmpty()) { \ |
||
311 |
size_t index = creator->AddData(field); \ |
||
312 |
info.template_values.push_back({#PropertyName, id, index}); \ |
||
313 |
} \ |
||
314 |
id++; \ |
||
315 |
} while (0); |
||
316 |
✓✗✗✓ ✓✗✓✗ ✗✓✗✓ ✓✗✓✗ ✓✗✓✗ ✓✗✓✗ ✓✗✓✗ ✓✗✗✓ ✗✓✗✓ ✓✗✗✓ ✗✓✓✗ ✓✗✗✓ ✗✓✗✓ ✓✗✓✗ ✓✗✗✓ ✗✓✗✓ ✗✓✗✓ ✓✗✓✗ ✗✓ |
348 |
PER_ISOLATE_TEMPLATE_PROPERTIES(V) |
317 |
#undef V |
||
318 |
|||
319 |
6 |
return info; |
|
320 |
} |
||
321 |
|||
322 |
5521 |
void IsolateData::DeserializeProperties(const IsolateDataSerializeInfo* info) { |
|
323 |
5521 |
size_t i = 0; |
|
324 |
5521 |
HandleScope handle_scope(isolate_); |
|
325 |
|||
326 |
#define VP(PropertyName, StringValue) V(Private, PropertyName) |
||
327 |
#define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
||
328 |
#define VS(PropertyName, StringValue) V(String, PropertyName) |
||
329 |
#define V(TypeName, PropertyName) \ |
||
330 |
do { \ |
||
331 |
MaybeLocal<TypeName> maybe_field = \ |
||
332 |
isolate_->GetDataFromSnapshotOnce<TypeName>( \ |
||
333 |
info->primitive_values[i++]); \ |
||
334 |
Local<TypeName> field; \ |
||
335 |
if (!maybe_field.ToLocal(&field)) { \ |
||
336 |
fprintf(stderr, "Failed to deserialize " #PropertyName "\n"); \ |
||
337 |
} \ |
||
338 |
PropertyName##_.Set(isolate_, field); \ |
||
339 |
} while (0); |
||
340 |
✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓ |
104899 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
341 |
✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ |
138025 |
PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
342 |
✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ |
3053113 |
PER_ISOLATE_STRING_PROPERTIES(VS) |
343 |
#undef V |
||
344 |
#undef VY |
||
345 |
#undef VS |
||
346 |
#undef VP |
||
347 |
|||
348 |
✓✓ | 325739 |
for (size_t j = 0; j < AsyncWrap::PROVIDERS_LENGTH; j++) { |
349 |
MaybeLocal<String> maybe_field = |
||
350 |
640436 |
isolate_->GetDataFromSnapshotOnce<String>(info->primitive_values[i++]); |
|
351 |
Local<String> field; |
||
352 |
✗✓ | 320218 |
if (!maybe_field.ToLocal(&field)) { |
353 |
fprintf(stderr, "Failed to deserialize AsyncWrap provider %zu\n", j); |
||
354 |
} |
||
355 |
320218 |
async_wrap_providers_[j].Set(isolate_, field); |
|
356 |
} |
||
357 |
|||
358 |
5521 |
const std::vector<PropInfo>& values = info->template_values; |
|
359 |
5521 |
i = 0; // index to the array |
|
360 |
5521 |
uint32_t id = 0; |
|
361 |
#define V(PropertyName, TypeName) \ |
||
362 |
do { \ |
||
363 |
if (values.size() > i && id == values[i].id) { \ |
||
364 |
const PropInfo& d = values[i]; \ |
||
365 |
DCHECK_EQ(d.name, #PropertyName); \ |
||
366 |
MaybeLocal<TypeName> maybe_field = \ |
||
367 |
isolate_->GetDataFromSnapshotOnce<TypeName>(d.index); \ |
||
368 |
Local<TypeName> field; \ |
||
369 |
if (!maybe_field.ToLocal(&field)) { \ |
||
370 |
fprintf(stderr, \ |
||
371 |
"Failed to deserialize isolate data template " #PropertyName \ |
||
372 |
"\n"); \ |
||
373 |
} \ |
||
374 |
set_##PropertyName(field); \ |
||
375 |
i++; \ |
||
376 |
} \ |
||
377 |
id++; \ |
||
378 |
} while (0); |
||
379 |
|||
380 |
✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✗✓ ✗✓✗✗ ✓✗✓✗ ✓✗✗✓ ✓✗✓✗ ✓✗✗✓ ✗✓✗✗ ✗✓✗✗ |
226361 |
PER_ISOLATE_TEMPLATE_PROPERTIES(V); |
381 |
#undef V |
||
382 |
5521 |
} |
|
383 |
|||
384 |
798 |
void IsolateData::CreateProperties() { |
|
385 |
// Create string and private symbol properties as internalized one byte |
||
386 |
// strings after the platform is properly initialized. |
||
387 |
// |
||
388 |
// Internalized because it makes property lookups a little faster and |
||
389 |
// because the string is created in the old space straight away. It's going |
||
390 |
// to end up in the old space sooner or later anyway but now it doesn't go |
||
391 |
// through v8::Eternal's new space handling first. |
||
392 |
// |
||
393 |
// One byte because our strings are ASCII and we can safely skip V8's UTF-8 |
||
394 |
// decoding step. |
||
395 |
|||
396 |
1596 |
HandleScope handle_scope(isolate_); |
|
397 |
|||
398 |
#define V(PropertyName, StringValue) \ |
||
399 |
PropertyName##_.Set( \ |
||
400 |
isolate_, \ |
||
401 |
Private::New(isolate_, \ |
||
402 |
String::NewFromOneByte( \ |
||
403 |
isolate_, \ |
||
404 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
405 |
NewStringType::kInternalized, \ |
||
406 |
sizeof(StringValue) - 1) \ |
||
407 |
.ToLocalChecked())); |
||
408 |
7980 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V) |
|
409 |
#undef V |
||
410 |
#define V(PropertyName, StringValue) \ |
||
411 |
PropertyName##_.Set( \ |
||
412 |
isolate_, \ |
||
413 |
Symbol::New(isolate_, \ |
||
414 |
String::NewFromOneByte( \ |
||
415 |
isolate_, \ |
||
416 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
417 |
NewStringType::kInternalized, \ |
||
418 |
sizeof(StringValue) - 1) \ |
||
419 |
.ToLocalChecked())); |
||
420 |
10374 |
PER_ISOLATE_SYMBOL_PROPERTIES(V) |
|
421 |
#undef V |
||
422 |
#define V(PropertyName, StringValue) \ |
||
423 |
PropertyName##_.Set( \ |
||
424 |
isolate_, \ |
||
425 |
String::NewFromOneByte(isolate_, \ |
||
426 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
427 |
NewStringType::kInternalized, \ |
||
428 |
sizeof(StringValue) - 1) \ |
||
429 |
.ToLocalChecked()); |
||
430 |
221046 |
PER_ISOLATE_STRING_PROPERTIES(V) |
|
431 |
#undef V |
||
432 |
|||
433 |
// Create all the provider strings that will be passed to JS. Place them in |
||
434 |
// an array so the array index matches the PROVIDER id offset. This way the |
||
435 |
// strings can be retrieved quickly. |
||
436 |
#define V(Provider) \ |
||
437 |
async_wrap_providers_[AsyncWrap::PROVIDER_ ## Provider].Set( \ |
||
438 |
isolate_, \ |
||
439 |
String::NewFromOneByte( \ |
||
440 |
isolate_, \ |
||
441 |
reinterpret_cast<const uint8_t*>(#Provider), \ |
||
442 |
NewStringType::kInternalized, \ |
||
443 |
sizeof(#Provider) - 1).ToLocalChecked()); |
||
444 |
47082 |
NODE_ASYNC_PROVIDER_TYPES(V) |
|
445 |
#undef V |
||
446 |
|||
447 |
// TODO(legendecas): eagerly create per isolate templates. |
||
448 |
798 |
Local<FunctionTemplate> templ = FunctionTemplate::New(isolate()); |
|
449 |
1596 |
templ->InstanceTemplate()->SetInternalFieldCount( |
|
450 |
BaseObject::kInternalFieldCount); |
||
451 |
798 |
templ->Inherit(BaseObject::GetConstructorTemplate(this)); |
|
452 |
798 |
set_binding_data_ctor_template(templ); |
|
453 |
|||
454 |
798 |
set_contextify_global_template( |
|
455 |
798 |
contextify::ContextifyContext::CreateGlobalTemplate(isolate_)); |
|
456 |
798 |
} |
|
457 |
|||
458 |
6319 |
IsolateData::IsolateData(Isolate* isolate, |
|
459 |
uv_loop_t* event_loop, |
||
460 |
MultiIsolatePlatform* platform, |
||
461 |
ArrayBufferAllocator* node_allocator, |
||
462 |
6319 |
const IsolateDataSerializeInfo* isolate_data_info) |
|
463 |
: isolate_(isolate), |
||
464 |
event_loop_(event_loop), |
||
465 |
51 |
node_allocator_(node_allocator == nullptr ? nullptr |
|
466 |
6268 |
: node_allocator->GetImpl()), |
|
467 |
✓✓ | 12638 |
platform_(platform) { |
468 |
6319 |
options_.reset( |
|
469 |
6319 |
new PerIsolateOptions(*(per_process::cli_options->per_isolate))); |
|
470 |
|||
471 |
✓✓ | 6319 |
if (isolate_data_info == nullptr) { |
472 |
798 |
CreateProperties(); |
|
473 |
} else { |
||
474 |
5521 |
DeserializeProperties(isolate_data_info); |
|
475 |
} |
||
476 |
6319 |
} |
|
477 |
|||
478 |
25 |
void IsolateData::MemoryInfo(MemoryTracker* tracker) const { |
|
479 |
#define V(PropertyName, StringValue) \ |
||
480 |
tracker->TrackField(#PropertyName, PropertyName()); |
||
481 |
25 |
PER_ISOLATE_SYMBOL_PROPERTIES(V) |
|
482 |
|||
483 |
25 |
PER_ISOLATE_STRING_PROPERTIES(V) |
|
484 |
#undef V |
||
485 |
|||
486 |
25 |
tracker->TrackField("async_wrap_providers", async_wrap_providers_); |
|
487 |
|||
488 |
✓✗ | 25 |
if (node_allocator_ != nullptr) { |
489 |
25 |
tracker->TrackFieldWithSize( |
|
490 |
"node_allocator", sizeof(*node_allocator_), "NodeArrayBufferAllocator"); |
||
491 |
} |
||
492 |
25 |
tracker->TrackFieldWithSize( |
|
493 |
"platform", sizeof(*platform_), "MultiIsolatePlatform"); |
||
494 |
// TODO(joyeecheung): implement MemoryRetainer in the option classes. |
||
495 |
25 |
} |
|
496 |
|||
497 |
154 |
void TrackingTraceStateObserver::UpdateTraceCategoryState() { |
|
498 |
✓✓✓✓ ✓✓ |
154 |
if (!env_->owns_process_state() || !env_->can_call_into_js()) { |
499 |
// Ideally, we’d have a consistent story that treats all threads/Environment |
||
500 |
// instances equally here. However, tracing is essentially global, and this |
||
501 |
// callback is called from whichever thread calls `StartTracing()` or |
||
502 |
// `StopTracing()`. The only way to do this in a threadsafe fashion |
||
503 |
// seems to be only tracking this from the main thread, and only allowing |
||
504 |
// these state modifications from the main thread. |
||
505 |
96 |
return; |
|
506 |
} |
||
507 |
|||
508 |
✓✓ | 143 |
if (env_->principal_realm() == nullptr) { |
509 |
85 |
return; |
|
510 |
} |
||
511 |
|||
512 |
58 |
bool async_hooks_enabled = (*(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
|
513 |
58 |
TRACING_CATEGORY_NODE1(async_hooks)))) != 0; |
|
514 |
|||
515 |
58 |
Isolate* isolate = env_->isolate(); |
|
516 |
58 |
HandleScope handle_scope(isolate); |
|
517 |
58 |
Local<Function> cb = env_->trace_category_state_function(); |
|
518 |
✗✓ | 58 |
if (cb.IsEmpty()) |
519 |
return; |
||
520 |
58 |
TryCatchScope try_catch(env_); |
|
521 |
58 |
try_catch.SetVerbose(true); |
|
522 |
✓✓ | 116 |
Local<Value> args[] = {Boolean::New(isolate, async_hooks_enabled)}; |
523 |
116 |
USE(cb->Call(env_->context(), Undefined(isolate), arraysize(args), args)); |
|
524 |
} |
||
525 |
|||
526 |
6952 |
void Environment::AssignToContext(Local<v8::Context> context, |
|
527 |
Realm* realm, |
||
528 |
const ContextInfo& info) { |
||
529 |
6952 |
context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
|
530 |
this); |
||
531 |
6952 |
context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kRealm, realm); |
|
532 |
// Used to retrieve bindings |
||
533 |
13904 |
context->SetAlignedPointerInEmbedderData( |
|
534 |
6952 |
ContextEmbedderIndex::kBindingListIndex, &(this->bindings_)); |
|
535 |
|||
536 |
// ContextifyContexts will update this to a pointer to the native object. |
||
537 |
6952 |
context->SetAlignedPointerInEmbedderData( |
|
538 |
ContextEmbedderIndex::kContextifyContext, nullptr); |
||
539 |
|||
540 |
// This must not be done before other context fields are initialized. |
||
541 |
6952 |
ContextEmbedderTag::TagNodeContext(context); |
|
542 |
|||
543 |
#if HAVE_INSPECTOR |
||
544 |
6952 |
inspector_agent()->ContextCreated(context, info); |
|
545 |
#endif // HAVE_INSPECTOR |
||
546 |
|||
547 |
6952 |
this->async_hooks()->AddContext(context); |
|
548 |
6952 |
} |
|
549 |
|||
550 |
185 |
void Environment::TryLoadAddon( |
|
551 |
const char* filename, |
||
552 |
int flags, |
||
553 |
const std::function<bool(binding::DLib*)>& was_loaded) { |
||
554 |
185 |
loaded_addons_.emplace_back(filename, flags); |
|
555 |
✓✓ | 185 |
if (!was_loaded(&loaded_addons_.back())) { |
556 |
10 |
loaded_addons_.pop_back(); |
|
557 |
} |
||
558 |
185 |
} |
|
559 |
|||
560 |
12 |
std::string Environment::GetCwd() { |
|
561 |
char cwd[PATH_MAX_BYTES]; |
||
562 |
12 |
size_t size = PATH_MAX_BYTES; |
|
563 |
12 |
const int err = uv_cwd(cwd, &size); |
|
564 |
|||
565 |
✓✗ | 12 |
if (err == 0) { |
566 |
✗✓ | 12 |
CHECK_GT(size, 0); |
567 |
12 |
return cwd; |
|
568 |
} |
||
569 |
|||
570 |
// This can fail if the cwd is deleted. In that case, fall back to |
||
571 |
// exec_path. |
||
572 |
const std::string& exec_path = exec_path_; |
||
573 |
return exec_path.substr(0, exec_path.find_last_of(kPathSeparator)); |
||
574 |
} |
||
575 |
|||
576 |
1903 |
void Environment::add_refs(int64_t diff) { |
|
577 |
1903 |
task_queues_async_refs_ += diff; |
|
578 |
✗✓ | 1903 |
CHECK_GE(task_queues_async_refs_, 0); |
579 |
✓✓ | 1903 |
if (task_queues_async_refs_ == 0) |
580 |
421 |
uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
581 |
else |
||
582 |
1482 |
uv_ref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
583 |
1903 |
} |
|
584 |
|||
585 |
67214 |
uv_buf_t Environment::allocate_managed_buffer(const size_t suggested_size) { |
|
586 |
134428 |
NoArrayBufferZeroFillScope no_zero_fill_scope(isolate_data()); |
|
587 |
std::unique_ptr<v8::BackingStore> bs = |
||
588 |
67214 |
v8::ArrayBuffer::NewBackingStore(isolate(), suggested_size); |
|
589 |
67214 |
uv_buf_t buf = uv_buf_init(static_cast<char*>(bs->Data()), bs->ByteLength()); |
|
590 |
67214 |
released_allocated_buffers_.emplace(buf.base, std::move(bs)); |
|
591 |
67214 |
return buf; |
|
592 |
} |
||
593 |
|||
594 |
82184 |
std::unique_ptr<v8::BackingStore> Environment::release_managed_buffer( |
|
595 |
const uv_buf_t& buf) { |
||
596 |
82184 |
std::unique_ptr<v8::BackingStore> bs; |
|
597 |
✓✓ | 82184 |
if (buf.base != nullptr) { |
598 |
67214 |
auto it = released_allocated_buffers_.find(buf.base); |
|
599 |
✗✓ | 67214 |
CHECK_NE(it, released_allocated_buffers_.end()); |
600 |
67214 |
bs = std::move(it->second); |
|
601 |
67214 |
released_allocated_buffers_.erase(it); |
|
602 |
} |
||
603 |
82184 |
return bs; |
|
604 |
} |
||
605 |
|||
606 |
6309 |
std::string GetExecPath(const std::vector<std::string>& argv) { |
|
607 |
char exec_path_buf[2 * PATH_MAX]; |
||
608 |
6309 |
size_t exec_path_len = sizeof(exec_path_buf); |
|
609 |
6309 |
std::string exec_path; |
|
610 |
✓✗ | 6309 |
if (uv_exepath(exec_path_buf, &exec_path_len) == 0) { |
611 |
6309 |
exec_path = std::string(exec_path_buf, exec_path_len); |
|
612 |
} else { |
||
613 |
exec_path = argv[0]; |
||
614 |
} |
||
615 |
|||
616 |
// On OpenBSD process.execPath will be relative unless we |
||
617 |
// get the full path before process.execPath is used. |
||
618 |
#if defined(__OpenBSD__) |
||
619 |
uv_fs_t req; |
||
620 |
req.ptr = nullptr; |
||
621 |
if (0 == |
||
622 |
uv_fs_realpath(nullptr, &req, exec_path.c_str(), nullptr)) { |
||
623 |
CHECK_NOT_NULL(req.ptr); |
||
624 |
exec_path = std::string(static_cast<char*>(req.ptr)); |
||
625 |
} |
||
626 |
uv_fs_req_cleanup(&req); |
||
627 |
#endif |
||
628 |
|||
629 |
6309 |
return exec_path; |
|
630 |
} |
||
631 |
|||
632 |
6309 |
Environment::Environment(IsolateData* isolate_data, |
|
633 |
Isolate* isolate, |
||
634 |
const std::vector<std::string>& args, |
||
635 |
const std::vector<std::string>& exec_args, |
||
636 |
const EnvSerializeInfo* env_info, |
||
637 |
EnvironmentFlags::Flags flags, |
||
638 |
6309 |
ThreadId thread_id) |
|
639 |
: isolate_(isolate), |
||
640 |
isolate_data_(isolate_data), |
||
641 |
async_hooks_(isolate, MAYBE_FIELD_PTR(env_info, async_hooks)), |
||
642 |
immediate_info_(isolate, MAYBE_FIELD_PTR(env_info, immediate_info)), |
||
643 |
tick_info_(isolate, MAYBE_FIELD_PTR(env_info, tick_info)), |
||
644 |
6309 |
timer_base_(uv_now(isolate_data->event_loop())), |
|
645 |
exec_argv_(exec_args), |
||
646 |
argv_(args), |
||
647 |
exec_path_(GetExecPath(args)), |
||
648 |
6309 |
exiting_(isolate_, 1, MAYBE_FIELD_PTR(env_info, exiting)), |
|
649 |
should_abort_on_uncaught_toggle_( |
||
650 |
6309 |
isolate_, |
|
651 |
1, |
||
652 |
MAYBE_FIELD_PTR(env_info, should_abort_on_uncaught_toggle)), |
||
653 |
6309 |
stream_base_state_(isolate_, |
|
654 |
StreamBase::kNumStreamBaseStateFields, |
||
655 |
MAYBE_FIELD_PTR(env_info, stream_base_state)), |
||
656 |
6309 |
time_origin_(PERFORMANCE_NOW()), |
|
657 |
6309 |
time_origin_timestamp_(GetCurrentTimeInMicroseconds()), |
|
658 |
flags_(flags), |
||
659 |
6309 |
thread_id_(thread_id.id == static_cast<uint64_t>(-1) |
|
660 |
6309 |
? AllocateEnvironmentThreadId().id |
|
661 |
✓✓✓✓ ✓✓✓✓ ✓✓✓✓ ✓✓ |
25236 |
: thread_id.id) { |
662 |
// We'll be creating new objects so make sure we've entered the context. |
||
663 |
12618 |
HandleScope handle_scope(isolate); |
|
664 |
|||
665 |
// Set some flags if only kDefaultFlags was passed. This can make API version |
||
666 |
// transitions easier for embedders. |
||
667 |
✓✓ | 6309 |
if (flags_ & EnvironmentFlags::kDefaultFlags) { |
668 |
11156 |
flags_ = flags_ | |
|
669 |
5578 |
EnvironmentFlags::kOwnsProcessState | |
|
670 |
EnvironmentFlags::kOwnsInspector; |
||
671 |
} |
||
672 |
|||
673 |
6309 |
set_env_vars(per_process::system_environment); |
|
674 |
6309 |
enabled_debug_list_.Parse(env_vars(), isolate); |
|
675 |
|||
676 |
// We create new copies of the per-Environment option sets, so that it is |
||
677 |
// easier to modify them after Environment creation. The defaults are |
||
678 |
// part of the per-Isolate option set, for which in turn the defaults are |
||
679 |
// part of the per-process option set. |
||
680 |
12618 |
options_ = std::make_shared<EnvironmentOptions>( |
|
681 |
18927 |
*isolate_data->options()->per_env); |
|
682 |
6309 |
inspector_host_port_ = std::make_shared<ExclusiveAccess<HostPort>>( |
|
683 |
6309 |
options_->debug_options().host_port); |
|
684 |
|||
685 |
6309 |
heap_snapshot_near_heap_limit_ = |
|
686 |
6309 |
static_cast<uint32_t>(options_->heap_snapshot_near_heap_limit); |
|
687 |
|||
688 |
✓✓ | 6309 |
if (!(flags_ & EnvironmentFlags::kOwnsProcessState)) { |
689 |
731 |
set_abort_on_uncaught_exception(false); |
|
690 |
} |
||
691 |
|||
692 |
#if HAVE_INSPECTOR |
||
693 |
// We can only create the inspector agent after having cloned the options. |
||
694 |
6309 |
inspector_agent_ = std::make_unique<inspector::Agent>(this); |
|
695 |
#endif |
||
696 |
|||
697 |
✓✗ | 6309 |
if (tracing::AgentWriterHandle* writer = GetTracingAgentWriter()) { |
698 |
6309 |
trace_state_observer_ = std::make_unique<TrackingTraceStateObserver>(this); |
|
699 |
✓✓ | 6309 |
if (TracingController* tracing_controller = writer->GetTracingController()) |
700 |
6258 |
tracing_controller->AddTraceStateObserver(trace_state_observer_.get()); |
|
701 |
} |
||
702 |
|||
703 |
6309 |
destroy_async_id_list_.reserve(512); |
|
704 |
|||
705 |
6309 |
performance_state_ = std::make_unique<performance::PerformanceState>( |
|
706 |
✓✓ | 6309 |
isolate, MAYBE_FIELD_PTR(env_info, performance_state)); |
707 |
|||
708 |
6309 |
if (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
|
709 |
✓✓ | 6309 |
TRACING_CATEGORY_NODE1(environment)) != 0) { |
710 |
16 |
auto traced_value = tracing::TracedValue::Create(); |
|
711 |
8 |
traced_value->BeginArray("args"); |
|
712 |
✓✓ | 18 |
for (const std::string& arg : args) traced_value->AppendString(arg); |
713 |
8 |
traced_value->EndArray(); |
|
714 |
8 |
traced_value->BeginArray("exec_args"); |
|
715 |
✓✓ | 33 |
for (const std::string& arg : exec_args) traced_value->AppendString(arg); |
716 |
8 |
traced_value->EndArray(); |
|
717 |
✓✓✓✗ |
15 |
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(TRACING_CATEGORY_NODE1(environment), |
718 |
"Environment", |
||
719 |
this, |
||
720 |
"args", |
||
721 |
std::move(traced_value)); |
||
722 |
} |
||
723 |
6309 |
} |
|
724 |
|||
725 |
788 |
Environment::Environment(IsolateData* isolate_data, |
|
726 |
Local<Context> context, |
||
727 |
const std::vector<std::string>& args, |
||
728 |
const std::vector<std::string>& exec_args, |
||
729 |
const EnvSerializeInfo* env_info, |
||
730 |
EnvironmentFlags::Flags flags, |
||
731 |
788 |
ThreadId thread_id) |
|
732 |
: Environment(isolate_data, |
||
733 |
context->GetIsolate(), |
||
734 |
args, |
||
735 |
exec_args, |
||
736 |
env_info, |
||
737 |
flags, |
||
738 |
788 |
thread_id) { |
|
739 |
788 |
InitializeMainContext(context, env_info); |
|
740 |
788 |
} |
|
741 |
|||
742 |
6309 |
void Environment::InitializeMainContext(Local<Context> context, |
|
743 |
const EnvSerializeInfo* env_info) { |
||
744 |
6309 |
principal_realm_ = std::make_unique<Realm>( |
|
745 |
✓✓ | 6309 |
this, context, MAYBE_FIELD_PTR(env_info, principal_realm)); |
746 |
6309 |
AssignToContext(context, principal_realm_.get(), ContextInfo("")); |
|
747 |
✓✓ | 6309 |
if (env_info != nullptr) { |
748 |
5521 |
DeserializeProperties(env_info); |
|
749 |
} |
||
750 |
|||
751 |
✓✓ | 6309 |
if (!options_->force_async_hooks_checks) { |
752 |
1 |
async_hooks_.no_force_checks(); |
|
753 |
} |
||
754 |
|||
755 |
// By default, always abort when --abort-on-uncaught-exception was passed. |
||
756 |
6309 |
should_abort_on_uncaught_toggle_[0] = 1; |
|
757 |
|||
758 |
// The process is not exiting by default. |
||
759 |
6309 |
set_exiting(false); |
|
760 |
|||
761 |
6309 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT, |
|
762 |
time_origin_); |
||
763 |
6309 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_NODE_START, |
|
764 |
per_process::node_start_time); |
||
765 |
|||
766 |
✓✓ | 6309 |
if (per_process::v8_initialized) { |
767 |
6265 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_V8_START, |
|
768 |
performance::performance_v8_start); |
||
769 |
} |
||
770 |
6309 |
} |
|
771 |
|||
772 |
22580 |
Environment::~Environment() { |
|
773 |
HandleScope handle_scope(isolate()); |
||
774 |
5645 |
Local<Context> ctx = context(); |
|
775 |
|||
776 |
✓✓ | 5645 |
if (Environment** interrupt_data = interrupt_data_.load()) { |
777 |
// There are pending RequestInterrupt() callbacks. Tell them not to run, |
||
778 |
// then force V8 to run interrupts by compiling and running an empty script |
||
779 |
// so as not to leak memory. |
||
780 |
11 |
*interrupt_data = nullptr; |
|
781 |
|||
782 |
22 |
Isolate::AllowJavascriptExecutionScope allow_js_here(isolate()); |
|
783 |
22 |
TryCatch try_catch(isolate()); |
|
784 |
11 |
Context::Scope context_scope(ctx); |
|
785 |
|||
786 |
#ifdef DEBUG |
||
787 |
bool consistency_check = false; |
||
788 |
isolate()->RequestInterrupt([](Isolate*, void* data) { |
||
789 |
*static_cast<bool*>(data) = true; |
||
790 |
}, &consistency_check); |
||
791 |
#endif |
||
792 |
|||
793 |
Local<Script> script; |
||
794 |
✓✗ | 33 |
if (Script::Compile(ctx, String::Empty(isolate())).ToLocal(&script)) |
795 |
11 |
USE(script->Run(ctx)); |
|
796 |
|||
797 |
DCHECK(consistency_check); |
||
798 |
} |
||
799 |
|||
800 |
// FreeEnvironment() should have set this. |
||
801 |
✗✓ | 5645 |
CHECK(is_stopping()); |
802 |
|||
803 |
✗✓ | 5645 |
if (heapsnapshot_near_heap_limit_callback_added_) { |
804 |
RemoveHeapSnapshotNearHeapLimitCallback(0); |
||
805 |
} |
||
806 |
|||
807 |
5645 |
isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback( |
|
808 |
BuildEmbedderGraph, this); |
||
809 |
|||
810 |
#if HAVE_INSPECTOR |
||
811 |
// Destroy inspector agent before erasing the context. The inspector |
||
812 |
// destructor depends on the context still being accessible. |
||
813 |
5645 |
inspector_agent_.reset(); |
|
814 |
#endif |
||
815 |
|||
816 |
5645 |
ctx->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
|
817 |
nullptr); |
||
818 |
5645 |
ctx->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kRealm, nullptr); |
|
819 |
|||
820 |
✓✗ | 5645 |
if (trace_state_observer_) { |
821 |
5645 |
tracing::AgentWriterHandle* writer = GetTracingAgentWriter(); |
|
822 |
✗✓ | 5645 |
CHECK_NOT_NULL(writer); |
823 |
✓✓ | 5645 |
if (TracingController* tracing_controller = writer->GetTracingController()) |
824 |
5596 |
tracing_controller->RemoveTraceStateObserver(trace_state_observer_.get()); |
|
825 |
} |
||
826 |
|||
827 |
✓✓✓✓ |
10528 |
TRACE_EVENT_NESTABLE_ASYNC_END0( |
828 |
TRACING_CATEGORY_NODE1(environment), "Environment", this); |
||
829 |
|||
830 |
// Do not unload addons on the main thread. Some addons need to retain memory |
||
831 |
// beyond the Environment's lifetime, and unloading them early would break |
||
832 |
// them; with Worker threads, we have the opportunity to be stricter. |
||
833 |
// Also, since the main thread usually stops just before the process exits, |
||
834 |
// this is far less relevant here. |
||
835 |
✓✓ | 5645 |
if (!is_main_thread()) { |
836 |
// Dereference all addons that were loaded into this environment. |
||
837 |
✓✓ | 742 |
for (binding::DLib& addon : loaded_addons_) { |
838 |
14 |
addon.Close(); |
|
839 |
} |
||
840 |
} |
||
841 |
|||
842 |
✗✓ | 5645 |
CHECK_EQ(base_object_count_, 0); |
843 |
5645 |
} |
|
844 |
|||
845 |
6274 |
void Environment::InitializeLibuv() { |
|
846 |
12548 |
HandleScope handle_scope(isolate()); |
|
847 |
6274 |
Context::Scope context_scope(context()); |
|
848 |
|||
849 |
✗✓ | 6274 |
CHECK_EQ(0, uv_timer_init(event_loop(), timer_handle())); |
850 |
6274 |
uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
851 |
|||
852 |
✗✓ | 6274 |
CHECK_EQ(0, uv_check_init(event_loop(), immediate_check_handle())); |
853 |
6274 |
uv_unref(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
|
854 |
|||
855 |
✗✓ | 6274 |
CHECK_EQ(0, uv_idle_init(event_loop(), immediate_idle_handle())); |
856 |
|||
857 |
✗✓ | 6274 |
CHECK_EQ(0, uv_check_start(immediate_check_handle(), CheckImmediate)); |
858 |
|||
859 |
// Inform V8's CPU profiler when we're idle. The profiler is sampling-based |
||
860 |
// but not all samples are created equal; mark the wall clock time spent in |
||
861 |
// epoll_wait() and friends so profiling tools can filter it out. The samples |
||
862 |
// still end up in v8.log but with state=IDLE rather than state=EXTERNAL. |
||
863 |
✗✓ | 6274 |
CHECK_EQ(0, uv_prepare_init(event_loop(), &idle_prepare_handle_)); |
864 |
✗✓ | 6274 |
CHECK_EQ(0, uv_check_init(event_loop(), &idle_check_handle_)); |
865 |
|||
866 |
✗✓ | 25580 |
CHECK_EQ(0, uv_async_init( |
867 |
event_loop(), |
||
868 |
&task_queues_async_, |
||
869 |
[](uv_async_t* async) { |
||
870 |
Environment* env = ContainerOf( |
||
871 |
&Environment::task_queues_async_, async); |
||
872 |
HandleScope handle_scope(env->isolate()); |
||
873 |
Context::Scope context_scope(env->context()); |
||
874 |
env->RunAndClearNativeImmediates(); |
||
875 |
})); |
||
876 |
6274 |
uv_unref(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
|
877 |
6274 |
uv_unref(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
|
878 |
6274 |
uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
879 |
|||
880 |
{ |
||
881 |
12548 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
882 |
6274 |
task_queues_async_initialized_ = true; |
|
883 |
✓✗✓✓ ✓✓ |
12548 |
if (native_immediates_threadsafe_.size() > 0 || |
884 |
6274 |
native_immediates_interrupts_.size() > 0) { |
|
885 |
5517 |
uv_async_send(&task_queues_async_); |
|
886 |
} |
||
887 |
} |
||
888 |
|||
889 |
// Register clean-up cb to be called to clean up the handles |
||
890 |
// when the environment is freed, note that they are not cleaned in |
||
891 |
// the one environment per process setup, but will be called in |
||
892 |
// FreeEnvironment. |
||
893 |
6274 |
RegisterHandleCleanups(); |
|
894 |
|||
895 |
6274 |
StartProfilerIdleNotifier(); |
|
896 |
6274 |
} |
|
897 |
|||
898 |
378 |
void Environment::ExitEnv() { |
|
899 |
378 |
set_can_call_into_js(false); |
|
900 |
378 |
set_stopping(true); |
|
901 |
378 |
isolate_->TerminateExecution(); |
|
902 |
756 |
SetImmediateThreadsafe([](Environment* env) { uv_stop(env->event_loop()); }); |
|
903 |
378 |
} |
|
904 |
|||
905 |
6274 |
void Environment::RegisterHandleCleanups() { |
|
906 |
6274 |
HandleCleanupCb close_and_finish = [](Environment* env, uv_handle_t* handle, |
|
907 |
33660 |
void* arg) { |
|
908 |
33660 |
handle->data = env; |
|
909 |
|||
910 |
33660 |
env->CloseHandle(handle, [](uv_handle_t* handle) { |
|
911 |
#ifdef DEBUG |
||
912 |
memset(handle, 0xab, uv_handle_size(handle->type)); |
||
913 |
#endif |
||
914 |
33660 |
}); |
|
915 |
33660 |
}; |
|
916 |
|||
917 |
37644 |
auto register_handle = [&](uv_handle_t* handle) { |
|
918 |
37644 |
RegisterHandleCleanup(handle, close_and_finish, nullptr); |
|
919 |
43918 |
}; |
|
920 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
921 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
|
922 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(immediate_idle_handle())); |
|
923 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
|
924 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
|
925 |
6274 |
register_handle(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
926 |
6274 |
} |
|
927 |
|||
928 |
11283 |
void Environment::CleanupHandles() { |
|
929 |
{ |
||
930 |
11283 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
931 |
11283 |
task_queues_async_initialized_ = false; |
|
932 |
} |
||
933 |
|||
934 |
Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(), |
||
935 |
22566 |
Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE); |
|
936 |
|||
937 |
11283 |
RunAndClearNativeImmediates(true /* skip unrefed SetImmediate()s */); |
|
938 |
|||
939 |
✓✓ | 11445 |
for (ReqWrapBase* request : req_wrap_queue_) |
940 |
162 |
request->Cancel(); |
|
941 |
|||
942 |
✓✓ | 15877 |
for (HandleWrap* handle : handle_wrap_queue_) |
943 |
9188 |
handle->Close(); |
|
944 |
|||
945 |
✓✓ | 44943 |
for (HandleCleanup& hc : handle_cleanup_queue_) |
946 |
33660 |
hc.cb_(this, hc.handle_, hc.arg_); |
|
947 |
11283 |
handle_cleanup_queue_.clear(); |
|
948 |
|||
949 |
10584 |
while (handle_cleanup_waiting_ != 0 || |
|
950 |
✓✓✓✓ ✓✓ |
33152 |
request_waiting_ != 0 || |
951 |
✓✓ | 11285 |
!handle_wrap_queue_.IsEmpty()) { |
952 |
10584 |
uv_run(event_loop(), UV_RUN_ONCE); |
|
953 |
} |
||
954 |
11283 |
} |
|
955 |
|||
956 |
6274 |
void Environment::StartProfilerIdleNotifier() { |
|
957 |
6274 |
uv_prepare_start(&idle_prepare_handle_, [](uv_prepare_t* handle) { |
|
958 |
213931 |
Environment* env = ContainerOf(&Environment::idle_prepare_handle_, handle); |
|
959 |
213931 |
env->isolate()->SetIdle(true); |
|
960 |
213931 |
}); |
|
961 |
6274 |
uv_check_start(&idle_check_handle_, [](uv_check_t* handle) { |
|
962 |
213641 |
Environment* env = ContainerOf(&Environment::idle_check_handle_, handle); |
|
963 |
213641 |
env->isolate()->SetIdle(false); |
|
964 |
213641 |
}); |
|
965 |
6274 |
} |
|
966 |
|||
967 |
748720 |
void Environment::PrintSyncTrace() const { |
|
968 |
✓✓ | 748720 |
if (!trace_sync_io_) return; |
969 |
|||
970 |
2 |
HandleScope handle_scope(isolate()); |
|
971 |
|||
972 |
1 |
fprintf( |
|
973 |
stderr, "(node:%d) WARNING: Detected use of sync API\n", uv_os_getpid()); |
||
974 |
1 |
PrintStackTrace(isolate(), |
|
975 |
StackTrace::CurrentStackTrace( |
||
976 |
isolate(), stack_trace_limit(), StackTrace::kDetailed)); |
||
977 |
} |
||
978 |
|||
979 |
5286 |
MaybeLocal<Value> Environment::RunSnapshotSerializeCallback() const { |
|
980 |
5286 |
EscapableHandleScope handle_scope(isolate()); |
|
981 |
✗✓ | 10572 |
if (!snapshot_serialize_callback().IsEmpty()) { |
982 |
Context::Scope context_scope(context()); |
||
983 |
return handle_scope.EscapeMaybe(snapshot_serialize_callback()->Call( |
||
984 |
context(), v8::Undefined(isolate()), 0, nullptr)); |
||
985 |
} |
||
986 |
10572 |
return handle_scope.Escape(Undefined(isolate())); |
|
987 |
} |
||
988 |
|||
989 |
MaybeLocal<Value> Environment::RunSnapshotDeserializeMain() const { |
||
990 |
EscapableHandleScope handle_scope(isolate()); |
||
991 |
if (!snapshot_deserialize_main().IsEmpty()) { |
||
992 |
Context::Scope context_scope(context()); |
||
993 |
return handle_scope.EscapeMaybe(snapshot_deserialize_main()->Call( |
||
994 |
context(), v8::Undefined(isolate()), 0, nullptr)); |
||
995 |
} |
||
996 |
return handle_scope.Escape(Undefined(isolate())); |
||
997 |
} |
||
998 |
|||
999 |
5645 |
void Environment::RunCleanup() { |
|
1000 |
5645 |
started_cleanup_ = true; |
|
1001 |
✓✓✓✓ |
16173 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunCleanup"); |
1002 |
5645 |
bindings_.clear(); |
|
1003 |
5645 |
CleanupHandles(); |
|
1004 |
|||
1005 |
✓✓✓✗ |
22575 |
while (!cleanup_queue_.empty() || native_immediates_.size() > 0 || |
1006 |
✓✓✗✓ ✓✓ |
22575 |
native_immediates_threadsafe_.size() > 0 || |
1007 |
5645 |
native_immediates_interrupts_.size() > 0) { |
|
1008 |
5638 |
cleanup_queue_.Drain(); |
|
1009 |
5638 |
CleanupHandles(); |
|
1010 |
} |
||
1011 |
|||
1012 |
✓✓ | 5648 |
for (const int fd : unmanaged_fds_) { |
1013 |
uv_fs_t close_req; |
||
1014 |
3 |
uv_fs_close(nullptr, &close_req, fd, nullptr); |
|
1015 |
3 |
uv_fs_req_cleanup(&close_req); |
|
1016 |
} |
||
1017 |
5645 |
} |
|
1018 |
|||
1019 |
6382 |
void Environment::RunAtExitCallbacks() { |
|
1020 |
✓✓✓✓ |
18288 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "AtExit"); |
1021 |
✓✓ | 18962 |
for (ExitCallback at_exit : at_exit_functions_) { |
1022 |
12580 |
at_exit.cb_(at_exit.arg_); |
|
1023 |
} |
||
1024 |
6382 |
at_exit_functions_.clear(); |
|
1025 |
6382 |
} |
|
1026 |
|||
1027 |
12608 |
void Environment::AtExit(void (*cb)(void* arg), void* arg) { |
|
1028 |
12608 |
at_exit_functions_.push_front(ExitCallback{cb, arg}); |
|
1029 |
12608 |
} |
|
1030 |
|||
1031 |
252492 |
void Environment::RunAndClearInterrupts() { |
|
1032 |
✓✓ | 252492 |
while (native_immediates_interrupts_.size() > 0) { |
1033 |
10703 |
NativeImmediateQueue queue; |
|
1034 |
{ |
||
1035 |
21410 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
1036 |
10705 |
queue.ConcatMove(std::move(native_immediates_interrupts_)); |
|
1037 |
} |
||
1038 |
10705 |
DebugSealHandleScope seal_handle_scope(isolate()); |
|
1039 |
|||
1040 |
✓✓ | 21418 |
while (auto head = queue.Shift()) |
1041 |
21428 |
head->Call(this); |
|
1042 |
} |
||
1043 |
241787 |
} |
|
1044 |
|||
1045 |
231362 |
void Environment::RunAndClearNativeImmediates(bool only_refed) { |
|
1046 |
✓✓✓✓ |
467919 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), |
1047 |
"RunAndClearNativeImmediates"); |
||
1048 |
462716 |
HandleScope handle_scope(isolate_); |
|
1049 |
462716 |
InternalCallbackScope cb_scope(this, Object::New(isolate_), { 0, 0 }); |
|
1050 |
|||
1051 |
231362 |
size_t ref_count = 0; |
|
1052 |
|||
1053 |
// Handle interrupts first. These functions are not allowed to throw |
||
1054 |
// exceptions, so we do not need to handle that. |
||
1055 |
231362 |
RunAndClearInterrupts(); |
|
1056 |
|||
1057 |
462720 |
auto drain_list = [&](NativeImmediateQueue* queue) { |
|
1058 |
925433 |
TryCatchScope try_catch(this); |
|
1059 |
462720 |
DebugSealHandleScope seal_handle_scope(isolate()); |
|
1060 |
✓✓ | 522893 |
while (auto head = queue->Shift()) { |
1061 |
60181 |
bool is_refed = head->flags() & CallbackFlags::kRefed; |
|
1062 |
✓✓ | 60181 |
if (is_refed) |
1063 |
35174 |
ref_count++; |
|
1064 |
|||
1065 |
✓✓✓✓ |
60181 |
if (is_refed || !only_refed) |
1066 |
59910 |
head->Call(this); |
|
1067 |
|||
1068 |
60176 |
head.reset(); // Destroy now so that this is also observed by try_catch. |
|
1069 |
|||
1070 |
✓✓ | 60176 |
if (UNLIKELY(try_catch.HasCaught())) { |
1071 |
✓✗✓✗ ✓✗ |
3 |
if (!try_catch.HasTerminated() && can_call_into_js()) |
1072 |
3 |
errors::TriggerUncaughtException(isolate(), try_catch); |
|
1073 |
|||
1074 |
1 |
return true; |
|
1075 |
} |
||
1076 |
60173 |
} |
|
1077 |
462712 |
return false; |
|
1078 |
231361 |
}; |
|
1079 |
✗✓ | 231361 |
while (drain_list(&native_immediates_)) {} |
1080 |
|||
1081 |
231358 |
immediate_info()->ref_count_dec(ref_count); |
|
1082 |
|||
1083 |
✓✓ | 231358 |
if (immediate_info()->ref_count() == 0) |
1084 |
175578 |
ToggleImmediateRef(false); |
|
1085 |
|||
1086 |
// It is safe to check .size() first, because there is a causal relationship |
||
1087 |
// between pushes to the threadsafe immediate list and this function being |
||
1088 |
// called. For the common case, it's worth checking the size first before |
||
1089 |
// establishing a mutex lock. |
||
1090 |
// This is intentionally placed after the `ref_count` handling, because when |
||
1091 |
// refed threadsafe immediates are created, they are not counted towards the |
||
1092 |
// count in immediate_info() either. |
||
1093 |
231354 |
NativeImmediateQueue threadsafe_immediates; |
|
1094 |
✓✓ | 231358 |
if (native_immediates_threadsafe_.size() > 0) { |
1095 |
2196 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
1096 |
1098 |
threadsafe_immediates.ConcatMove(std::move(native_immediates_threadsafe_)); |
|
1097 |
} |
||
1098 |
✓✓ | 231359 |
while (drain_list(&threadsafe_immediates)) {} |
1099 |
231354 |
} |
|
1100 |
|||
1101 |
10720 |
void Environment::RequestInterruptFromV8() { |
|
1102 |
// The Isolate may outlive the Environment, so some logic to handle the |
||
1103 |
// situation in which the Environment is destroyed before the handler runs |
||
1104 |
// is required. |
||
1105 |
|||
1106 |
// We allocate a new pointer to a pointer to this Environment instance, and |
||
1107 |
// try to set it as interrupt_data_. If interrupt_data_ was already set, then |
||
1108 |
// callbacks are already scheduled to run and we can delete our own pointer |
||
1109 |
// and just return. If it was nullptr previously, the Environment** is stored; |
||
1110 |
// ~Environment sets the Environment* contained in it to nullptr, so that |
||
1111 |
// the callback can check whether ~Environment has already run and it is thus |
||
1112 |
// not safe to access the Environment instance itself. |
||
1113 |
10720 |
Environment** interrupt_data = new Environment*(this); |
|
1114 |
10720 |
Environment** dummy = nullptr; |
|
1115 |
✓✓ | 10720 |
if (!interrupt_data_.compare_exchange_strong(dummy, interrupt_data)) { |
1116 |
388 |
delete interrupt_data; |
|
1117 |
388 |
return; // Already scheduled. |
|
1118 |
} |
||
1119 |
|||
1120 |
10332 |
isolate()->RequestInterrupt([](Isolate* isolate, void* data) { |
|
1121 |
10322 |
std::unique_ptr<Environment*> env_ptr { static_cast<Environment**>(data) }; |
|
1122 |
10322 |
Environment* env = *env_ptr; |
|
1123 |
✓✓ | 10322 |
if (env == nullptr) { |
1124 |
// The Environment has already been destroyed. That should be okay; any |
||
1125 |
// callback added before the Environment shuts down would have been |
||
1126 |
// handled during cleanup. |
||
1127 |
11 |
return; |
|
1128 |
} |
||
1129 |
10311 |
env->interrupt_data_.store(nullptr); |
|
1130 |
10311 |
env->RunAndClearInterrupts(); |
|
1131 |
}, interrupt_data); |
||
1132 |
} |
||
1133 |
|||
1134 |
9573 |
void Environment::ScheduleTimer(int64_t duration_ms) { |
|
1135 |
✗✓ | 9573 |
if (started_cleanup_) return; |
1136 |
9573 |
uv_timer_start(timer_handle(), RunTimers, duration_ms, 0); |
|
1137 |
} |
||
1138 |
|||
1139 |
3970 |
void Environment::ToggleTimerRef(bool ref) { |
|
1140 |
✗✓ | 3970 |
if (started_cleanup_) return; |
1141 |
|||
1142 |
✓✓ | 3970 |
if (ref) { |
1143 |
2664 |
uv_ref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
1144 |
} else { |
||
1145 |
1306 |
uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
1146 |
} |
||
1147 |
} |
||
1148 |
|||
1149 |
7537 |
void Environment::RunTimers(uv_timer_t* handle) { |
|
1150 |
7537 |
Environment* env = Environment::from_timer_handle(handle); |
|
1151 |
✓✓✓✓ |
8060 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunTimers"); |
1152 |
|||
1153 |
✗✓ | 7537 |
if (!env->can_call_into_js()) |
1154 |
return; |
||
1155 |
|||
1156 |
7537 |
HandleScope handle_scope(env->isolate()); |
|
1157 |
7537 |
Context::Scope context_scope(env->context()); |
|
1158 |
|||
1159 |
7537 |
Local<Object> process = env->process_object(); |
|
1160 |
7537 |
InternalCallbackScope scope(env, process, {0, 0}); |
|
1161 |
|||
1162 |
7537 |
Local<Function> cb = env->timers_callback_function(); |
|
1163 |
MaybeLocal<Value> ret; |
||
1164 |
7537 |
Local<Value> arg = env->GetNow(); |
|
1165 |
// This code will loop until all currently due timers will process. It is |
||
1166 |
// impossible for us to end up in an infinite loop due to how the JS-side |
||
1167 |
// is structured. |
||
1168 |
33 |
do { |
|
1169 |
7570 |
TryCatchScope try_catch(env); |
|
1170 |
7570 |
try_catch.SetVerbose(true); |
|
1171 |
7570 |
ret = cb->Call(env->context(), process, 1, &arg); |
|
1172 |
✓✓✓✓ ✓✓ |
7558 |
} while (ret.IsEmpty() && env->can_call_into_js()); |
1173 |
|||
1174 |
// NOTE(apapirovski): If it ever becomes possible that `call_into_js` above |
||
1175 |
// is reset back to `true` after being previously set to `false` then this |
||
1176 |
// code becomes invalid and needs to be rewritten. Otherwise catastrophic |
||
1177 |
// timers corruption will occur and all timers behaviour will become |
||
1178 |
// entirely unpredictable. |
||
1179 |
✓✓ | 7525 |
if (ret.IsEmpty()) |
1180 |
7 |
return; |
|
1181 |
|||
1182 |
// To allow for less JS-C++ boundary crossing, the value returned from JS |
||
1183 |
// serves a few purposes: |
||
1184 |
// 1. If it's 0, no more timers exist and the handle should be unrefed |
||
1185 |
// 2. If it's > 0, the value represents the next timer's expiry and there |
||
1186 |
// is at least one timer remaining that is refed. |
||
1187 |
// 3. If it's < 0, the absolute value represents the next timer's expiry |
||
1188 |
// and there are no timers that are refed. |
||
1189 |
int64_t expiry_ms = |
||
1190 |
7518 |
ret.ToLocalChecked()->IntegerValue(env->context()).FromJust(); |
|
1191 |
|||
1192 |
7518 |
uv_handle_t* h = reinterpret_cast<uv_handle_t*>(handle); |
|
1193 |
|||
1194 |
✓✓ | 7518 |
if (expiry_ms != 0) { |
1195 |
int64_t duration_ms = |
||
1196 |
6358 |
llabs(expiry_ms) - (uv_now(env->event_loop()) - env->timer_base()); |
|
1197 |
|||
1198 |
6358 |
env->ScheduleTimer(duration_ms > 0 ? duration_ms : 1); |
|
1199 |
|||
1200 |
✓✓ | 6358 |
if (expiry_ms > 0) |
1201 |
5645 |
uv_ref(h); |
|
1202 |
else |
||
1203 |
713 |
uv_unref(h); |
|
1204 |
} else { |
||
1205 |
1160 |
uv_unref(h); |
|
1206 |
} |
||
1207 |
} |
||
1208 |
|||
1209 |
|||
1210 |
213641 |
void Environment::CheckImmediate(uv_check_t* handle) { |
|
1211 |
213641 |
Environment* env = Environment::from_immediate_check_handle(handle); |
|
1212 |
✓✓✓✓ |
216771 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "CheckImmediate"); |
1213 |
|||
1214 |
213641 |
HandleScope scope(env->isolate()); |
|
1215 |
213641 |
Context::Scope context_scope(env->context()); |
|
1216 |
|||
1217 |
213641 |
env->RunAndClearNativeImmediates(); |
|
1218 |
|||
1219 |
✓✓✓✓ ✓✓ |
213641 |
if (env->immediate_info()->count() == 0 || !env->can_call_into_js()) |
1220 |
158443 |
return; |
|
1221 |
|||
1222 |
952 |
do { |
|
1223 |
56138 |
MakeCallback(env->isolate(), |
|
1224 |
env->process_object(), |
||
1225 |
env->immediate_callback_function(), |
||
1226 |
0, |
||
1227 |
nullptr, |
||
1228 |
56150 |
{0, 0}).ToLocalChecked(); |
|
1229 |
✓✓✓✓ ✓✓ |
56138 |
} while (env->immediate_info()->has_outstanding() && env->can_call_into_js()); |
1230 |
|||
1231 |
✓✓ | 55186 |
if (env->immediate_info()->ref_count() == 0) |
1232 |
4702 |
env->ToggleImmediateRef(false); |
|
1233 |
} |
||
1234 |
|||
1235 |
258912 |
void Environment::ToggleImmediateRef(bool ref) { |
|
1236 |
✓✓ | 258912 |
if (started_cleanup_) return; |
1237 |
|||
1238 |
✓✓ | 247710 |
if (ref) { |
1239 |
// Idle handle is needed only to stop the event loop from blocking in poll. |
||
1240 |
78608 |
uv_idle_start(immediate_idle_handle(), [](uv_idle_t*){ }); |
|
1241 |
} else { |
||
1242 |
169102 |
uv_idle_stop(immediate_idle_handle()); |
|
1243 |
} |
||
1244 |
} |
||
1245 |
|||
1246 |
|||
1247 |
47509 |
Local<Value> Environment::GetNow() { |
|
1248 |
47509 |
uv_update_time(event_loop()); |
|
1249 |
47509 |
uint64_t now = uv_now(event_loop()); |
|
1250 |
✗✓ | 47509 |
CHECK_GE(now, timer_base()); |
1251 |
47509 |
now -= timer_base(); |
|
1252 |
✓✗ | 47509 |
if (now <= 0xffffffff) |
1253 |
95018 |
return Integer::NewFromUnsigned(isolate(), static_cast<uint32_t>(now)); |
|
1254 |
else |
||
1255 |
return Number::New(isolate(), static_cast<double>(now)); |
||
1256 |
} |
||
1257 |
|||
1258 |
28 |
void CollectExceptionInfo(Environment* env, |
|
1259 |
Local<Object> obj, |
||
1260 |
int errorno, |
||
1261 |
const char* err_string, |
||
1262 |
const char* syscall, |
||
1263 |
const char* message, |
||
1264 |
const char* path, |
||
1265 |
const char* dest) { |
||
1266 |
28 |
obj->Set(env->context(), |
|
1267 |
env->errno_string(), |
||
1268 |
112 |
Integer::New(env->isolate(), errorno)).Check(); |
|
1269 |
|||
1270 |
28 |
obj->Set(env->context(), env->code_string(), |
|
1271 |
84 |
OneByteString(env->isolate(), err_string)).Check(); |
|
1272 |
|||
1273 |
✓✗ | 28 |
if (message != nullptr) { |
1274 |
28 |
obj->Set(env->context(), env->message_string(), |
|
1275 |
112 |
OneByteString(env->isolate(), message)).Check(); |
|
1276 |
} |
||
1277 |
|||
1278 |
Local<Value> path_buffer; |
||
1279 |
✗✓ | 28 |
if (path != nullptr) { |
1280 |
path_buffer = |
||
1281 |
Buffer::Copy(env->isolate(), path, strlen(path)).ToLocalChecked(); |
||
1282 |
obj->Set(env->context(), env->path_string(), path_buffer).Check(); |
||
1283 |
} |
||
1284 |
|||
1285 |
Local<Value> dest_buffer; |
||
1286 |
✗✓ | 28 |
if (dest != nullptr) { |
1287 |
dest_buffer = |
||
1288 |
Buffer::Copy(env->isolate(), dest, strlen(dest)).ToLocalChecked(); |
||
1289 |
obj->Set(env->context(), env->dest_string(), dest_buffer).Check(); |
||
1290 |
} |
||
1291 |
|||
1292 |
✓✗ | 28 |
if (syscall != nullptr) { |
1293 |
28 |
obj->Set(env->context(), env->syscall_string(), |
|
1294 |
112 |
OneByteString(env->isolate(), syscall)).Check(); |
|
1295 |
} |
||
1296 |
28 |
} |
|
1297 |
|||
1298 |
28 |
void Environment::CollectUVExceptionInfo(Local<Value> object, |
|
1299 |
int errorno, |
||
1300 |
const char* syscall, |
||
1301 |
const char* message, |
||
1302 |
const char* path, |
||
1303 |
const char* dest) { |
||
1304 |
✓✗✗✓ ✗✓ |
28 |
if (!object->IsObject() || errorno == 0) |
1305 |
return; |
||
1306 |
|||
1307 |
28 |
Local<Object> obj = object.As<Object>(); |
|
1308 |
28 |
const char* err_string = uv_err_name(errorno); |
|
1309 |
|||
1310 |
✗✓✗✗ |
28 |
if (message == nullptr || message[0] == '\0') { |
1311 |
28 |
message = uv_strerror(errorno); |
|
1312 |
} |
||
1313 |
|||
1314 |
28 |
node::CollectExceptionInfo(this, obj, errorno, err_string, |
|
1315 |
syscall, message, path, dest); |
||
1316 |
} |
||
1317 |
|||
1318 |
6309 |
ImmediateInfo::ImmediateInfo(Isolate* isolate, const SerializeInfo* info) |
|
1319 |
✓✓ | 6309 |
: fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)) {} |
1320 |
|||
1321 |
6 |
ImmediateInfo::SerializeInfo ImmediateInfo::Serialize( |
|
1322 |
Local<Context> context, SnapshotCreator* creator) { |
||
1323 |
6 |
return {fields_.Serialize(context, creator)}; |
|
1324 |
} |
||
1325 |
|||
1326 |
5521 |
void ImmediateInfo::Deserialize(Local<Context> context) { |
|
1327 |
5521 |
fields_.Deserialize(context); |
|
1328 |
5521 |
} |
|
1329 |
|||
1330 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1331 |
const ImmediateInfo::SerializeInfo& i) { |
||
1332 |
6 |
output << "{ " << i.fields << " }"; |
|
1333 |
6 |
return output; |
|
1334 |
} |
||
1335 |
|||
1336 |
25 |
void ImmediateInfo::MemoryInfo(MemoryTracker* tracker) const { |
|
1337 |
25 |
tracker->TrackField("fields", fields_); |
|
1338 |
25 |
} |
|
1339 |
|||
1340 |
6 |
TickInfo::SerializeInfo TickInfo::Serialize(Local<Context> context, |
|
1341 |
SnapshotCreator* creator) { |
||
1342 |
6 |
return {fields_.Serialize(context, creator)}; |
|
1343 |
} |
||
1344 |
|||
1345 |
5521 |
void TickInfo::Deserialize(Local<Context> context) { |
|
1346 |
5521 |
fields_.Deserialize(context); |
|
1347 |
5521 |
} |
|
1348 |
|||
1349 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1350 |
const TickInfo::SerializeInfo& i) { |
||
1351 |
6 |
output << "{ " << i.fields << " }"; |
|
1352 |
6 |
return output; |
|
1353 |
} |
||
1354 |
|||
1355 |
25 |
void TickInfo::MemoryInfo(MemoryTracker* tracker) const { |
|
1356 |
25 |
tracker->TrackField("fields", fields_); |
|
1357 |
25 |
} |
|
1358 |
|||
1359 |
6309 |
TickInfo::TickInfo(Isolate* isolate, const SerializeInfo* info) |
|
1360 |
: fields_( |
||
1361 |
✓✓ | 6309 |
isolate, kFieldsCount, info == nullptr ? nullptr : &(info->fields)) {} |
1362 |
|||
1363 |
6309 |
AsyncHooks::AsyncHooks(Isolate* isolate, const SerializeInfo* info) |
|
1364 |
: async_ids_stack_(isolate, 16 * 2, MAYBE_FIELD_PTR(info, async_ids_stack)), |
||
1365 |
fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)), |
||
1366 |
async_id_fields_( |
||
1367 |
isolate, kUidFieldsCount, MAYBE_FIELD_PTR(info, async_id_fields)), |
||
1368 |
✓✓✓✓ ✓✓ |
6309 |
info_(info) { |
1369 |
12618 |
HandleScope handle_scope(isolate); |
|
1370 |
✓✓ | 6309 |
if (info == nullptr) { |
1371 |
788 |
clear_async_id_stack(); |
|
1372 |
|||
1373 |
// Always perform async_hooks checks, not just when async_hooks is enabled. |
||
1374 |
// TODO(AndreasMadsen): Consider removing this for LTS releases. |
||
1375 |
// See discussion in https://github.com/nodejs/node/pull/15454 |
||
1376 |
// When removing this, do it by reverting the commit. Otherwise the test |
||
1377 |
// and flag changes won't be included. |
||
1378 |
788 |
fields_[kCheck] = 1; |
|
1379 |
|||
1380 |
// kDefaultTriggerAsyncId should be -1, this indicates that there is no |
||
1381 |
// specified default value and it should fallback to the executionAsyncId. |
||
1382 |
// 0 is not used as the magic value, because that indicates a missing |
||
1383 |
// context which is different from a default context. |
||
1384 |
788 |
async_id_fields_[AsyncHooks::kDefaultTriggerAsyncId] = -1; |
|
1385 |
|||
1386 |
// kAsyncIdCounter should start at 1 because that'll be the id the execution |
||
1387 |
// context during bootstrap (code that runs before entering uv_run()). |
||
1388 |
788 |
async_id_fields_[AsyncHooks::kAsyncIdCounter] = 1; |
|
1389 |
} |
||
1390 |
6309 |
} |
|
1391 |
|||
1392 |
5521 |
void AsyncHooks::Deserialize(Local<Context> context) { |
|
1393 |
5521 |
async_ids_stack_.Deserialize(context); |
|
1394 |
5521 |
fields_.Deserialize(context); |
|
1395 |
5521 |
async_id_fields_.Deserialize(context); |
|
1396 |
|||
1397 |
Local<Array> js_execution_async_resources; |
||
1398 |
✓✗ | 5521 |
if (info_->js_execution_async_resources != 0) { |
1399 |
js_execution_async_resources = |
||
1400 |
5521 |
context->GetDataFromSnapshotOnce<Array>( |
|
1401 |
✗✓ | 16563 |
info_->js_execution_async_resources).ToLocalChecked(); |
1402 |
} else { |
||
1403 |
js_execution_async_resources = Array::New(context->GetIsolate()); |
||
1404 |
} |
||
1405 |
5521 |
js_execution_async_resources_.Reset( |
|
1406 |
context->GetIsolate(), js_execution_async_resources); |
||
1407 |
|||
1408 |
// The native_execution_async_resources_ field requires v8::Local<> instances |
||
1409 |
// for async calls whose resources were on the stack as JS objects when they |
||
1410 |
// were entered. We cannot recreate this here; however, storing these values |
||
1411 |
// on the JS equivalent gives the same result, so we do that instead. |
||
1412 |
✗✓ | 5521 |
for (size_t i = 0; i < info_->native_execution_async_resources.size(); ++i) { |
1413 |
if (info_->native_execution_async_resources[i] == SIZE_MAX) |
||
1414 |
continue; |
||
1415 |
Local<Object> obj = context->GetDataFromSnapshotOnce<Object>( |
||
1416 |
info_->native_execution_async_resources[i]) |
||
1417 |
.ToLocalChecked(); |
||
1418 |
js_execution_async_resources->Set(context, i, obj).Check(); |
||
1419 |
} |
||
1420 |
5521 |
info_ = nullptr; |
|
1421 |
5521 |
} |
|
1422 |
|||
1423 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1424 |
const AsyncHooks::SerializeInfo& i) { |
||
1425 |
output << "{\n" |
||
1426 |
6 |
<< " " << i.async_ids_stack << ", // async_ids_stack\n" |
|
1427 |
6 |
<< " " << i.fields << ", // fields\n" |
|
1428 |
6 |
<< " " << i.async_id_fields << ", // async_id_fields\n" |
|
1429 |
6 |
<< " " << i.js_execution_async_resources |
|
1430 |
<< ", // js_execution_async_resources\n" |
||
1431 |
6 |
<< " " << i.native_execution_async_resources |
|
1432 |
<< ", // native_execution_async_resources\n" |
||
1433 |
6 |
<< "}"; |
|
1434 |
6 |
return output; |
|
1435 |
} |
||
1436 |
|||
1437 |
6 |
AsyncHooks::SerializeInfo AsyncHooks::Serialize(Local<Context> context, |
|
1438 |
SnapshotCreator* creator) { |
||
1439 |
6 |
SerializeInfo info; |
|
1440 |
// TODO(joyeecheung): some of these probably don't need to be serialized. |
||
1441 |
6 |
info.async_ids_stack = async_ids_stack_.Serialize(context, creator); |
|
1442 |
6 |
info.fields = fields_.Serialize(context, creator); |
|
1443 |
6 |
info.async_id_fields = async_id_fields_.Serialize(context, creator); |
|
1444 |
✓✗ | 6 |
if (!js_execution_async_resources_.IsEmpty()) { |
1445 |
6 |
info.js_execution_async_resources = creator->AddData( |
|
1446 |
context, js_execution_async_resources_.Get(context->GetIsolate())); |
||
1447 |
✗✓ | 6 |
CHECK_NE(info.js_execution_async_resources, 0); |
1448 |
} else { |
||
1449 |
info.js_execution_async_resources = 0; |
||
1450 |
} |
||
1451 |
|||
1452 |
6 |
info.native_execution_async_resources.resize( |
|
1453 |
native_execution_async_resources_.size()); |
||
1454 |
✗✓ | 6 |
for (size_t i = 0; i < native_execution_async_resources_.size(); i++) { |
1455 |
info.native_execution_async_resources[i] = |
||
1456 |
native_execution_async_resources_[i].IsEmpty() ? SIZE_MAX : |
||
1457 |
creator->AddData( |
||
1458 |
context, |
||
1459 |
native_execution_async_resources_[i]); |
||
1460 |
} |
||
1461 |
✗✓ | 6 |
CHECK_EQ(contexts_.size(), 1); |
1462 |
✗✓✗✓ |
12 |
CHECK_EQ(contexts_[0], env()->context()); |
1463 |
✗✓ | 6 |
CHECK(js_promise_hooks_[0].IsEmpty()); |
1464 |
✗✓ | 6 |
CHECK(js_promise_hooks_[1].IsEmpty()); |
1465 |
✗✓ | 6 |
CHECK(js_promise_hooks_[2].IsEmpty()); |
1466 |
✗✓ | 6 |
CHECK(js_promise_hooks_[3].IsEmpty()); |
1467 |
|||
1468 |
6 |
return info; |
|
1469 |
} |
||
1470 |
|||
1471 |
25 |
void AsyncHooks::MemoryInfo(MemoryTracker* tracker) const { |
|
1472 |
25 |
tracker->TrackField("async_ids_stack", async_ids_stack_); |
|
1473 |
25 |
tracker->TrackField("fields", fields_); |
|
1474 |
25 |
tracker->TrackField("async_id_fields", async_id_fields_); |
|
1475 |
25 |
tracker->TrackField("js_promise_hooks", js_promise_hooks_); |
|
1476 |
25 |
} |
|
1477 |
|||
1478 |
4 |
void AsyncHooks::grow_async_ids_stack() { |
|
1479 |
4 |
async_ids_stack_.reserve(async_ids_stack_.Length() * 3); |
|
1480 |
|||
1481 |
4 |
env()->async_hooks_binding()->Set( |
|
1482 |
env()->context(), |
||
1483 |
env()->async_ids_stack_string(), |
||
1484 |
12 |
async_ids_stack_.GetJSArray()).Check(); |
|
1485 |
4 |
} |
|
1486 |
|||
1487 |
4 |
void AsyncHooks::FailWithCorruptedAsyncStack(double expected_async_id) { |
|
1488 |
4 |
fprintf(stderr, |
|
1489 |
"Error: async hook stack has become corrupted (" |
||
1490 |
"actual: %.f, expected: %.f)\n", |
||
1491 |
async_id_fields_.GetValue(kExecutionAsyncId), |
||
1492 |
expected_async_id); |
||
1493 |
4 |
DumpBacktrace(stderr); |
|
1494 |
4 |
fflush(stderr); |
|
1495 |
✓✗ | 4 |
if (!env()->abort_on_uncaught_exception()) |
1496 |
4 |
exit(1); |
|
1497 |
fprintf(stderr, "\n"); |
||
1498 |
fflush(stderr); |
||
1499 |
ABORT_NO_BACKTRACE(); |
||
1500 |
} |
||
1501 |
|||
1502 |
720 |
void Environment::Exit(int exit_code) { |
|
1503 |
✓✓ | 720 |
if (options()->trace_exit) { |
1504 |
4 |
HandleScope handle_scope(isolate()); |
|
1505 |
Isolate::DisallowJavascriptExecutionScope disallow_js( |
||
1506 |
4 |
isolate(), Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE); |
|
1507 |
|||
1508 |
✓✓ | 2 |
if (is_main_thread()) { |
1509 |
1 |
fprintf(stderr, "(node:%d) ", uv_os_getpid()); |
|
1510 |
} else { |
||
1511 |
1 |
fprintf(stderr, "(node:%d, thread:%" PRIu64 ") ", |
|
1512 |
uv_os_getpid(), thread_id()); |
||
1513 |
} |
||
1514 |
|||
1515 |
2 |
fprintf( |
|
1516 |
stderr, "WARNING: Exited the environment with code %d\n", exit_code); |
||
1517 |
2 |
PrintStackTrace(isolate(), |
|
1518 |
StackTrace::CurrentStackTrace( |
||
1519 |
isolate(), stack_trace_limit(), StackTrace::kDetailed)); |
||
1520 |
} |
||
1521 |
720 |
process_exit_handler_(this, exit_code); |
|
1522 |
63 |
} |
|
1523 |
|||
1524 |
6329 |
void Environment::stop_sub_worker_contexts() { |
|
1525 |
DCHECK_EQ(Isolate::GetCurrent(), isolate()); |
||
1526 |
|||
1527 |
✓✓ | 6329 |
while (!sub_worker_contexts_.empty()) { |
1528 |
27 |
Worker* w = *sub_worker_contexts_.begin(); |
|
1529 |
27 |
remove_sub_worker_context(w); |
|
1530 |
27 |
w->Exit(1); |
|
1531 |
27 |
w->JoinThread(); |
|
1532 |
} |
||
1533 |
6302 |
} |
|
1534 |
|||
1535 |
10 |
Environment* Environment::worker_parent_env() const { |
|
1536 |
✓✗ | 10 |
if (worker_context() == nullptr) return nullptr; |
1537 |
return worker_context()->env(); |
||
1538 |
} |
||
1539 |
|||
1540 |
68976 |
void Environment::AddUnmanagedFd(int fd) { |
|
1541 |
✓✓ | 68976 |
if (!tracks_unmanaged_fds()) return; |
1542 |
2725 |
auto result = unmanaged_fds_.insert(fd); |
|
1543 |
✓✓ | 2725 |
if (!result.second) { |
1544 |
ProcessEmitWarning( |
||
1545 |
1 |
this, "File descriptor %d opened in unmanaged mode twice", fd); |
|
1546 |
} |
||
1547 |
} |
||
1548 |
|||
1549 |
68587 |
void Environment::RemoveUnmanagedFd(int fd) { |
|
1550 |
✓✓ | 68587 |
if (!tracks_unmanaged_fds()) return; |
1551 |
2722 |
size_t removed_count = unmanaged_fds_.erase(fd); |
|
1552 |
✓✓ | 2722 |
if (removed_count == 0) { |
1553 |
ProcessEmitWarning( |
||
1554 |
1 |
this, "File descriptor %d closed but not opened in unmanaged mode", fd); |
|
1555 |
} |
||
1556 |
} |
||
1557 |
|||
1558 |
5257 |
void Environment::PrintInfoForSnapshotIfDebug() { |
|
1559 |
✗✓ | 10514 |
if (enabled_debug_list()->enabled(DebugCategory::MKSNAPSHOT)) { |
1560 |
fprintf(stderr, "BaseObjects at the exit of the Environment:\n"); |
||
1561 |
PrintAllBaseObjects(); |
||
1562 |
fprintf(stderr, "\nNative modules without cache:\n"); |
||
1563 |
for (const auto& s : builtins_without_cache) { |
||
1564 |
fprintf(stderr, "%s\n", s.c_str()); |
||
1565 |
} |
||
1566 |
fprintf(stderr, "\nNative modules with cache:\n"); |
||
1567 |
for (const auto& s : builtins_with_cache) { |
||
1568 |
fprintf(stderr, "%s\n", s.c_str()); |
||
1569 |
} |
||
1570 |
fprintf(stderr, "\nStatic bindings (need to be registered):\n"); |
||
1571 |
for (const auto mod : internal_bindings) { |
||
1572 |
fprintf(stderr, "%s:%s\n", mod->nm_filename, mod->nm_modname); |
||
1573 |
} |
||
1574 |
} |
||
1575 |
5257 |
} |
|
1576 |
|||
1577 |
void Environment::PrintAllBaseObjects() { |
||
1578 |
size_t i = 0; |
||
1579 |
std::cout << "BaseObjects\n"; |
||
1580 |
ForEachBaseObject([&](BaseObject* obj) { |
||
1581 |
std::cout << "#" << i++ << " " << obj << ": " << |
||
1582 |
obj->MemoryInfoName() << "\n"; |
||
1583 |
}); |
||
1584 |
} |
||
1585 |
|||
1586 |
5257 |
void Environment::VerifyNoStrongBaseObjects() { |
|
1587 |
// When a process exits cleanly, i.e. because the event loop ends up without |
||
1588 |
// things to wait for, the Node.js objects that are left on the heap should |
||
1589 |
// be: |
||
1590 |
// |
||
1591 |
// 1. weak, i.e. ready for garbage collection once no longer referenced, or |
||
1592 |
// 2. detached, i.e. scheduled for destruction once no longer referenced, or |
||
1593 |
// 3. an unrefed libuv handle, i.e. does not keep the event loop alive, or |
||
1594 |
// 4. an inactive libuv handle (essentially the same here) |
||
1595 |
// |
||
1596 |
// There are a few exceptions to this rule, but generally, if there are |
||
1597 |
// C++-backed Node.js objects on the heap that do not fall into the above |
||
1598 |
// categories, we may be looking at a potential memory leak. Most likely, |
||
1599 |
// the cause is a missing MakeWeak() call on the corresponding object. |
||
1600 |
// |
||
1601 |
// In order to avoid this kind of problem, we check the list of BaseObjects |
||
1602 |
// for these criteria. Currently, we only do so when explicitly instructed to |
||
1603 |
// or when in debug mode (where --verify-base-objects is always-on). |
||
1604 |
|||
1605 |
✓✗ | 5257 |
if (!options()->verify_base_objects) return; |
1606 |
|||
1607 |
ForEachBaseObject([](BaseObject* obj) { |
||
1608 |
if (obj->IsNotIndicativeOfMemoryLeakAtExit()) return; |
||
1609 |
fprintf(stderr, "Found bad BaseObject during clean exit: %s\n", |
||
1610 |
obj->MemoryInfoName().c_str()); |
||
1611 |
fflush(stderr); |
||
1612 |
ABORT(); |
||
1613 |
}); |
||
1614 |
} |
||
1615 |
|||
1616 |
6 |
EnvSerializeInfo Environment::Serialize(SnapshotCreator* creator) { |
|
1617 |
6 |
EnvSerializeInfo info; |
|
1618 |
6 |
Local<Context> ctx = context(); |
|
1619 |
|||
1620 |
// Currently all modules are compiled without cache in builtin snapshot |
||
1621 |
// builder. |
||
1622 |
12 |
info.builtins = std::vector<std::string>(builtins_without_cache.begin(), |
|
1623 |
6 |
builtins_without_cache.end()); |
|
1624 |
|||
1625 |
6 |
info.async_hooks = async_hooks_.Serialize(ctx, creator); |
|
1626 |
6 |
info.immediate_info = immediate_info_.Serialize(ctx, creator); |
|
1627 |
6 |
info.tick_info = tick_info_.Serialize(ctx, creator); |
|
1628 |
6 |
info.performance_state = performance_state_->Serialize(ctx, creator); |
|
1629 |
6 |
info.exiting = exiting_.Serialize(ctx, creator); |
|
1630 |
6 |
info.stream_base_state = stream_base_state_.Serialize(ctx, creator); |
|
1631 |
6 |
info.should_abort_on_uncaught_toggle = |
|
1632 |
6 |
should_abort_on_uncaught_toggle_.Serialize(ctx, creator); |
|
1633 |
|||
1634 |
// Do this after other creator->AddData() calls so that Snapshotable objects |
||
1635 |
// can use 0 to indicate that a SnapshotIndex is invalid. |
||
1636 |
6 |
SerializeSnapshotableObjects(this, creator, &info); |
|
1637 |
|||
1638 |
6 |
info.principal_realm = principal_realm_->Serialize(creator); |
|
1639 |
6 |
return info; |
|
1640 |
} |
||
1641 |
|||
1642 |
22084 |
void Environment::EnqueueDeserializeRequest(DeserializeRequestCallback cb, |
|
1643 |
Local<Object> holder, |
||
1644 |
int index, |
||
1645 |
InternalFieldInfoBase* info) { |
||
1646 |
DCHECK_EQ(index, BaseObject::kEmbedderType); |
||
1647 |
44168 |
DeserializeRequest request{cb, {isolate(), holder}, index, info}; |
|
1648 |
22084 |
deserialize_requests_.push_back(std::move(request)); |
|
1649 |
22084 |
} |
|
1650 |
|||
1651 |
5521 |
void Environment::RunDeserializeRequests() { |
|
1652 |
11042 |
HandleScope scope(isolate()); |
|
1653 |
5521 |
Local<Context> ctx = context(); |
|
1654 |
5521 |
Isolate* is = isolate(); |
|
1655 |
✓✓ | 27605 |
while (!deserialize_requests_.empty()) { |
1656 |
44168 |
DeserializeRequest request(std::move(deserialize_requests_.front())); |
|
1657 |
22084 |
deserialize_requests_.pop_front(); |
|
1658 |
22084 |
Local<Object> holder = request.holder.Get(is); |
|
1659 |
22084 |
request.cb(ctx, holder, request.index, request.info); |
|
1660 |
request.holder.Reset(); |
||
1661 |
22084 |
request.info->Delete(); |
|
1662 |
} |
||
1663 |
5521 |
} |
|
1664 |
|||
1665 |
5521 |
void Environment::DeserializeProperties(const EnvSerializeInfo* info) { |
|
1666 |
5521 |
Local<Context> ctx = context(); |
|
1667 |
|||
1668 |
5521 |
RunDeserializeRequests(); |
|
1669 |
|||
1670 |
5521 |
builtins_in_snapshot = info->builtins; |
|
1671 |
5521 |
async_hooks_.Deserialize(ctx); |
|
1672 |
5521 |
immediate_info_.Deserialize(ctx); |
|
1673 |
5521 |
tick_info_.Deserialize(ctx); |
|
1674 |
5521 |
performance_state_->Deserialize(ctx); |
|
1675 |
5521 |
exiting_.Deserialize(ctx); |
|
1676 |
5521 |
stream_base_state_.Deserialize(ctx); |
|
1677 |
5521 |
should_abort_on_uncaught_toggle_.Deserialize(ctx); |
|
1678 |
|||
1679 |
5521 |
principal_realm_->DeserializeProperties(&info->principal_realm); |
|
1680 |
|||
1681 |
✗✓ | 5521 |
if (enabled_debug_list_.enabled(DebugCategory::MKSNAPSHOT)) { |
1682 |
fprintf(stderr, "deserializing...\n"); |
||
1683 |
std::cerr << *info << "\n"; |
||
1684 |
} |
||
1685 |
5521 |
} |
|
1686 |
|||
1687 |
4 |
uint64_t GuessMemoryAvailableToTheProcess() { |
|
1688 |
4 |
uint64_t free_in_system = uv_get_free_memory(); |
|
1689 |
4 |
size_t allowed = uv_get_constrained_memory(); |
|
1690 |
✗✓ | 4 |
if (allowed == 0) { |
1691 |
return free_in_system; |
||
1692 |
} |
||
1693 |
size_t rss; |
||
1694 |
4 |
int err = uv_resident_set_memory(&rss); |
|
1695 |
✗✓ | 4 |
if (err) { |
1696 |
return free_in_system; |
||
1697 |
} |
||
1698 |
✗✓ | 4 |
if (allowed < rss) { |
1699 |
// Something is probably wrong. Fallback to the free memory. |
||
1700 |
return free_in_system; |
||
1701 |
} |
||
1702 |
// There may still be room for swap, but we will just leave it here. |
||
1703 |
4 |
return allowed - rss; |
|
1704 |
} |
||
1705 |
|||
1706 |
25 |
void Environment::BuildEmbedderGraph(Isolate* isolate, |
|
1707 |
EmbedderGraph* graph, |
||
1708 |
void* data) { |
||
1709 |
50 |
MemoryTracker tracker(isolate, graph); |
|
1710 |
25 |
Environment* env = static_cast<Environment*>(data); |
|
1711 |
25 |
tracker.Track(env); |
|
1712 |
25 |
} |
|
1713 |
|||
1714 |
4 |
size_t Environment::NearHeapLimitCallback(void* data, |
|
1715 |
size_t current_heap_limit, |
||
1716 |
size_t initial_heap_limit) { |
||
1717 |
4 |
Environment* env = static_cast<Environment*>(data); |
|
1718 |
|||
1719 |
Debug(env, |
||
1720 |
DebugCategory::DIAGNOSTICS, |
||
1721 |
"Invoked NearHeapLimitCallback, processing=%d, " |
||
1722 |
"current_limit=%" PRIu64 ", " |
||
1723 |
"initial_limit=%" PRIu64 "\n", |
||
1724 |
4 |
env->is_in_heapsnapshot_heap_limit_callback_, |
|
1725 |
8 |
static_cast<uint64_t>(current_heap_limit), |
|
1726 |
4 |
static_cast<uint64_t>(initial_heap_limit)); |
|
1727 |
|||
1728 |
4 |
size_t max_young_gen_size = env->isolate_data()->max_young_gen_size; |
|
1729 |
4 |
size_t young_gen_size = 0; |
|
1730 |
4 |
size_t old_gen_size = 0; |
|
1731 |
|||
1732 |
4 |
HeapSpaceStatistics stats; |
|
1733 |
4 |
size_t num_heap_spaces = env->isolate()->NumberOfHeapSpaces(); |
|
1734 |
✓✓ | 36 |
for (size_t i = 0; i < num_heap_spaces; ++i) { |
1735 |
32 |
env->isolate()->GetHeapSpaceStatistics(&stats, i); |
|
1736 |
✓✓✓✓ |
60 |
if (strcmp(stats.space_name(), "new_space") == 0 || |
1737 |
✓✓ | 28 |
strcmp(stats.space_name(), "new_large_object_space") == 0) { |
1738 |
8 |
young_gen_size += stats.space_used_size(); |
|
1739 |
} else { |
||
1740 |
24 |
old_gen_size += stats.space_used_size(); |
|
1741 |
} |
||
1742 |
} |
||
1743 |
|||
1744 |
Debug(env, |
||
1745 |
DebugCategory::DIAGNOSTICS, |
||
1746 |
"max_young_gen_size=%" PRIu64 ", " |
||
1747 |
"young_gen_size=%" PRIu64 ", " |
||
1748 |
"old_gen_size=%" PRIu64 ", " |
||
1749 |
"total_size=%" PRIu64 "\n", |
||
1750 |
8 |
static_cast<uint64_t>(max_young_gen_size), |
|
1751 |
8 |
static_cast<uint64_t>(young_gen_size), |
|
1752 |
8 |
static_cast<uint64_t>(old_gen_size), |
|
1753 |
4 |
static_cast<uint64_t>(young_gen_size + old_gen_size)); |
|
1754 |
|||
1755 |
4 |
uint64_t available = GuessMemoryAvailableToTheProcess(); |
|
1756 |
// TODO(joyeecheung): get a better estimate about the native memory |
||
1757 |
// usage into the overhead, e.g. based on the count of objects. |
||
1758 |
4 |
uint64_t estimated_overhead = max_young_gen_size; |
|
1759 |
Debug(env, |
||
1760 |
DebugCategory::DIAGNOSTICS, |
||
1761 |
"Estimated available memory=%" PRIu64 ", " |
||
1762 |
"estimated overhead=%" PRIu64 "\n", |
||
1763 |
8 |
static_cast<uint64_t>(available), |
|
1764 |
4 |
static_cast<uint64_t>(estimated_overhead)); |
|
1765 |
|||
1766 |
// This might be hit when the snapshot is being taken in another |
||
1767 |
// NearHeapLimitCallback invocation. |
||
1768 |
// When taking the snapshot, objects in the young generation may be |
||
1769 |
// promoted to the old generation, result in increased heap usage, |
||
1770 |
// but it should be no more than the young generation size. |
||
1771 |
// Ideally, this should be as small as possible - the heap limit |
||
1772 |
// can only be restored when the heap usage falls down below the |
||
1773 |
// new limit, so in a heap with unbounded growth the isolate |
||
1774 |
// may eventually crash with this new limit - effectively raising |
||
1775 |
// the heap limit to the new one. |
||
1776 |
4 |
size_t new_limit = current_heap_limit + max_young_gen_size; |
|
1777 |
✓✓ | 4 |
if (env->is_in_heapsnapshot_heap_limit_callback_) { |
1778 |
Debug(env, |
||
1779 |
DebugCategory::DIAGNOSTICS, |
||
1780 |
"Not generating snapshots in nested callback. " |
||
1781 |
"new_limit=%" PRIu64 "\n", |
||
1782 |
2 |
static_cast<uint64_t>(new_limit)); |
|
1783 |
2 |
return new_limit; |
|
1784 |
} |
||
1785 |
|||
1786 |
// Estimate whether the snapshot is going to use up all the memory |
||
1787 |
// available to the process. If so, just give up to prevent the system |
||
1788 |
// from killing the process for a system OOM. |
||
1789 |
✗✓ | 2 |
if (estimated_overhead > available) { |
1790 |
Debug(env, |
||
1791 |
DebugCategory::DIAGNOSTICS, |
||
1792 |
"Not generating snapshots because it's too risky.\n"); |
||
1793 |
env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
||
1794 |
// The new limit must be higher than current_heap_limit or V8 might |
||
1795 |
// crash. |
||
1796 |
return new_limit; |
||
1797 |
} |
||
1798 |
|||
1799 |
// Take the snapshot synchronously. |
||
1800 |
2 |
env->is_in_heapsnapshot_heap_limit_callback_ = true; |
|
1801 |
|||
1802 |
4 |
std::string dir = env->options()->diagnostic_dir; |
|
1803 |
✓✗ | 2 |
if (dir.empty()) { |
1804 |
2 |
dir = env->GetCwd(); |
|
1805 |
} |
||
1806 |
4 |
DiagnosticFilename name(env, "Heap", "heapsnapshot"); |
|
1807 |
2 |
std::string filename = dir + kPathSeparator + (*name); |
|
1808 |
|||
1809 |
2 |
Debug(env, DebugCategory::DIAGNOSTICS, "Start generating %s...\n", *name); |
|
1810 |
|||
1811 |
2 |
heap::WriteSnapshot(env, filename.c_str()); |
|
1812 |
2 |
env->heap_limit_snapshot_taken_ += 1; |
|
1813 |
|||
1814 |
Debug(env, |
||
1815 |
DebugCategory::DIAGNOSTICS, |
||
1816 |
"%" PRIu32 "/%" PRIu32 " snapshots taken.\n", |
||
1817 |
2 |
env->heap_limit_snapshot_taken_, |
|
1818 |
2 |
env->heap_snapshot_near_heap_limit_); |
|
1819 |
|||
1820 |
// Don't take more snapshots than the limit specified. |
||
1821 |
✓✗ | 2 |
if (env->heap_limit_snapshot_taken_ == env->heap_snapshot_near_heap_limit_) { |
1822 |
Debug(env, |
||
1823 |
DebugCategory::DIAGNOSTICS, |
||
1824 |
"Removing the near heap limit callback"); |
||
1825 |
2 |
env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
|
1826 |
} |
||
1827 |
|||
1828 |
2 |
FPrintF(stderr, "Wrote snapshot to %s\n", filename.c_str()); |
|
1829 |
// Tell V8 to reset the heap limit once the heap usage falls down to |
||
1830 |
// 95% of the initial limit. |
||
1831 |
2 |
env->isolate()->AutomaticallyRestoreInitialHeapLimit(0.95); |
|
1832 |
|||
1833 |
2 |
env->is_in_heapsnapshot_heap_limit_callback_ = false; |
|
1834 |
|||
1835 |
// The new limit must be higher than current_heap_limit or V8 might |
||
1836 |
// crash. |
||
1837 |
2 |
return new_limit; |
|
1838 |
} |
||
1839 |
|||
1840 |
25 |
inline size_t Environment::SelfSize() const { |
|
1841 |
25 |
size_t size = sizeof(*this); |
|
1842 |
// Remove non pointer fields that will be tracked in MemoryInfo() |
||
1843 |
// TODO(joyeecheung): refactor the MemoryTracker interface so |
||
1844 |
// this can be done for common types within the Track* calls automatically |
||
1845 |
// if a certain scope is entered. |
||
1846 |
25 |
size -= sizeof(async_hooks_); |
|
1847 |
25 |
size -= sizeof(cleanup_queue_); |
|
1848 |
25 |
size -= sizeof(tick_info_); |
|
1849 |
25 |
size -= sizeof(immediate_info_); |
|
1850 |
25 |
return size; |
|
1851 |
} |
||
1852 |
|||
1853 |
25 |
void Environment::MemoryInfo(MemoryTracker* tracker) const { |
|
1854 |
// Iteratable STLs have their own sizes subtracted from the parent |
||
1855 |
// by default. |
||
1856 |
25 |
tracker->TrackField("isolate_data", isolate_data_); |
|
1857 |
25 |
tracker->TrackField("builtins_with_cache", builtins_with_cache); |
|
1858 |
25 |
tracker->TrackField("builtins_without_cache", builtins_without_cache); |
|
1859 |
25 |
tracker->TrackField("destroy_async_id_list", destroy_async_id_list_); |
|
1860 |
25 |
tracker->TrackField("exec_argv", exec_argv_); |
|
1861 |
25 |
tracker->TrackField("exiting", exiting_); |
|
1862 |
25 |
tracker->TrackField("should_abort_on_uncaught_toggle", |
|
1863 |
25 |
should_abort_on_uncaught_toggle_); |
|
1864 |
25 |
tracker->TrackField("stream_base_state", stream_base_state_); |
|
1865 |
25 |
tracker->TrackField("cleanup_queue", cleanup_queue_); |
|
1866 |
25 |
tracker->TrackField("async_hooks", async_hooks_); |
|
1867 |
25 |
tracker->TrackField("immediate_info", immediate_info_); |
|
1868 |
25 |
tracker->TrackField("tick_info", tick_info_); |
|
1869 |
25 |
tracker->TrackField("principal_realm", principal_realm_); |
|
1870 |
|||
1871 |
// FIXME(joyeecheung): track other fields in Environment. |
||
1872 |
// Currently MemoryTracker is unable to track these |
||
1873 |
// correctly: |
||
1874 |
// - Internal types that do not implement MemoryRetainer yet |
||
1875 |
// - STL containers with MemoryRetainer* inside |
||
1876 |
// - STL containers with numeric types inside that should not have their |
||
1877 |
// nodes elided e.g. numeric keys in maps. |
||
1878 |
// We also need to make sure that when we add a non-pointer field as its own |
||
1879 |
// node, we shift its sizeof() size out of the Environment node. |
||
1880 |
25 |
} |
|
1881 |
|||
1882 |
767702 |
void Environment::RunWeakRefCleanup() { |
|
1883 |
767702 |
isolate()->ClearKeptObjects(); |
|
1884 |
767702 |
} |
|
1885 |
|||
1886 |
// Not really any better place than env.cc at this moment. |
||
1887 |
1458307 |
BaseObject::BaseObject(Environment* env, Local<Object> object) |
|
1888 |
2916614 |
: persistent_handle_(env->isolate(), object), env_(env) { |
|
1889 |
✗✓ | 1458307 |
CHECK_EQ(false, object.IsEmpty()); |
1890 |
✗✓ | 1458307 |
CHECK_GE(object->InternalFieldCount(), BaseObject::kInternalFieldCount); |
1891 |
1458307 |
object->SetAlignedPointerInInternalField(BaseObject::kEmbedderType, |
|
1892 |
&kNodeEmbedderId); |
||
1893 |
1458307 |
object->SetAlignedPointerInInternalField(BaseObject::kSlot, |
|
1894 |
static_cast<void*>(this)); |
||
1895 |
1458307 |
env->AddCleanupHook(DeleteMe, static_cast<void*>(this)); |
|
1896 |
1458307 |
env->modify_base_object_count(1); |
|
1897 |
1458307 |
} |
|
1898 |
|||
1899 |
✓✓✓✓ |
7924286 |
BaseObject::~BaseObject() { |
1900 |
2885706 |
env()->modify_base_object_count(-1); |
|
1901 |
2885706 |
env()->RemoveCleanupHook(DeleteMe, static_cast<void*>(this)); |
|
1902 |
|||
1903 |
✓✓ | 2885706 |
if (UNLIKELY(has_pointer_data())) { |
1904 |
392578 |
PointerData* metadata = pointer_data(); |
|
1905 |
✗✓ | 392578 |
CHECK_EQ(metadata->strong_ptr_count, 0); |
1906 |
392578 |
metadata->self = nullptr; |
|
1907 |
✓✓ | 392578 |
if (metadata->weak_ptr_count == 0) delete metadata; |
1908 |
} |
||
1909 |
|||
1910 |
✓✓ | 2885706 |
if (persistent_handle_.IsEmpty()) { |
1911 |
// This most likely happened because the weak callback below cleared it. |
||
1912 |
2152874 |
return; |
|
1913 |
} |
||
1914 |
|||
1915 |
{ |
||
1916 |
732832 |
HandleScope handle_scope(env()->isolate()); |
|
1917 |
1465664 |
object()->SetAlignedPointerInInternalField(BaseObject::kSlot, nullptr); |
|
1918 |
} |
||
1919 |
} |
||
1920 |
|||
1921 |
1321568 |
void BaseObject::MakeWeak() { |
|
1922 |
✓✓ | 1321568 |
if (has_pointer_data()) { |
1923 |
42702 |
pointer_data()->wants_weak_jsobj = true; |
|
1924 |
✓✓ | 42702 |
if (pointer_data()->strong_ptr_count > 0) return; |
1925 |
} |
||
1926 |
|||
1927 |
2643134 |
persistent_handle_.SetWeak( |
|
1928 |
this, |
||
1929 |
1076436 |
[](const WeakCallbackInfo<BaseObject>& data) { |
|
1930 |
1076436 |
BaseObject* obj = data.GetParameter(); |
|
1931 |
// Clear the persistent handle so that ~BaseObject() doesn't attempt |
||
1932 |
// to mess with internal fields, since the JS object may have |
||
1933 |
// transitioned into an invalid state. |
||
1934 |
// Refs: https://github.com/nodejs/node/issues/18897 |
||
1935 |
1076436 |
obj->persistent_handle_.Reset(); |
|
1936 |
✓✓✗✓ ✗✓ |
1076436 |
CHECK_IMPLIES(obj->has_pointer_data(), |
1937 |
obj->pointer_data()->strong_ptr_count == 0); |
||
1938 |
1076436 |
obj->OnGCCollect(); |
|
1939 |
1076436 |
}, |
|
1940 |
WeakCallbackType::kParameter); |
||
1941 |
} |
||
1942 |
|||
1943 |
// This just has to be different from the Chromium ones: |
||
1944 |
// https://source.chromium.org/chromium/chromium/src/+/main:gin/public/gin_embedders.h;l=18-23;drc=5a758a97032f0b656c3c36a3497560762495501a |
||
1945 |
// Otherwise, when Node is loaded in an isolate which uses cppgc, cppgc will |
||
1946 |
// misinterpret the data stored in the embedder fields and try to garbage |
||
1947 |
// collect them. |
||
1948 |
uint16_t kNodeEmbedderId = 0x90de; |
||
1949 |
|||
1950 |
23732 |
void BaseObject::LazilyInitializedJSTemplateConstructor( |
|
1951 |
const FunctionCallbackInfo<Value>& args) { |
||
1952 |
DCHECK(args.IsConstructCall()); |
||
1953 |
✗✓ | 23732 |
CHECK_GE(args.This()->InternalFieldCount(), BaseObject::kInternalFieldCount); |
1954 |
23732 |
args.This()->SetAlignedPointerInInternalField(BaseObject::kEmbedderType, |
|
1955 |
&kNodeEmbedderId); |
||
1956 |
23732 |
args.This()->SetAlignedPointerInInternalField(BaseObject::kSlot, nullptr); |
|
1957 |
23732 |
} |
|
1958 |
|||
1959 |
21856 |
Local<FunctionTemplate> BaseObject::MakeLazilyInitializedJSTemplate( |
|
1960 |
Environment* env) { |
||
1961 |
Local<FunctionTemplate> t = NewFunctionTemplate( |
||
1962 |
21856 |
env->isolate(), LazilyInitializedJSTemplateConstructor); |
|
1963 |
21856 |
t->Inherit(BaseObject::GetConstructorTemplate(env)); |
|
1964 |
43712 |
t->InstanceTemplate()->SetInternalFieldCount(BaseObject::kInternalFieldCount); |
|
1965 |
21856 |
return t; |
|
1966 |
} |
||
1967 |
|||
1968 |
3352935 |
BaseObject::PointerData* BaseObject::pointer_data() { |
|
1969 |
✓✓ | 3352935 |
if (!has_pointer_data()) { |
1970 |
199096 |
PointerData* metadata = new PointerData(); |
|
1971 |
199096 |
metadata->wants_weak_jsobj = persistent_handle_.IsWeak(); |
|
1972 |
199096 |
metadata->self = this; |
|
1973 |
199096 |
pointer_data_ = metadata; |
|
1974 |
} |
||
1975 |
✗✓ | 3352935 |
CHECK(has_pointer_data()); |
1976 |
3352935 |
return pointer_data_; |
|
1977 |
} |
||
1978 |
|||
1979 |
836132 |
void BaseObject::decrease_refcount() { |
|
1980 |
✗✓ | 836132 |
CHECK(has_pointer_data()); |
1981 |
836132 |
PointerData* metadata = pointer_data(); |
|
1982 |
✗✓ | 836132 |
CHECK_GT(metadata->strong_ptr_count, 0); |
1983 |
836132 |
unsigned int new_refcount = --metadata->strong_ptr_count; |
|
1984 |
✓✓ | 836132 |
if (new_refcount == 0) { |
1985 |
✓✓ | 263637 |
if (metadata->is_detached) { |
1986 |
190330 |
OnGCCollect(); |
|
1987 |
✓✓✓✗ ✓✓ |
73307 |
} else if (metadata->wants_weak_jsobj && !persistent_handle_.IsEmpty()) { |
1988 |
42701 |
MakeWeak(); |
|
1989 |
} |
||
1990 |
} |
||
1991 |
836132 |
} |
|
1992 |
|||
1993 |
839091 |
void BaseObject::increase_refcount() { |
|
1994 |
839091 |
unsigned int prev_refcount = pointer_data()->strong_ptr_count++; |
|
1995 |
✓✓✓✓ ✓✓ |
839091 |
if (prev_refcount == 0 && !persistent_handle_.IsEmpty()) |
1996 |
266429 |
persistent_handle_.ClearWeak(); |
|
1997 |
839091 |
} |
|
1998 |
|||
1999 |
166952 |
void BaseObject::DeleteMe(void* data) { |
|
2000 |
166952 |
BaseObject* self = static_cast<BaseObject*>(data); |
|
2001 |
✓✓✓✓ |
171888 |
if (self->has_pointer_data() && |
2002 |
✓✓ | 4936 |
self->pointer_data()->strong_ptr_count > 0) { |
2003 |
822 |
return self->Detach(); |
|
2004 |
} |
||
2005 |
✓✗ | 166130 |
delete self; |
2006 |
} |
||
2007 |
|||
2008 |
589 |
bool BaseObject::IsDoneInitializing() const { return true; } |
|
2009 |
|||
2010 |
649 |
Local<Object> BaseObject::WrappedObject() const { |
|
2011 |
649 |
return object(); |
|
2012 |
} |
||
2013 |
|||
2014 |
1298 |
bool BaseObject::IsRootNode() const { |
|
2015 |
2596 |
return !persistent_handle_.IsWeak(); |
|
2016 |
} |
||
2017 |
|||
2018 |
56460 |
Local<FunctionTemplate> BaseObject::GetConstructorTemplate( |
|
2019 |
IsolateData* isolate_data) { |
||
2020 |
56460 |
Local<FunctionTemplate> tmpl = isolate_data->base_object_ctor_template(); |
|
2021 |
✓✓ | 56460 |
if (tmpl.IsEmpty()) { |
2022 |
798 |
tmpl = NewFunctionTemplate(isolate_data->isolate(), nullptr); |
|
2023 |
798 |
tmpl->SetClassName( |
|
2024 |
FIXED_ONE_BYTE_STRING(isolate_data->isolate(), "BaseObject")); |
||
2025 |
798 |
isolate_data->set_base_object_ctor_template(tmpl); |
|
2026 |
} |
||
2027 |
56460 |
return tmpl; |
|
2028 |
} |
||
2029 |
|||
2030 |
bool BaseObject::IsNotIndicativeOfMemoryLeakAtExit() const { |
||
2031 |
return IsWeakOrDetached(); |
||
2032 |
} |
||
2033 |
|||
2034 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |