GCC Code Coverage Report

Line | Branch | Exec | Source
1 |
#include "env.h" |
||
2 |
#include "async_wrap.h" |
||
3 |
#include "base_object-inl.h" |
||
4 |
#include "debug_utils-inl.h" |
||
5 |
#include "diagnosticfilename-inl.h" |
||
6 |
#include "memory_tracker-inl.h" |
||
7 |
#include "node_buffer.h" |
||
8 |
#include "node_context_data.h" |
||
9 |
#include "node_contextify.h" |
||
10 |
#include "node_errors.h" |
||
11 |
#include "node_internals.h" |
||
12 |
#include "node_options-inl.h" |
||
13 |
#include "node_process-inl.h" |
||
14 |
#include "node_v8_platform-inl.h" |
||
15 |
#include "node_worker.h" |
||
16 |
#include "req_wrap-inl.h" |
||
17 |
#include "stream_base.h" |
||
18 |
#include "tracing/agent.h" |
||
19 |
#include "tracing/traced_value.h" |
||
20 |
#include "util-inl.h" |
||
21 |
#include "v8-profiler.h" |
||
22 |
|||
23 |
#include <algorithm> |
||
24 |
#include <atomic> |
||
25 |
#include <cinttypes> |
||
26 |
#include <cstdio> |
||
27 |
#include <iostream> |
||
28 |
#include <limits> |
||
29 |
#include <memory> |
||
30 |
|||
31 |
namespace node { |
||
32 |
|||
33 |
using errors::TryCatchScope; |
||
34 |
using v8::Array; |
||
35 |
using v8::Boolean; |
||
36 |
using v8::Context; |
||
37 |
using v8::EmbedderGraph; |
||
38 |
using v8::EscapableHandleScope; |
||
39 |
using v8::Function; |
||
40 |
using v8::FunctionTemplate; |
||
41 |
using v8::HandleScope; |
||
42 |
using v8::HeapSpaceStatistics; |
||
43 |
using v8::Integer; |
||
44 |
using v8::Isolate; |
||
45 |
using v8::Local; |
||
46 |
using v8::MaybeLocal; |
||
47 |
using v8::NewStringType; |
||
48 |
using v8::Number; |
||
49 |
using v8::Object; |
||
50 |
using v8::Private; |
||
51 |
using v8::Script; |
||
52 |
using v8::SnapshotCreator; |
||
53 |
using v8::StackTrace; |
||
54 |
using v8::String; |
||
55 |
using v8::Symbol; |
||
56 |
using v8::TracingController; |
||
57 |
using v8::TryCatch; |
||
58 |
using v8::Undefined; |
||
59 |
using v8::Value; |
||
60 |
using worker::Worker; |
||
61 |
|||
62 |
int const ContextEmbedderTag::kNodeContextTag = 0x6e6f64; |
||
63 |
void* const ContextEmbedderTag::kNodeContextTagPtr = const_cast<void*>( |
||
64 |
static_cast<const void*>(&ContextEmbedderTag::kNodeContextTag)); |
||
65 |
|||
66 |
16356 |
void AsyncHooks::SetJSPromiseHooks(Local<Function> init, |
|
67 |
Local<Function> before, |
||
68 |
Local<Function> after, |
||
69 |
Local<Function> resolve) { |
||
70 |
16356 |
js_promise_hooks_[0].Reset(env()->isolate(), init); |
|
71 |
16356 |
js_promise_hooks_[1].Reset(env()->isolate(), before); |
|
72 |
16356 |
js_promise_hooks_[2].Reset(env()->isolate(), after); |
|
73 |
16356 |
js_promise_hooks_[3].Reset(env()->isolate(), resolve); |
|
74 |
✓✓ | 33057 |
for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
75 |
✗✓ | 16701 |
if (it->IsEmpty()) { |
76 |
contexts_.erase(it--); |
||
77 |
continue; |
||
78 |
} |
||
79 |
33402 |
PersistentToLocal::Weak(env()->isolate(), *it) |
|
80 |
16701 |
->SetPromiseHooks(init, before, after, resolve); |
|
81 |
} |
||
82 |
16356 |
} |
|
83 |
|||
84 |
// Remember to keep this code aligned with pushAsyncContext() in JS. |
||
85 |
845762 |
void AsyncHooks::push_async_context(double async_id, |
|
86 |
double trigger_async_id, |
||
87 |
Local<Object> resource) { |
||
88 |
// Since async_hooks is experimental, only perform the check |
||
89 |
// when async_hooks is enabled. |
||
90 |
✓✓ | 845762 |
if (fields_[kCheck] > 0) { |
91 |
✗✓ | 845758 |
CHECK_GE(async_id, -1); |
92 |
✗✓ | 845758 |
CHECK_GE(trigger_async_id, -1); |
93 |
} |
||
94 |
|||
95 |
845762 |
uint32_t offset = fields_[kStackLength]; |
|
96 |
✓✓ | 845762 |
if (offset * 2 >= async_ids_stack_.Length()) grow_async_ids_stack(); |
97 |
845762 |
async_ids_stack_[2 * offset] = async_id_fields_[kExecutionAsyncId]; |
|
98 |
845762 |
async_ids_stack_[2 * offset + 1] = async_id_fields_[kTriggerAsyncId]; |
|
99 |
845762 |
fields_[kStackLength] += 1; |
|
100 |
845762 |
async_id_fields_[kExecutionAsyncId] = async_id; |
|
101 |
845762 |
async_id_fields_[kTriggerAsyncId] = trigger_async_id; |
|
102 |
|||
103 |
#ifdef DEBUG |
||
104 |
for (uint32_t i = offset; i < native_execution_async_resources_.size(); i++) |
||
105 |
CHECK(native_execution_async_resources_[i].IsEmpty()); |
||
106 |
#endif |
||
107 |
|||
108 |
// When this call comes from JS (as a way of increasing the stack size), |
||
109 |
// `resource` will be empty, because JS caches these values anyway. |
||
110 |
✓✓ | 845762 |
if (!resource.IsEmpty()) { |
111 |
845758 |
native_execution_async_resources_.resize(offset + 1); |
|
112 |
// Caveat: This is a v8::Local<> assignment, we do not keep a v8::Global<>! |
||
113 |
845758 |
native_execution_async_resources_[offset] = resource; |
|
114 |
} |
||
115 |
845762 |
} |
|
116 |
|||
117 |
// Remember to keep this code aligned with popAsyncContext() in JS. |
||
118 |
845310 |
bool AsyncHooks::pop_async_context(double async_id) { |
|
119 |
// In case of an exception then this may have already been reset, if the |
||
120 |
// stack was multiple MakeCallback()'s deep. |
||
121 |
✓✓ | 845310 |
if (UNLIKELY(fields_[kStackLength] == 0)) return false; |
122 |
|||
123 |
// Ask for the async_id to be restored as a check that the stack |
||
124 |
// hasn't been corrupted. |
||
125 |
1688506 |
if (UNLIKELY(fields_[kCheck] > 0 && |
|
126 |
✓✓✓✓ ✓✓ |
1688506 |
async_id_fields_[kExecutionAsyncId] != async_id)) { |
127 |
4 |
FailWithCorruptedAsyncStack(async_id); |
|
128 |
} |
||
129 |
|||
130 |
844251 |
uint32_t offset = fields_[kStackLength] - 1; |
|
131 |
844251 |
async_id_fields_[kExecutionAsyncId] = async_ids_stack_[2 * offset]; |
|
132 |
844251 |
async_id_fields_[kTriggerAsyncId] = async_ids_stack_[2 * offset + 1]; |
|
133 |
844251 |
fields_[kStackLength] = offset; |
|
134 |
|||
135 |
1688502 |
if (LIKELY(offset < native_execution_async_resources_.size() && |
|
136 |
✓✗✓✗ ✓✗ |
1688502 |
!native_execution_async_resources_[offset].IsEmpty())) { |
137 |
#ifdef DEBUG |
||
138 |
for (uint32_t i = offset + 1; i < native_execution_async_resources_.size(); |
||
139 |
i++) { |
||
140 |
CHECK(native_execution_async_resources_[i].IsEmpty()); |
||
141 |
} |
||
142 |
#endif |
||
143 |
844251 |
native_execution_async_resources_.resize(offset); |
|
144 |
844251 |
if (native_execution_async_resources_.size() < |
|
145 |
✓✓✗✓ ✗✓ |
1104533 |
native_execution_async_resources_.capacity() / 2 && |
146 |
260282 |
native_execution_async_resources_.size() > 16) { |
|
147 |
native_execution_async_resources_.shrink_to_fit(); |
||
148 |
} |
||
149 |
} |
||
150 |
|||
151 |
✓✓ | 1688502 |
if (UNLIKELY(js_execution_async_resources()->Length() > offset)) { |
152 |
43431 |
HandleScope handle_scope(env()->isolate()); |
|
153 |
86862 |
USE(js_execution_async_resources()->Set( |
|
154 |
env()->context(), |
||
155 |
env()->length_string(), |
||
156 |
173724 |
Integer::NewFromUnsigned(env()->isolate(), offset))); |
|
157 |
} |
||
158 |
|||
159 |
844251 |
return fields_[kStackLength] > 0; |
|
160 |
} |
||
161 |
|||
162 |
2315 |
void AsyncHooks::clear_async_id_stack() { |
|
163 |
✓✓ | 2315 |
if (env()->can_call_into_js()) { |
164 |
1366 |
Isolate* isolate = env()->isolate(); |
|
165 |
2732 |
HandleScope handle_scope(isolate); |
|
166 |
✓✓ | 1366 |
if (!js_execution_async_resources_.IsEmpty()) { |
167 |
2580 |
USE(PersistentToLocal::Strong(js_execution_async_resources_) |
|
168 |
2580 |
->Set(env()->context(), |
|
169 |
env()->length_string(), |
||
170 |
5160 |
Integer::NewFromUnsigned(isolate, 0))); |
|
171 |
} |
||
172 |
} |
||
173 |
|||
174 |
2315 |
native_execution_async_resources_.clear(); |
|
175 |
2315 |
native_execution_async_resources_.shrink_to_fit(); |
|
176 |
|||
177 |
2315 |
async_id_fields_[kExecutionAsyncId] = 0; |
|
178 |
2315 |
async_id_fields_[kTriggerAsyncId] = 0; |
|
179 |
2315 |
fields_[kStackLength] = 0; |
|
180 |
2315 |
} |
|
181 |
|||
182 |
6984 |
void AsyncHooks::AddContext(Local<Context> ctx) { |
|
183 |
✓✓ | 20952 |
ctx->SetPromiseHooks(js_promise_hooks_[0].IsEmpty() |
184 |
6984 |
? Local<Function>() |
|
185 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[0]), |
|
186 |
✓✓ | 6984 |
js_promise_hooks_[1].IsEmpty() |
187 |
6984 |
? Local<Function>() |
|
188 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[1]), |
|
189 |
✓✓ | 6984 |
js_promise_hooks_[2].IsEmpty() |
190 |
6984 |
? Local<Function>() |
|
191 |
205 |
: PersistentToLocal::Strong(js_promise_hooks_[2]), |
|
192 |
✓✗ | 6984 |
js_promise_hooks_[3].IsEmpty() |
193 |
6984 |
? Local<Function>() |
|
194 |
: PersistentToLocal::Strong(js_promise_hooks_[3])); |
||
195 |
|||
196 |
6984 |
size_t id = contexts_.size(); |
|
197 |
6984 |
contexts_.resize(id + 1); |
|
198 |
6984 |
contexts_[id].Reset(env()->isolate(), ctx); |
|
199 |
6984 |
contexts_[id].SetWeak(); |
|
200 |
6984 |
} |
|
201 |
|||
202 |
517 |
void AsyncHooks::RemoveContext(Local<Context> ctx) { |
|
203 |
517 |
Isolate* isolate = env()->isolate(); |
|
204 |
1034 |
HandleScope handle_scope(isolate); |
|
205 |
517 |
contexts_.erase(std::remove_if(contexts_.begin(), |
|
206 |
contexts_.end(), |
||
207 |
2957 |
[&](auto&& el) { return el.IsEmpty(); }), |
|
208 |
1034 |
contexts_.end()); |
|
209 |
✓✓ | 2440 |
for (auto it = contexts_.begin(); it != contexts_.end(); it++) { |
210 |
2395 |
Local<Context> saved_context = PersistentToLocal::Weak(isolate, *it); |
|
211 |
✓✓ | 2395 |
if (saved_context == ctx) { |
212 |
472 |
it->Reset(); |
|
213 |
472 |
contexts_.erase(it); |
|
214 |
472 |
break; |
|
215 |
} |
||
216 |
} |
||
217 |
517 |
} |
|
218 |
|||
219 |
239568 |
AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
|
220 |
239568 |
Environment* env, double default_trigger_async_id) |
|
221 |
239568 |
: async_hooks_(env->async_hooks()) { |
|
222 |
✓✗ | 239568 |
if (env->async_hooks()->fields()[AsyncHooks::kCheck] > 0) { |
223 |
✗✓ | 239568 |
CHECK_GE(default_trigger_async_id, 0); |
224 |
} |
||
225 |
|||
226 |
239568 |
old_default_trigger_async_id_ = |
|
227 |
239568 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId]; |
|
228 |
239568 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
|
229 |
239568 |
default_trigger_async_id; |
|
230 |
239568 |
} |
|
231 |
|||
232 |
479134 |
AsyncHooks::DefaultTriggerAsyncIdScope::~DefaultTriggerAsyncIdScope() { |
|
233 |
239567 |
async_hooks_->async_id_fields()[AsyncHooks::kDefaultTriggerAsyncId] = |
|
234 |
239567 |
old_default_trigger_async_id_; |
|
235 |
239567 |
} |
|
236 |
|||
237 |
239568 |
AsyncHooks::DefaultTriggerAsyncIdScope::DefaultTriggerAsyncIdScope( |
|
238 |
239568 |
AsyncWrap* async_wrap) |
|
239 |
: DefaultTriggerAsyncIdScope(async_wrap->env(), |
||
240 |
239568 |
async_wrap->get_async_id()) {} |
|
241 |
|||
242 |
12 |
std::ostream& operator<<(std::ostream& output, |
|
243 |
const std::vector<SnapshotIndex>& v) { |
||
244 |
12 |
output << "{ "; |
|
245 |
✓✓ | 2136 |
for (const SnapshotIndex i : v) { |
246 |
2124 |
output << i << ", "; |
|
247 |
} |
||
248 |
12 |
output << " }"; |
|
249 |
12 |
return output; |
|
250 |
} |
||
251 |
|||
252 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
253 |
const IsolateDataSerializeInfo& i) { |
||
254 |
output << "{\n" |
||
255 |
6 |
<< "// -- primitive begins --\n" |
|
256 |
6 |
<< i.primitive_values << ",\n" |
|
257 |
<< "// -- primitive ends --\n" |
||
258 |
6 |
<< "// -- template_values begins --\n" |
|
259 |
6 |
<< i.template_values << ",\n" |
|
260 |
<< "// -- template_values ends --\n" |
||
261 |
6 |
<< "}"; |
|
262 |
6 |
return output; |
|
263 |
} |
||
264 |
|||
265 |
6 |
std::ostream& operator<<(std::ostream& output, const SnapshotMetadata& i) { |
|
266 |
output << "{\n" |
||
267 |
<< " " |
||
268 |
6 |
<< (i.type == SnapshotMetadata::Type::kDefault |
|
269 |
? "SnapshotMetadata::Type::kDefault" |
||
270 |
: "SnapshotMetadata::Type::kFullyCustomized") |
||
271 |
<< ", // type\n" |
||
272 |
6 |
<< " \"" << i.node_version << "\", // node_version\n" |
|
273 |
6 |
<< " \"" << i.node_arch << "\", // node_arch\n" |
|
274 |
6 |
<< " \"" << i.node_platform << "\", // node_platform\n" |
|
275 |
✓✗ | 6 |
<< " " << i.v8_cache_version_tag << ", // v8_cache_version_tag\n" |
276 |
6 |
<< "}"; |
|
277 |
6 |
return output; |
|
278 |
} |
||
279 |
|||
280 |
7 |
IsolateDataSerializeInfo IsolateData::Serialize(SnapshotCreator* creator) { |
|
281 |
7 |
Isolate* isolate = creator->GetIsolate(); |
|
282 |
7 |
IsolateDataSerializeInfo info; |
|
283 |
14 |
HandleScope handle_scope(isolate); |
|
284 |
// XXX(joyeecheung): technically speaking, the indexes here should be |
||
285 |
// consecutive and we could just return a range instead of an array, |
||
286 |
// but that's not part of the V8 API contract so we use an array |
||
287 |
// just to be safe. |
||
288 |
|||
289 |
#define VP(PropertyName, StringValue) V(Private, PropertyName) |
||
290 |
#define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
||
291 |
#define VS(PropertyName, StringValue) V(String, PropertyName) |
||
292 |
#define V(TypeName, PropertyName) \ |
||
293 |
info.primitive_values.push_back( \ |
||
294 |
creator->AddData(PropertyName##_.Get(isolate))); |
||
295 |
63 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
|
296 |
91 |
PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
|
297 |
1939 |
PER_ISOLATE_STRING_PROPERTIES(VS) |
|
298 |
#undef V |
||
299 |
#undef VY |
||
300 |
#undef VS |
||
301 |
#undef VP |
||
302 |
|||
303 |
✓✓ | 413 |
for (size_t i = 0; i < AsyncWrap::PROVIDERS_LENGTH; i++) |
304 |
812 |
info.primitive_values.push_back(creator->AddData(async_wrap_provider(i))); |
|
305 |
|||
306 |
7 |
uint32_t id = 0; |
|
307 |
#define V(PropertyName, TypeName) \ |
||
308 |
do { \ |
||
309 |
Local<TypeName> field = PropertyName(); \ |
||
310 |
if (!field.IsEmpty()) { \ |
||
311 |
size_t index = creator->AddData(field); \ |
||
312 |
info.template_values.push_back({#PropertyName, id, index}); \ |
||
313 |
} \ |
||
314 |
id++; \ |
||
315 |
} while (0); |
||
316 |
✓✗ … (branch markers elided) |
420 |
PER_ISOLATE_TEMPLATE_PROPERTIES(V) |
317 |
#undef V |
||
318 |
|||
319 |
7 |
return info; |
|
320 |
} |
||
321 |
|||
322 |
5566 |
void IsolateData::DeserializeProperties(const IsolateDataSerializeInfo* info) { |
|
323 |
5566 |
size_t i = 0; |
|
324 |
5566 |
HandleScope handle_scope(isolate_); |
|
325 |
|||
326 |
#define VP(PropertyName, StringValue) V(Private, PropertyName) |
||
327 |
#define VY(PropertyName, StringValue) V(Symbol, PropertyName) |
||
328 |
#define VS(PropertyName, StringValue) V(String, PropertyName) |
||
329 |
#define V(TypeName, PropertyName) \ |
||
330 |
do { \ |
||
331 |
MaybeLocal<TypeName> maybe_field = \ |
||
332 |
isolate_->GetDataFromSnapshotOnce<TypeName>( \ |
||
333 |
info->primitive_values[i++]); \ |
||
334 |
Local<TypeName> field; \ |
||
335 |
if (!maybe_field.ToLocal(&field)) { \ |
||
336 |
fprintf(stderr, "Failed to deserialize " #PropertyName "\n"); \ |
||
337 |
} \ |
||
338 |
PropertyName##_.Set(isolate_, field); \ |
||
339 |
} while (0); |
||
340 |
✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ |
94622 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(VP) |
341 |
✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ ✗✓✗✓ |
139150 |
PER_ISOLATE_SYMBOL_PROPERTIES(VY) |
342 |
✗✓ … (branch markers elided) |
3077998 |
PER_ISOLATE_STRING_PROPERTIES(VS) |
343 |
#undef V |
||
344 |
#undef VY |
||
345 |
#undef VS |
||
346 |
#undef VP |
||
347 |
|||
348 |
✓✓ | 328394 |
for (size_t j = 0; j < AsyncWrap::PROVIDERS_LENGTH; j++) { |
349 |
MaybeLocal<String> maybe_field = |
||
350 |
645656 |
isolate_->GetDataFromSnapshotOnce<String>(info->primitive_values[i++]); |
|
351 |
Local<String> field; |
||
352 |
✗✓ | 322828 |
if (!maybe_field.ToLocal(&field)) { |
353 |
fprintf(stderr, "Failed to deserialize AsyncWrap provider %zu\n", j); |
||
354 |
} |
||
355 |
322828 |
async_wrap_providers_[j].Set(isolate_, field); |
|
356 |
} |
||
357 |
|||
358 |
5566 |
const std::vector<PropInfo>& values = info->template_values; |
|
359 |
5566 |
i = 0; // index to the array |
|
360 |
5566 |
uint32_t id = 0; |
|
361 |
#define V(PropertyName, TypeName) \ |
||
362 |
do { \ |
||
363 |
if (values.size() > i && id == values[i].id) { \ |
||
364 |
const PropInfo& d = values[i]; \ |
||
365 |
DCHECK_EQ(d.name, #PropertyName); \ |
||
366 |
MaybeLocal<TypeName> maybe_field = \ |
||
367 |
isolate_->GetDataFromSnapshotOnce<TypeName>(d.index); \ |
||
368 |
Local<TypeName> field; \ |
||
369 |
if (!maybe_field.ToLocal(&field)) { \ |
||
370 |
fprintf(stderr, \ |
||
371 |
"Failed to deserialize isolate data template " #PropertyName \ |
||
372 |
"\n"); \ |
||
373 |
} \ |
||
374 |
set_##PropertyName(field); \ |
||
375 |
i++; \ |
||
376 |
} \ |
||
377 |
id++; \ |
||
378 |
} while (0); |
||
379 |
|||
380 |
✓✗ … (branch markers elided) |
239338 |
PER_ISOLATE_TEMPLATE_PROPERTIES(V); |
381 |
#undef V |
||
382 |
5566 |
} |
|
383 |
|||
384 |
796 |
void IsolateData::CreateProperties() { |
|
385 |
// Create string and private symbol properties as internalized one byte |
||
386 |
// strings after the platform is properly initialized. |
||
387 |
// |
||
388 |
// Internalized because it makes property lookups a little faster and |
||
389 |
// because the string is created in the old space straight away. It's going |
||
390 |
// to end up in the old space sooner or later anyway but now it doesn't go |
||
391 |
// through v8::Eternal's new space handling first. |
||
392 |
// |
||
393 |
// One byte because our strings are ASCII and we can safely skip V8's UTF-8 |
||
394 |
// decoding step. |
||
395 |
|||
396 |
1592 |
HandleScope handle_scope(isolate_); |
|
397 |
|||
398 |
#define V(PropertyName, StringValue) \ |
||
399 |
PropertyName##_.Set( \ |
||
400 |
isolate_, \ |
||
401 |
Private::New(isolate_, \ |
||
402 |
String::NewFromOneByte( \ |
||
403 |
isolate_, \ |
||
404 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
405 |
NewStringType::kInternalized, \ |
||
406 |
sizeof(StringValue) - 1) \ |
||
407 |
.ToLocalChecked())); |
||
408 |
7164 |
PER_ISOLATE_PRIVATE_SYMBOL_PROPERTIES(V) |
|
409 |
#undef V |
||
410 |
#define V(PropertyName, StringValue) \ |
||
411 |
PropertyName##_.Set( \ |
||
412 |
isolate_, \ |
||
413 |
Symbol::New(isolate_, \ |
||
414 |
String::NewFromOneByte( \ |
||
415 |
isolate_, \ |
||
416 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
417 |
NewStringType::kInternalized, \ |
||
418 |
sizeof(StringValue) - 1) \ |
||
419 |
.ToLocalChecked())); |
||
420 |
10348 |
PER_ISOLATE_SYMBOL_PROPERTIES(V) |
|
421 |
#undef V |
||
422 |
#define V(PropertyName, StringValue) \ |
||
423 |
PropertyName##_.Set( \ |
||
424 |
isolate_, \ |
||
425 |
String::NewFromOneByte(isolate_, \ |
||
426 |
reinterpret_cast<const uint8_t*>(StringValue), \ |
||
427 |
NewStringType::kInternalized, \ |
||
428 |
sizeof(StringValue) - 1) \ |
||
429 |
.ToLocalChecked()); |
||
430 |
220492 |
PER_ISOLATE_STRING_PROPERTIES(V) |
|
431 |
#undef V |
||
432 |
|||
433 |
// Create all the provider strings that will be passed to JS. Place them in |
||
434 |
// an array so the array index matches the PROVIDER id offset. This way the |
||
435 |
// strings can be retrieved quickly. |
||
436 |
#define V(Provider) \ |
||
437 |
async_wrap_providers_[AsyncWrap::PROVIDER_ ## Provider].Set( \ |
||
438 |
isolate_, \ |
||
439 |
String::NewFromOneByte( \ |
||
440 |
isolate_, \ |
||
441 |
reinterpret_cast<const uint8_t*>(#Provider), \ |
||
442 |
NewStringType::kInternalized, \ |
||
443 |
sizeof(#Provider) - 1).ToLocalChecked()); |
||
444 |
46964 |
NODE_ASYNC_PROVIDER_TYPES(V) |
|
445 |
#undef V |
||
446 |
|||
447 |
// TODO(legendecas): eagerly create per isolate templates. |
||
448 |
796 |
Local<FunctionTemplate> templ = FunctionTemplate::New(isolate()); |
|
449 |
1592 |
templ->InstanceTemplate()->SetInternalFieldCount( |
|
450 |
BaseObject::kInternalFieldCount); |
||
451 |
796 |
templ->Inherit(BaseObject::GetConstructorTemplate(this)); |
|
452 |
796 |
set_binding_data_ctor_template(templ); |
|
453 |
|||
454 |
796 |
contextify::ContextifyContext::InitializeGlobalTemplates(this); |
|
455 |
796 |
} |
|
456 |
|||
457 |
6362 |
IsolateData::IsolateData(Isolate* isolate, |
|
458 |
uv_loop_t* event_loop, |
||
459 |
MultiIsolatePlatform* platform, |
||
460 |
ArrayBufferAllocator* node_allocator, |
||
461 |
6362 |
const IsolateDataSerializeInfo* isolate_data_info) |
|
462 |
: isolate_(isolate), |
||
463 |
event_loop_(event_loop), |
||
464 |
52 |
node_allocator_(node_allocator == nullptr ? nullptr |
|
465 |
6310 |
: node_allocator->GetImpl()), |
|
466 |
✓✓ | 12724 |
platform_(platform) { |
467 |
6362 |
options_.reset( |
|
468 |
6362 |
new PerIsolateOptions(*(per_process::cli_options->per_isolate))); |
|
469 |
|||
470 |
✓✓ | 6362 |
if (isolate_data_info == nullptr) { |
471 |
796 |
CreateProperties(); |
|
472 |
} else { |
||
473 |
5566 |
DeserializeProperties(isolate_data_info); |
|
474 |
} |
||
475 |
6362 |
} |
|
476 |
|||
477 |
25 |
void IsolateData::MemoryInfo(MemoryTracker* tracker) const { |
|
478 |
#define V(PropertyName, StringValue) \ |
||
479 |
tracker->TrackField(#PropertyName, PropertyName()); |
||
480 |
25 |
PER_ISOLATE_SYMBOL_PROPERTIES(V) |
|
481 |
|||
482 |
25 |
PER_ISOLATE_STRING_PROPERTIES(V) |
|
483 |
#undef V |
||
484 |
|||
485 |
25 |
tracker->TrackField("async_wrap_providers", async_wrap_providers_); |
|
486 |
|||
487 |
✓✗ | 25 |
if (node_allocator_ != nullptr) { |
488 |
25 |
tracker->TrackFieldWithSize( |
|
489 |
"node_allocator", sizeof(*node_allocator_), "NodeArrayBufferAllocator"); |
||
490 |
} |
||
491 |
25 |
tracker->TrackFieldWithSize( |
|
492 |
"platform", sizeof(*platform_), "MultiIsolatePlatform"); |
||
493 |
// TODO(joyeecheung): implement MemoryRetainer in the option classes. |
||
494 |
25 |
} |
|
495 |
|||
496 |
154 |
void TrackingTraceStateObserver::UpdateTraceCategoryState() { |
|
497 |
✓✓✓✓ ✓✓ |
154 |
if (!env_->owns_process_state() || !env_->can_call_into_js()) { |
498 |
// Ideally, we’d have a consistent story that treats all threads/Environment |
||
499 |
// instances equally here. However, tracing is essentially global, and this |
||
500 |
// callback is called from whichever thread calls `StartTracing()` or |
||
501 |
// `StopTracing()`. The only way to do this in a threadsafe fashion |
||
502 |
// seems to be only tracking this from the main thread, and only allowing |
||
503 |
// these state modifications from the main thread. |
||
504 |
96 |
return; |
|
505 |
} |
||
506 |
|||
507 |
✓✓ | 143 |
if (env_->principal_realm() == nullptr) { |
508 |
85 |
return; |
|
509 |
} |
||
510 |
|||
511 |
58 |
bool async_hooks_enabled = (*(TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
|
512 |
58 |
TRACING_CATEGORY_NODE1(async_hooks)))) != 0; |
|
513 |
|||
514 |
58 |
Isolate* isolate = env_->isolate(); |
|
515 |
58 |
HandleScope handle_scope(isolate); |
|
516 |
58 |
Local<Function> cb = env_->trace_category_state_function(); |
|
517 |
✗✓ | 58 |
if (cb.IsEmpty()) |
518 |
return; |
||
519 |
58 |
TryCatchScope try_catch(env_); |
|
520 |
58 |
try_catch.SetVerbose(true); |
|
521 |
✓✓ | 116 |
Local<Value> args[] = {Boolean::New(isolate, async_hooks_enabled)}; |
522 |
116 |
USE(cb->Call(env_->context(), Undefined(isolate), arraysize(args), args)); |
|
523 |
} |
||
524 |
|||
525 |
6984 |
void Environment::AssignToContext(Local<v8::Context> context, |
|
526 |
Realm* realm, |
||
527 |
const ContextInfo& info) { |
||
528 |
6984 |
context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
|
529 |
this); |
||
530 |
6984 |
context->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kRealm, realm); |
|
531 |
// Used to retrieve bindings |
||
532 |
13968 |
context->SetAlignedPointerInEmbedderData( |
|
533 |
6984 |
ContextEmbedderIndex::kBindingListIndex, &(this->bindings_)); |
|
534 |
|||
535 |
// ContextifyContexts will update this to a pointer to the native object. |
||
536 |
6984 |
context->SetAlignedPointerInEmbedderData( |
|
537 |
ContextEmbedderIndex::kContextifyContext, nullptr); |
||
538 |
|||
539 |
// This must not be done before other context fields are initialized. |
||
540 |
6984 |
ContextEmbedderTag::TagNodeContext(context); |
|
541 |
|||
542 |
#if HAVE_INSPECTOR |
||
543 |
6984 |
inspector_agent()->ContextCreated(context, info); |
|
544 |
#endif // HAVE_INSPECTOR |
||
545 |
|||
546 |
6984 |
this->async_hooks()->AddContext(context); |
|
547 |
6984 |
} |
|
548 |
|||
549 |
186 |
void Environment::TryLoadAddon( |
|
550 |
const char* filename, |
||
551 |
int flags, |
||
552 |
const std::function<bool(binding::DLib*)>& was_loaded) { |
||
553 |
186 |
loaded_addons_.emplace_back(filename, flags); |
|
554 |
✓✓ | 186 |
if (!was_loaded(&loaded_addons_.back())) { |
555 |
10 |
loaded_addons_.pop_back(); |
|
556 |
} |
||
557 |
186 |
} |
|
558 |
|||
559 |
12 |
std::string Environment::GetCwd() { |
|
560 |
char cwd[PATH_MAX_BYTES]; |
||
561 |
12 |
size_t size = PATH_MAX_BYTES; |
|
562 |
12 |
const int err = uv_cwd(cwd, &size); |
|
563 |
|||
564 |
✓✗ | 12 |
if (err == 0) { |
565 |
✗✓ | 12 |
CHECK_GT(size, 0); |
566 |
12 |
return cwd; |
|
567 |
} |
||
568 |
|||
569 |
// This can fail if the cwd is deleted. In that case, fall back to |
||
570 |
// exec_path. |
||
571 |
const std::string& exec_path = exec_path_; |
||
572 |
return exec_path.substr(0, exec_path.find_last_of(kPathSeparator)); |
||
573 |
} |
||
574 |
|||
575 |
1915 |
void Environment::add_refs(int64_t diff) { |
|
576 |
1915 |
task_queues_async_refs_ += diff; |
|
577 |
✗✓ | 1915 |
CHECK_GE(task_queues_async_refs_, 0); |
578 |
✓✓ | 1915 |
if (task_queues_async_refs_ == 0) |
579 |
427 |
uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
580 |
else |
||
581 |
1488 |
uv_ref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
582 |
1915 |
} |
|
583 |
|||
584 |
66721 |
uv_buf_t Environment::allocate_managed_buffer(const size_t suggested_size) { |
|
585 |
133442 |
NoArrayBufferZeroFillScope no_zero_fill_scope(isolate_data()); |
|
586 |
std::unique_ptr<v8::BackingStore> bs = |
||
587 |
66721 |
v8::ArrayBuffer::NewBackingStore(isolate(), suggested_size); |
|
588 |
66721 |
uv_buf_t buf = uv_buf_init(static_cast<char*>(bs->Data()), bs->ByteLength()); |
|
589 |
66721 |
released_allocated_buffers_.emplace(buf.base, std::move(bs)); |
|
590 |
66721 |
return buf; |
|
591 |
} |
||
592 |
|||
593 |
81678 |
std::unique_ptr<v8::BackingStore> Environment::release_managed_buffer( |
|
594 |
const uv_buf_t& buf) { |
||
595 |
81678 |
std::unique_ptr<v8::BackingStore> bs; |
|
596 |
✓✓ | 81678 |
if (buf.base != nullptr) { |
597 |
66721 |
auto it = released_allocated_buffers_.find(buf.base); |
|
598 |
✗✓ | 66721 |
CHECK_NE(it, released_allocated_buffers_.end()); |
599 |
66721 |
bs = std::move(it->second); |
|
600 |
66721 |
released_allocated_buffers_.erase(it); |
|
601 |
} |
||
602 |
81678 |
return bs; |
|
603 |
} |
||
604 |
|||
605 |
6354 |
std::string GetExecPath(const std::vector<std::string>& argv) { |
|
606 |
char exec_path_buf[2 * PATH_MAX]; |
||
607 |
6354 |
size_t exec_path_len = sizeof(exec_path_buf); |
|
608 |
6354 |
std::string exec_path; |
|
609 |
✓✗ | 6354 |
if (uv_exepath(exec_path_buf, &exec_path_len) == 0) { |
610 |
6354 |
exec_path = std::string(exec_path_buf, exec_path_len); |
|
611 |
} else { |
||
612 |
exec_path = argv[0]; |
||
613 |
} |
||
614 |
|||
615 |
// On OpenBSD process.execPath will be relative unless we |
||
616 |
// get the full path before process.execPath is used. |
||
617 |
#if defined(__OpenBSD__) |
||
618 |
uv_fs_t req; |
||
619 |
req.ptr = nullptr; |
||
620 |
if (0 == |
||
621 |
uv_fs_realpath(nullptr, &req, exec_path.c_str(), nullptr)) { |
||
622 |
CHECK_NOT_NULL(req.ptr); |
||
623 |
exec_path = std::string(static_cast<char*>(req.ptr)); |
||
624 |
} |
||
625 |
uv_fs_req_cleanup(&req); |
||
626 |
#endif |
||
627 |
|||
628 |
6354 |
return exec_path; |
|
629 |
} |
||
630 |
|||
631 |
6354 |
Environment::Environment(IsolateData* isolate_data, |
|
632 |
Isolate* isolate, |
||
633 |
const std::vector<std::string>& args, |
||
634 |
const std::vector<std::string>& exec_args, |
||
635 |
const EnvSerializeInfo* env_info, |
||
636 |
EnvironmentFlags::Flags flags, |
||
637 |
6354 |
ThreadId thread_id) |
|
638 |
: isolate_(isolate), |
||
639 |
isolate_data_(isolate_data), |
||
640 |
async_hooks_(isolate, MAYBE_FIELD_PTR(env_info, async_hooks)), |
||
641 |
immediate_info_(isolate, MAYBE_FIELD_PTR(env_info, immediate_info)), |
||
642 |
tick_info_(isolate, MAYBE_FIELD_PTR(env_info, tick_info)), |
||
643 |
6354 |
timer_base_(uv_now(isolate_data->event_loop())), |
|
644 |
exec_argv_(exec_args), |
||
645 |
argv_(args), |
||
646 |
exec_path_(GetExecPath(args)), |
||
647 |
6354 |
exiting_(isolate_, 1, MAYBE_FIELD_PTR(env_info, exiting)), |
|
648 |
should_abort_on_uncaught_toggle_( |
||
649 |
6354 |
isolate_, |
|
650 |
1, |
||
651 |
MAYBE_FIELD_PTR(env_info, should_abort_on_uncaught_toggle)), |
||
652 |
6354 |
stream_base_state_(isolate_, |
|
653 |
StreamBase::kNumStreamBaseStateFields, |
||
654 |
MAYBE_FIELD_PTR(env_info, stream_base_state)), |
||
655 |
6354 |
time_origin_(PERFORMANCE_NOW()), |
|
656 |
6354 |
time_origin_timestamp_(GetCurrentTimeInMicroseconds()), |
|
657 |
flags_(flags), |
||
658 |
6354 |
thread_id_(thread_id.id == static_cast<uint64_t>(-1) |
|
659 |
6354 |
? AllocateEnvironmentThreadId().id |
|
660 |
✓✓✓✓ ✓✓✓✓ ✓✓✓✓ ✓✓ |
25416 |
: thread_id.id) { |
661 |
// We'll be creating new objects so make sure we've entered the context. |
||
662 |
12708 |
HandleScope handle_scope(isolate); |
|
663 |
|||
664 |
// Set some flags if only kDefaultFlags was passed. This can make API version |
||
665 |
// transitions easier for embedders. |
||
666 |
✓✓ | 6354 |
if (flags_ & EnvironmentFlags::kDefaultFlags) { |
667 |
11248 |
flags_ = flags_ | |
|
668 |
5624 |
EnvironmentFlags::kOwnsProcessState | |
|
669 |
EnvironmentFlags::kOwnsInspector; |
||
670 |
} |
||
671 |
|||
672 |
6354 |
set_env_vars(per_process::system_environment); |
|
673 |
6354 |
enabled_debug_list_.Parse(env_vars(), isolate); |
|
674 |
|||
675 |
// We create new copies of the per-Environment option sets, so that it is |
||
676 |
// easier to modify them after Environment creation. The defaults are |
||
677 |
// part of the per-Isolate option set, for which in turn the defaults are |
||
678 |
// part of the per-process option set. |
||
679 |
12708 |
options_ = std::make_shared<EnvironmentOptions>( |
|
680 |
19062 |
*isolate_data->options()->per_env); |
|
681 |
6354 |
inspector_host_port_ = std::make_shared<ExclusiveAccess<HostPort>>( |
|
682 |
6354 |
options_->debug_options().host_port); |
|
683 |
|||
684 |
6354 |
heap_snapshot_near_heap_limit_ = |
|
685 |
6354 |
static_cast<uint32_t>(options_->heap_snapshot_near_heap_limit); |
|
686 |
|||
687 |
✓✓ | 6354 |
if (!(flags_ & EnvironmentFlags::kOwnsProcessState)) { |
688 |
730 |
set_abort_on_uncaught_exception(false); |
|
689 |
} |
||
690 |
|||
691 |
#if HAVE_INSPECTOR |
||
692 |
// We can only create the inspector agent after having cloned the options. |
||
693 |
6354 |
inspector_agent_ = std::make_unique<inspector::Agent>(this); |
|
694 |
#endif |
||
695 |
|||
696 |
✓✗ | 6354 |
if (tracing::AgentWriterHandle* writer = GetTracingAgentWriter()) { |
697 |
6354 |
trace_state_observer_ = std::make_unique<TrackingTraceStateObserver>(this); |
|
698 |
✓✓ | 6354 |
if (TracingController* tracing_controller = writer->GetTracingController()) |
699 |
6303 |
tracing_controller->AddTraceStateObserver(trace_state_observer_.get()); |
|
700 |
} |
||
701 |
|||
702 |
6354 |
destroy_async_id_list_.reserve(512); |
|
703 |
|||
704 |
6354 |
performance_state_ = std::make_unique<performance::PerformanceState>( |
|
705 |
✓✓ | 6354 |
isolate, MAYBE_FIELD_PTR(env_info, performance_state)); |
706 |
|||
707 |
6354 |
if (*TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED( |
|
708 |
✓✓ | 6354 |
TRACING_CATEGORY_NODE1(environment)) != 0) { |
709 |
16 |
auto traced_value = tracing::TracedValue::Create(); |
|
710 |
8 |
traced_value->BeginArray("args"); |
|
711 |
✓✓ | 18 |
for (const std::string& arg : args) traced_value->AppendString(arg); |
712 |
8 |
traced_value->EndArray(); |
|
713 |
8 |
traced_value->BeginArray("exec_args"); |
|
714 |
✓✓ | 33 |
for (const std::string& arg : exec_args) traced_value->AppendString(arg); |
715 |
8 |
traced_value->EndArray(); |
|
716 |
✓✓✓✗ |
15 |
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(TRACING_CATEGORY_NODE1(environment), |
717 |
"Environment", |
||
718 |
this, |
||
719 |
"args", |
||
720 |
std::move(traced_value)); |
||
721 |
} |
||
722 |
6354 |
} |
|
723 |
|||
724 |
788 |
Environment::Environment(IsolateData* isolate_data, |
|
725 |
Local<Context> context, |
||
726 |
const std::vector<std::string>& args, |
||
727 |
const std::vector<std::string>& exec_args, |
||
728 |
const EnvSerializeInfo* env_info, |
||
729 |
EnvironmentFlags::Flags flags, |
||
730 |
788 |
ThreadId thread_id) |
|
731 |
: Environment(isolate_data, |
||
732 |
context->GetIsolate(), |
||
733 |
args, |
||
734 |
exec_args, |
||
735 |
env_info, |
||
736 |
flags, |
||
737 |
788 |
thread_id) { |
|
738 |
788 |
InitializeMainContext(context, env_info); |
|
739 |
788 |
} |
|
740 |
|||
741 |
6354 |
void Environment::InitializeMainContext(Local<Context> context, |
|
742 |
const EnvSerializeInfo* env_info) { |
||
743 |
6354 |
principal_realm_ = std::make_unique<Realm>( |
|
744 |
✓✓ | 6354 |
this, context, MAYBE_FIELD_PTR(env_info, principal_realm)); |
745 |
6354 |
AssignToContext(context, principal_realm_.get(), ContextInfo("")); |
|
746 |
✓✓ | 6354 |
if (env_info != nullptr) { |
747 |
5566 |
DeserializeProperties(env_info); |
|
748 |
} |
||
749 |
|||
750 |
✓✓ | 6354 |
if (!options_->force_async_hooks_checks) { |
751 |
1 |
async_hooks_.no_force_checks(); |
|
752 |
} |
||
753 |
|||
754 |
// By default, always abort when --abort-on-uncaught-exception was passed. |
||
755 |
6354 |
should_abort_on_uncaught_toggle_[0] = 1; |
|
756 |
|||
757 |
// The process is not exiting by default. |
||
758 |
6354 |
set_exiting(false); |
|
759 |
|||
760 |
6354 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_ENVIRONMENT, |
|
761 |
time_origin_); |
||
762 |
6354 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_NODE_START, |
|
763 |
per_process::node_start_time); |
||
764 |
|||
765 |
✓✓ | 6354 |
if (per_process::v8_initialized) { |
766 |
6310 |
performance_state_->Mark(performance::NODE_PERFORMANCE_MILESTONE_V8_START, |
|
767 |
performance::performance_v8_start); |
||
768 |
} |
||
769 |
6354 |
} |
|
770 |
|||
771 |
22656 |
Environment::~Environment() { |
|
772 |
22656 |
HandleScope handle_scope(isolate()); |
|
773 |
11328 |
Local<Context> ctx = context(); |
|
774 |
|||
775 |
✓✓ | 11328 |
if (Environment** interrupt_data = interrupt_data_.load()) { |
776 |
// There are pending RequestInterrupt() callbacks. Tell them not to run, |
||
777 |
// then force V8 to run interrupts by compiling and running an empty script |
||
778 |
// so as not to leak memory. |
||
779 |
22 |
*interrupt_data = nullptr; |
|
780 |
|||
781 |
44 |
Isolate::AllowJavascriptExecutionScope allow_js_here(isolate()); |
|
782 |
44 |
TryCatch try_catch(isolate()); |
|
783 |
22 |
Context::Scope context_scope(ctx); |
|
784 |
|||
785 |
#ifdef DEBUG |
||
786 |
bool consistency_check = false; |
||
787 |
isolate()->RequestInterrupt([](Isolate*, void* data) { |
||
788 |
*static_cast<bool*>(data) = true; |
||
789 |
}, &consistency_check); |
||
790 |
#endif |
||
791 |
|||
792 |
Local<Script> script; |
||
793 |
✓✗ | 66 |
if (Script::Compile(ctx, String::Empty(isolate())).ToLocal(&script)) |
794 |
22 |
USE(script->Run(ctx)); |
|
795 |
|||
796 |
DCHECK(consistency_check); |
||
797 |
} |
||
798 |
|||
799 |
// FreeEnvironment() should have set this. |
||
800 |
✗✓ | 11328 |
CHECK(is_stopping()); |
801 |
|||
802 |
✗✓ | 11328 |
if (heapsnapshot_near_heap_limit_callback_added_) { |
803 |
RemoveHeapSnapshotNearHeapLimitCallback(0); |
||
804 |
} |
||
805 |
|||
806 |
11328 |
isolate()->GetHeapProfiler()->RemoveBuildEmbedderGraphCallback( |
|
807 |
BuildEmbedderGraph, this); |
||
808 |
|||
809 |
#if HAVE_INSPECTOR |
||
810 |
// Destroy inspector agent before erasing the context. The inspector |
||
811 |
// destructor depends on the context still being accessible. |
||
812 |
11328 |
inspector_agent_.reset(); |
|
813 |
#endif |
||
814 |
|||
815 |
11328 |
ctx->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kEnvironment, |
|
816 |
nullptr); |
||
817 |
11328 |
ctx->SetAlignedPointerInEmbedderData(ContextEmbedderIndex::kRealm, nullptr); |
|
818 |
|||
819 |
✓✗ | 11328 |
if (trace_state_observer_) { |
820 |
11328 |
tracing::AgentWriterHandle* writer = GetTracingAgentWriter(); |
|
821 |
✗✓ | 11328 |
CHECK_NOT_NULL(writer); |
822 |
✓✓ | 11328 |
if (TracingController* tracing_controller = writer->GetTracingController()) |
823 |
11230 |
tracing_controller->RemoveTraceStateObserver(trace_state_observer_.get()); |
|
824 |
} |
||
825 |
|||
826 |
✓✓✓✓ |
21134 |
TRACE_EVENT_NESTABLE_ASYNC_END0( |
827 |
TRACING_CATEGORY_NODE1(environment), "Environment", this); |
||
828 |
|||
829 |
// Do not unload addons on the main thread. Some addons need to retain memory |
||
830 |
// beyond the Environment's lifetime, and unloading them early would break |
||
831 |
// them; with Worker threads, we have the opportunity to be stricter. |
||
832 |
// Also, since the main thread usually stops just before the process exits, |
||
833 |
// this is far less relevant here. |
||
834 |
✓✓ | 11328 |
if (!is_main_thread()) { |
835 |
// Dereference all addons that were loaded into this environment. |
||
836 |
✓✓ | 1482 |
for (binding::DLib& addon : loaded_addons_) { |
837 |
28 |
addon.Close(); |
|
838 |
} |
||
839 |
} |
||
840 |
22656 |
} |
|
841 |
|||
842 |
6317 |
void Environment::InitializeLibuv() { |
|
843 |
12634 |
HandleScope handle_scope(isolate()); |
|
844 |
6317 |
Context::Scope context_scope(context()); |
|
845 |
|||
846 |
✗✓ | 6317 |
CHECK_EQ(0, uv_timer_init(event_loop(), timer_handle())); |
847 |
6317 |
uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
848 |
|||
849 |
✗✓ | 6317 |
CHECK_EQ(0, uv_check_init(event_loop(), immediate_check_handle())); |
850 |
6317 |
uv_unref(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
|
851 |
|||
852 |
✗✓ | 6317 |
CHECK_EQ(0, uv_idle_init(event_loop(), immediate_idle_handle())); |
853 |
|||
854 |
✗✓ | 6317 |
CHECK_EQ(0, uv_check_start(immediate_check_handle(), CheckImmediate)); |
855 |
|||
856 |
// Inform V8's CPU profiler when we're idle. The profiler is sampling-based |
||
857 |
// but not all samples are created equal; mark the wall clock time spent in |
||
858 |
// epoll_wait() and friends so profiling tools can filter it out. The samples |
||
859 |
// still end up in v8.log but with state=IDLE rather than state=EXTERNAL. |
||
860 |
✗✓ | 6317 |
CHECK_EQ(0, uv_prepare_init(event_loop(), &idle_prepare_handle_)); |
861 |
✗✓ | 6317 |
CHECK_EQ(0, uv_check_init(event_loop(), &idle_check_handle_)); |
862 |
|||
863 |
✗✓ | 24837 |
CHECK_EQ(0, uv_async_init( |
864 |
event_loop(), |
||
865 |
&task_queues_async_, |
||
866 |
[](uv_async_t* async) { |
||
867 |
Environment* env = ContainerOf( |
||
868 |
&Environment::task_queues_async_, async); |
||
869 |
HandleScope handle_scope(env->isolate()); |
||
870 |
Context::Scope context_scope(env->context()); |
||
871 |
env->RunAndClearNativeImmediates(); |
||
872 |
})); |
||
873 |
6317 |
uv_unref(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
|
874 |
6317 |
uv_unref(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
|
875 |
6317 |
uv_unref(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
876 |
|||
877 |
{ |
||
878 |
12634 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
879 |
6317 |
task_queues_async_initialized_ = true; |
|
880 |
✓✗✓✓ ✓✓ |
12634 |
if (native_immediates_threadsafe_.size() > 0 || |
881 |
6317 |
native_immediates_interrupts_.size() > 0) { |
|
882 |
5561 |
uv_async_send(&task_queues_async_); |
|
883 |
} |
||
884 |
} |
||
885 |
|||
886 |
// Register clean-up cb to be called to clean up the handles |
||
887 |
// when the environment is freed. Note that they are not cleaned up in |
||
888 |
// the one-environment-per-process setup, but will be called in |
||
889 |
// FreeEnvironment. |
||
890 |
6317 |
RegisterHandleCleanups(); |
|
891 |
|||
892 |
6317 |
StartProfilerIdleNotifier(); |
|
893 |
6317 |
} |
|
894 |
|||
895 |
378 |
void Environment::ExitEnv() { |
|
896 |
378 |
set_can_call_into_js(false); |
|
897 |
378 |
set_stopping(true); |
|
898 |
378 |
isolate_->TerminateExecution(); |
|
899 |
756 |
SetImmediateThreadsafe([](Environment* env) { uv_stop(env->event_loop()); }); |
|
900 |
378 |
} |
|
901 |
|||
902 |
6317 |
void Environment::RegisterHandleCleanups() { |
|
903 |
6317 |
HandleCleanupCb close_and_finish = [](Environment* env, uv_handle_t* handle, |
|
904 |
33762 |
void* arg) { |
|
905 |
33762 |
handle->data = env; |
|
906 |
|||
907 |
33762 |
env->CloseHandle(handle, [](uv_handle_t* handle) { |
|
908 |
#ifdef DEBUG |
||
909 |
memset(handle, 0xab, uv_handle_size(handle->type)); |
||
910 |
#endif |
||
911 |
33762 |
}); |
|
912 |
33762 |
}; |
|
913 |
|||
914 |
37902 |
auto register_handle = [&](uv_handle_t* handle) { |
|
915 |
37902 |
RegisterHandleCleanup(handle, close_and_finish, nullptr); |
|
916 |
44219 |
}; |
|
917 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
918 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(immediate_check_handle())); |
|
919 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(immediate_idle_handle())); |
|
920 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(&idle_prepare_handle_)); |
|
921 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(&idle_check_handle_)); |
|
922 |
6317 |
register_handle(reinterpret_cast<uv_handle_t*>(&task_queues_async_)); |
|
923 |
6317 |
} |
|
924 |
|||
925 |
11319 |
void Environment::CleanupHandles() { |
|
926 |
{ |
||
927 |
11319 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
928 |
11319 |
task_queues_async_initialized_ = false; |
|
929 |
} |
||
930 |
|||
931 |
Isolate::DisallowJavascriptExecutionScope disallow_js(isolate(), |
||
932 |
22638 |
Isolate::DisallowJavascriptExecutionScope::THROW_ON_FAILURE); |
|
933 |
|||
934 |
11319 |
RunAndClearNativeImmediates(true /* skip unrefed SetImmediate()s */); |
|
935 |
|||
936 |
✓✓ | 11478 |
for (ReqWrapBase* request : req_wrap_queue_) |
937 |
159 |
request->Cancel(); |
|
938 |
|||
939 |
✓✓ | 15904 |
for (HandleWrap* handle : handle_wrap_queue_) |
940 |
9170 |
handle->Close(); |
|
941 |
|||
942 |
✓✓ | 45081 |
for (HandleCleanup& hc : handle_cleanup_queue_) |
943 |
33762 |
hc.cb_(this, hc.handle_, hc.arg_); |
|
944 |
11319 |
handle_cleanup_queue_.clear(); |
|
945 |
|||
946 |
10641 |
while (handle_cleanup_waiting_ != 0 || |
|
947 |
✓✓✓✓ ✓✓ |
33281 |
request_waiting_ != 0 || |
948 |
✓✓ | 11321 |
!handle_wrap_queue_.IsEmpty()) { |
949 |
10641 |
uv_run(event_loop(), UV_RUN_ONCE); |
|
950 |
} |
||
951 |
11319 |
} |
|
952 |
|||
953 |
6317 |
void Environment::StartProfilerIdleNotifier() { |
|
954 |
6317 |
uv_prepare_start(&idle_prepare_handle_, [](uv_prepare_t* handle) { |
|
955 |
215136 |
Environment* env = ContainerOf(&Environment::idle_prepare_handle_, handle); |
|
956 |
215136 |
env->isolate()->SetIdle(true); |
|
957 |
215136 |
}); |
|
958 |
6317 |
uv_check_start(&idle_check_handle_, [](uv_check_t* handle) { |
|
959 |
214848 |
Environment* env = ContainerOf(&Environment::idle_check_handle_, handle); |
|
960 |
214848 |
env->isolate()->SetIdle(false); |
|
961 |
214848 |
}); |
|
962 |
6317 |
} |
|
963 |
|||
964 |
755390 |
void Environment::PrintSyncTrace() const { |
|
965 |
✓✓ | 755390 |
if (!trace_sync_io_) return; |
966 |
|||
967 |
2 |
HandleScope handle_scope(isolate()); |
|
968 |
|||
969 |
1 |
fprintf( |
|
970 |
stderr, "(node:%d) WARNING: Detected use of sync API\n", uv_os_getpid()); |
||
971 |
1 |
PrintStackTrace(isolate(), |
|
972 |
StackTrace::CurrentStackTrace( |
||
973 |
isolate(), stack_trace_limit(), StackTrace::kDetailed)); |
||
974 |
} |
||
975 |
|||
976 |
5306 |
MaybeLocal<Value> Environment::RunSnapshotSerializeCallback() const { |
|
977 |
5306 |
EscapableHandleScope handle_scope(isolate()); |
|
978 |
✓✓ | 10612 |
if (!snapshot_serialize_callback().IsEmpty()) { |
979 |
1 |
Context::Scope context_scope(context()); |
|
980 |
1 |
return handle_scope.EscapeMaybe(snapshot_serialize_callback()->Call( |
|
981 |
3 |
context(), v8::Undefined(isolate()), 0, nullptr)); |
|
982 |
} |
||
983 |
10610 |
return handle_scope.Escape(Undefined(isolate())); |
|
984 |
} |
||
985 |
|||
986 |
MaybeLocal<Value> Environment::RunSnapshotDeserializeMain() const { |
||
987 |
EscapableHandleScope handle_scope(isolate()); |
||
988 |
if (!snapshot_deserialize_main().IsEmpty()) { |
||
989 |
Context::Scope context_scope(context()); |
||
990 |
return handle_scope.EscapeMaybe(snapshot_deserialize_main()->Call( |
||
991 |
context(), v8::Undefined(isolate()), 0, nullptr)); |
||
992 |
} |
||
993 |
return handle_scope.Escape(Undefined(isolate())); |
||
994 |
} |
||
995 |
|||
996 |
5664 |
void Environment::RunCleanup() { |
|
997 |
5664 |
started_cleanup_ = true; |
|
998 |
✓✓✓✓ |
16231 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunCleanup"); |
999 |
5664 |
bindings_.clear(); |
|
1000 |
// Only BaseObject's cleanups are registered as per-realm cleanup hooks now. |
||
1001 |
// Defer the BaseObject cleanup after handles are cleaned up. |
||
1002 |
5664 |
CleanupHandles(); |
|
1003 |
|||
1004 |
✓✓✓✓ |
23360 |
while (!cleanup_queue_.empty() || principal_realm_->HasCleanupHooks() || |
1005 |
✓✗ | 11330 |
native_immediates_.size() > 0 || |
1006 |
✓✓✗✓ ✓✓ |
23358 |
native_immediates_threadsafe_.size() > 0 || |
1007 |
5664 |
native_immediates_interrupts_.size() > 0) { |
|
1008 |
// TODO(legendecas): cleanup handles in per-realm cleanup hooks as well. |
||
1009 |
5655 |
principal_realm_->RunCleanup(); |
|
1010 |
5655 |
cleanup_queue_.Drain(); |
|
1011 |
5655 |
CleanupHandles(); |
|
1012 |
} |
||
1013 |
|||
1014 |
✓✓ | 5667 |
for (const int fd : unmanaged_fds_) { |
1015 |
uv_fs_t close_req; |
||
1016 |
3 |
uv_fs_close(nullptr, &close_req, fd, nullptr); |
|
1017 |
3 |
uv_fs_req_cleanup(&close_req); |
|
1018 |
} |
||
1019 |
5664 |
} |
|
1020 |
|||
1021 |
6427 |
void Environment::RunAtExitCallbacks() { |
|
1022 |
✓✓✓✓ |
18424 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "AtExit"); |
1023 |
✓✓ | 19094 |
for (ExitCallback at_exit : at_exit_functions_) { |
1024 |
12667 |
at_exit.cb_(at_exit.arg_); |
|
1025 |
} |
||
1026 |
6427 |
at_exit_functions_.clear(); |
|
1027 |
6427 |
} |
|
1028 |
|||
1029 |
12695 |
void Environment::AtExit(void (*cb)(void* arg), void* arg) { |
|
1030 |
12695 |
at_exit_functions_.push_front(ExitCallback{cb, arg}); |
|
1031 |
12695 |
} |
|
1032 |
|||
1033 |
253245 |
void Environment::RunAndClearInterrupts() { |
|
1034 |
✓✓ | 253245 |
while (native_immediates_interrupts_.size() > 0) { |
1035 |
10591 |
NativeImmediateQueue queue; |
|
1036 |
{ |
||
1037 |
21186 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
1038 |
10593 |
queue.ConcatMove(std::move(native_immediates_interrupts_)); |
|
1039 |
} |
||
1040 |
10593 |
DebugSealHandleScope seal_handle_scope(isolate()); |
|
1041 |
|||
1042 |
✓✓ | 21193 |
while (auto head = queue.Shift()) |
1043 |
21202 |
head->Call(this); |
|
1044 |
} |
||
1045 |
242652 |
} |
|
1046 |
|||
1047 |
232343 |
void Environment::RunAndClearNativeImmediates(bool only_refed) { |
|
1048 |
✓✓✓✓ |
469899 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), |
1049 |
"RunAndClearNativeImmediates"); |
||
1050 |
464678 |
HandleScope handle_scope(isolate_); |
|
1051 |
// In case the Isolate is no longer accessible just use an empty Local. This |
||
1052 |
// is not an issue for InternalCallbackScope as this case is already handled |
||
1053 |
// in its constructor but we avoid calls into v8 which can crash the process |
||
1054 |
// in debug builds. |
||
1055 |
Local<Object> obj = |
||
1056 |
✓✓ | 232343 |
can_call_into_js() ? Object::New(isolate_) : Local<Object>(); |
1057 |
464678 |
InternalCallbackScope cb_scope(this, obj, {0, 0}); |
|
1058 |
|||
1059 |
232343 |
size_t ref_count = 0; |
|
1060 |
|||
1061 |
// Handle interrupts first. These functions are not allowed to throw |
||
1062 |
// exceptions, so we do not need to handle that. |
||
1063 |
232343 |
RunAndClearInterrupts(); |
|
1064 |
|||
1065 |
464682 |
auto drain_list = [&](NativeImmediateQueue* queue) { |
|
1066 |
929357 |
TryCatchScope try_catch(this); |
|
1067 |
464682 |
DebugSealHandleScope seal_handle_scope(isolate()); |
|
1068 |
✓✓ | 524377 |
while (auto head = queue->Shift()) { |
1069 |
59703 |
bool is_refed = head->flags() & CallbackFlags::kRefed; |
|
1070 |
✓✓ | 59703 |
if (is_refed) |
1071 |
34930 |
ref_count++; |
|
1072 |
|||
1073 |
✓✓✓✓ |
59703 |
if (is_refed || !only_refed) |
1074 |
59430 |
head->Call(this); |
|
1075 |
|||
1076 |
59698 |
head.reset(); // Destroy now so that this is also observed by try_catch. |
|
1077 |
|||
1078 |
✓✓ | 59698 |
if (UNLIKELY(try_catch.HasCaught())) { |
1079 |
✓✗✓✗ ✓✗ |
3 |
if (!try_catch.HasTerminated() && can_call_into_js()) |
1080 |
3 |
errors::TriggerUncaughtException(isolate(), try_catch); |
|
1081 |
|||
1082 |
1 |
return true; |
|
1083 |
} |
||
1084 |
59695 |
} |
|
1085 |
464674 |
return false; |
|
1086 |
232342 |
}; |
|
1087 |
✗✓ | 232342 |
while (drain_list(&native_immediates_)) {} |
1088 |
|||
1089 |
232339 |
immediate_info()->ref_count_dec(ref_count); |
|
1090 |
|||
1091 |
✓✓ | 232339 |
if (immediate_info()->ref_count() == 0) |
1092 |
175139 |
ToggleImmediateRef(false); |
|
1093 |
|||
1094 |
// It is safe to check .size() first, because there is a causal relationship |
||
1095 |
// between pushes to the threadsafe immediate list and this function being |
||
1096 |
// called. For the common case, it's worth checking the size first before |
||
1097 |
// establishing a mutex lock. |
||
1098 |
// This is intentionally placed after the `ref_count` handling, because when |
||
1099 |
// refed threadsafe immediates are created, they are not counted towards the |
||
1100 |
// count in immediate_info() either. |
||
1101 |
232335 |
NativeImmediateQueue threadsafe_immediates; |
|
1102 |
✓✓ | 232339 |
if (native_immediates_threadsafe_.size() > 0) { |
1103 |
2194 |
Mutex::ScopedLock lock(native_immediates_threadsafe_mutex_); |
|
1104 |
1097 |
threadsafe_immediates.ConcatMove(std::move(native_immediates_threadsafe_)); |
|
1105 |
} |
||
1106 |
✓✓ | 232340 |
while (drain_list(&threadsafe_immediates)) {} |
1107 |
232335 |
} |
|
1108 |
|||
1109 |
10607 |
void Environment::RequestInterruptFromV8() { |
|
1110 |
// The Isolate may outlive the Environment, so some logic to handle the |
||
1111 |
// situation in which the Environment is destroyed before the handler runs |
||
1112 |
// is required. |
||
1113 |
|||
1114 |
// We allocate a new pointer to a pointer to this Environment instance, and |
||
1115 |
// try to set it as interrupt_data_. If interrupt_data_ was already set, then |
||
1116 |
// callbacks are already scheduled to run and we can delete our own pointer |
||
1117 |
// and just return. If it was nullptr previously, the Environment** is stored; |
||
1118 |
// ~Environment sets the Environment* contained in it to nullptr, so that |
||
1119 |
// the callback can check whether ~Environment has already run and it is thus |
||
1120 |
// not safe to access the Environment instance itself. |
||
1121 |
10607 |
Environment** interrupt_data = new Environment*(this); |
|
1122 |
10607 |
Environment** dummy = nullptr; |
|
1123 |
✓✓ | 10607 |
if (!interrupt_data_.compare_exchange_strong(dummy, interrupt_data)) { |
1124 |
393 |
delete interrupt_data; |
|
1125 |
393 |
return; // Already scheduled. |
|
1126 |
} |
||
1127 |
|||
1128 |
10214 |
isolate()->RequestInterrupt([](Isolate* isolate, void* data) { |
|
1129 |
10204 |
std::unique_ptr<Environment*> env_ptr { static_cast<Environment**>(data) }; |
|
1130 |
10204 |
Environment* env = *env_ptr; |
|
1131 |
✓✓ | 10204 |
if (env == nullptr) { |
1132 |
// The Environment has already been destroyed. That should be okay; any |
||
1133 |
// callback added before the Environment shuts down would have been |
||
1134 |
// handled during cleanup. |
||
1135 |
11 |
return; |
|
1136 |
} |
||
1137 |
10193 |
env->interrupt_data_.store(nullptr); |
|
1138 |
10193 |
env->RunAndClearInterrupts(); |
|
1139 |
}, interrupt_data); |
||
1140 |
} |
||
1141 |
|||
1142 |
9532 |
void Environment::ScheduleTimer(int64_t duration_ms) { |
|
1143 |
✗✓ | 9532 |
if (started_cleanup_) return; |
1144 |
9532 |
uv_timer_start(timer_handle(), RunTimers, duration_ms, 0); |
|
1145 |
} |
||
1146 |
|||
1147 |
3971 |
void Environment::ToggleTimerRef(bool ref) { |
|
1148 |
✗✓ | 3971 |
if (started_cleanup_) return; |
1149 |
|||
1150 |
✓✓ | 3971 |
if (ref) { |
1151 |
2658 |
uv_ref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
1152 |
} else { |
||
1153 |
1313 |
uv_unref(reinterpret_cast<uv_handle_t*>(timer_handle())); |
|
1154 |
} |
||
1155 |
} |
||
1156 |
|||
1157 |
7503 |
void Environment::RunTimers(uv_timer_t* handle) { |
|
1158 |
7503 |
Environment* env = Environment::from_timer_handle(handle); |
|
1159 |
✓✓✓✓ |
8027 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "RunTimers"); |
1160 |
|||
1161 |
✓✓ | 7503 |
if (!env->can_call_into_js()) |
1162 |
1 |
return; |
|
1163 |
|||
1164 |
7502 |
HandleScope handle_scope(env->isolate()); |
|
1165 |
7502 |
Context::Scope context_scope(env->context()); |
|
1166 |
|||
1167 |
7502 |
Local<Object> process = env->process_object(); |
|
1168 |
7502 |
InternalCallbackScope scope(env, process, {0, 0}); |
|
1169 |
|||
1170 |
7502 |
Local<Function> cb = env->timers_callback_function(); |
|
1171 |
MaybeLocal<Value> ret; |
||
1172 |
7502 |
Local<Value> arg = env->GetNow(); |
|
1173 |
// This code will loop until all currently due timers have been processed. It is |
||
1174 |
// impossible for us to end up in an infinite loop due to how the JS-side |
||
1175 |
// is structured. |
||
1176 |
34 |
do { |
|
1177 |
7536 |
TryCatchScope try_catch(env); |
|
1178 |
7536 |
try_catch.SetVerbose(true); |
|
1179 |
7536 |
ret = cb->Call(env->context(), process, 1, &arg); |
|
1180 |
✓✓✓✓ ✓✓ |
7526 |
} while (ret.IsEmpty() && env->can_call_into_js()); |
1181 |
|||
1182 |
// NOTE(apapirovski): If it ever becomes possible that `call_into_js` above |
||
1183 |
// is reset back to `true` after being previously set to `false` then this |
||
1184 |
// code becomes invalid and needs to be rewritten. Otherwise catastrophic |
||
1185 |
// timers corruption will occur and all timers behaviour will become |
||
1186 |
// entirely unpredictable. |
||
1187 |
✓✓ | 7492 |
if (ret.IsEmpty()) |
1188 |
7 |
return; |
|
1189 |
|||
1190 |
// To allow for less JS-C++ boundary crossing, the value returned from JS |
||
1191 |
// serves a few purposes: |
||
1192 |
// 1. If it's 0, no more timers exist and the handle should be unrefed |
||
1193 |
// 2. If it's > 0, the value represents the next timer's expiry and there |
||
1194 |
// is at least one timer remaining that is refed. |
||
1195 |
// 3. If it's < 0, the absolute value represents the next timer's expiry |
||
1196 |
// and there are no timers that are refed. |
||
1197 |
int64_t expiry_ms = |
||
1198 |
7485 |
ret.ToLocalChecked()->IntegerValue(env->context()).FromJust(); |
|
1199 |
|||
1200 |
7485 |
uv_handle_t* h = reinterpret_cast<uv_handle_t*>(handle); |
|
1201 |
|||
1202 |
✓✓ | 7485 |
if (expiry_ms != 0) { |
1203 |
int64_t duration_ms = |
||
1204 |
6323 |
llabs(expiry_ms) - (uv_now(env->event_loop()) - env->timer_base()); |
|
1205 |
|||
1206 |
6323 |
env->ScheduleTimer(duration_ms > 0 ? duration_ms : 1); |
|
1207 |
|||
1208 |
✓✓ | 6323 |
if (expiry_ms > 0) |
1209 |
5645 |
uv_ref(h); |
|
1210 |
else |
||
1211 |
678 |
uv_unref(h); |
|
1212 |
} else { |
||
1213 |
1162 |
uv_unref(h); |
|
1214 |
} |
||
1215 |
} |
||
1216 |
|||
1217 |
|||
1218 |
214848 |
void Environment::CheckImmediate(uv_check_t* handle) { |
|
1219 |
214848 |
Environment* env = Environment::from_immediate_check_handle(handle); |
|
1220 |
✓✓✓✓ |
217977 |
TRACE_EVENT0(TRACING_CATEGORY_NODE1(environment), "CheckImmediate"); |
1221 |
|||
1222 |
214848 |
HandleScope scope(env->isolate()); |
|
1223 |
214848 |
Context::Scope context_scope(env->context()); |
|
1224 |
|||
1225 |
214848 |
env->RunAndClearNativeImmediates(); |
|
1226 |
|||
1227 |
✓✓✓✓ ✓✓ |
214848 |
if (env->immediate_info()->count() == 0 || !env->can_call_into_js()) |
1228 |
158232 |
return; |
|
1229 |
|||
1230 |
950 |
do { |
|
1231 |
57554 |
MakeCallback(env->isolate(), |
|
1232 |
env->process_object(), |
||
1233 |
env->immediate_callback_function(), |
||
1234 |
0, |
||
1235 |
nullptr, |
||
1236 |
57566 |
{0, 0}).ToLocalChecked(); |
|
1237 |
✓✓✓✓ ✓✓ |
57554 |
} while (env->immediate_info()->has_outstanding() && env->can_call_into_js()); |
1238 |
|||
1239 |
✓✓ | 56604 |
if (env->immediate_info()->ref_count() == 0) |
1240 |
4703 |
env->ToggleImmediateRef(false); |
|
1241 |
} |
||
1242 |
|||
1243 |
259819 |
void Environment::ToggleImmediateRef(bool ref) { |
|
1244 |
✓✓ | 259819 |
if (started_cleanup_) return; |
1245 |
|||
1246 |
✓✓ | 248582 |
if (ref) { |
1247 |
// Idle handle is needed only to stop the event loop from blocking in poll. |
||
1248 |
79952 |
uv_idle_start(immediate_idle_handle(), [](uv_idle_t*){ }); |
|
1249 |
} else { |
||
1250 |
168630 |
uv_idle_stop(immediate_idle_handle()); |
|
1251 |
} |
||
1252 |
} |
||
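
The idle-handle trick mentioned in the comment above works because an active uv_idle_t forces uv_run() to poll with a zero timeout, so queued immediates are reached without the loop blocking. A minimal sketch, assuming the handle was already initialized with uv_idle_init() (the helper name set_immediates_pending is hypothetical):

#include <uv.h>

static void noop_idle(uv_idle_t*) {}  // callback intentionally does nothing

// Hypothetical helper mirroring ToggleImmediateRef(): while immediates are
// pending, keep an idle handle active so the loop does not block in poll.
void set_immediates_pending(uv_idle_t* idle, bool pending) {
  if (pending)
    uv_idle_start(idle, noop_idle);  // loop keeps spinning with 0ms poll
  else
    uv_idle_stop(idle);              // loop may block in poll again
}
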
1253 |
|||
1254 |
|||
1255 |
46898 |
Local<Value> Environment::GetNow() { |
|
1256 |
46898 |
uv_update_time(event_loop()); |
|
1257 |
46898 |
uint64_t now = uv_now(event_loop()); |
|
1258 |
✗✓ | 46898 |
CHECK_GE(now, timer_base()); |
1259 |
46898 |
now -= timer_base(); |
|
1260 |
✓✗ | 46898 |
if (now <= 0xffffffff) |
1261 |
93796 |
return Integer::NewFromUnsigned(isolate(), static_cast<uint32_t>(now)); |
|
1262 |
else |
||
1263 |
return Number::New(isolate(), static_cast<double>(now)); |
||
1264 |
} |
||
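
GetNow() reports loop time relative to timer_base() and prefers a 32-bit Integer when the offset fits, avoiding a heap-allocated Number. A free-function sketch of the same idea (hypothetical name RelativeNow; it assumes timer_base <= uv_now(loop), as the CHECK above enforces):

#include <uv.h>
#include <v8.h>

// Hypothetical: loop time relative to a fixed base, with a uint32 fast path.
v8::Local<v8::Value> RelativeNow(v8::Isolate* isolate,
                                 uv_loop_t* loop,
                                 uint64_t timer_base) {
  uv_update_time(loop);
  uint64_t now = uv_now(loop) - timer_base;
  if (now <= 0xffffffff)  // fits in 32 bits: use a small integer
    return v8::Integer::NewFromUnsigned(isolate, static_cast<uint32_t>(now));
  return v8::Number::New(isolate, static_cast<double>(now));
}
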
1265 |
|||
1266 |
29 |
void CollectExceptionInfo(Environment* env, |
|
1267 |
Local<Object> obj, |
||
1268 |
int errorno, |
||
1269 |
const char* err_string, |
||
1270 |
const char* syscall, |
||
1271 |
const char* message, |
||
1272 |
const char* path, |
||
1273 |
const char* dest) { |
||
1274 |
29 |
obj->Set(env->context(), |
|
1275 |
env->errno_string(), |
||
1276 |
116 |
Integer::New(env->isolate(), errorno)).Check(); |
|
1277 |
|||
1278 |
29 |
obj->Set(env->context(), env->code_string(), |
|
1279 |
87 |
OneByteString(env->isolate(), err_string)).Check(); |
|
1280 |
|||
1281 |
✓✗ | 29 |
if (message != nullptr) { |
1282 |
29 |
obj->Set(env->context(), env->message_string(), |
|
1283 |
116 |
OneByteString(env->isolate(), message)).Check(); |
|
1284 |
} |
||
1285 |
|||
1286 |
Local<Value> path_buffer; |
||
1287 |
✗✓ | 29 |
if (path != nullptr) { |
1288 |
path_buffer = |
||
1289 |
Buffer::Copy(env->isolate(), path, strlen(path)).ToLocalChecked(); |
||
1290 |
obj->Set(env->context(), env->path_string(), path_buffer).Check(); |
||
1291 |
} |
||
1292 |
|||
1293 |
Local<Value> dest_buffer; |
||
1294 |
✗✓ | 29 |
if (dest != nullptr) { |
1295 |
dest_buffer = |
||
1296 |
Buffer::Copy(env->isolate(), dest, strlen(dest)).ToLocalChecked(); |
||
1297 |
obj->Set(env->context(), env->dest_string(), dest_buffer).Check(); |
||
1298 |
} |
||
1299 |
|||
1300 |
✓✗ | 29 |
if (syscall != nullptr) { |
1301 |
29 |
obj->Set(env->context(), env->syscall_string(), |
|
1302 |
116 |
OneByteString(env->isolate(), syscall)).Check(); |
|
1303 |
} |
||
1304 |
29 |
} |
|
1305 |
|||
1306 |
29 |
void Environment::CollectUVExceptionInfo(Local<Value> object, |
|
1307 |
int errorno, |
||
1308 |
const char* syscall, |
||
1309 |
const char* message, |
||
1310 |
const char* path, |
||
1311 |
const char* dest) { |
||
1312 |
✓✗✗✓ ✗✓ |
29 |
if (!object->IsObject() || errorno == 0) |
1313 |
return; |
||
1314 |
|||
1315 |
29 |
Local<Object> obj = object.As<Object>(); |
|
1316 |
29 |
const char* err_string = uv_err_name(errorno); |
|
1317 |
|||
1318 |
✗✓✗✗ |
29 |
if (message == nullptr || message[0] == '\0') { |
1319 |
29 |
message = uv_strerror(errorno); |
|
1320 |
} |
||
1321 |
|||
1322 |
29 |
node::CollectExceptionInfo(this, obj, errorno, err_string, |
|
1323 |
syscall, message, path, dest); |
||
1324 |
} |
||
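
CollectUVExceptionInfo() builds the errno/code/message/syscall properties from a libuv error code; the two libuv calls it relies on can be seen in isolation below (illustrative only, not part of env.cc):

#include <cstdio>
#include <uv.h>

int main() {
  int errorno = UV_ENOENT;
  // uv_err_name() yields the short code ("ENOENT"); uv_strerror() yields the
  // human-readable message used when no explicit message is supplied.
  std::printf("code=%s message=%s\n",
              uv_err_name(errorno), uv_strerror(errorno));
  return 0;
}
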
1325 |
|||
1326 |
6354 |
ImmediateInfo::ImmediateInfo(Isolate* isolate, const SerializeInfo* info) |
|
1327 |
✓✓ | 6354 |
: fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)) {} |
1328 |
|||
1329 |
7 |
ImmediateInfo::SerializeInfo ImmediateInfo::Serialize( |
|
1330 |
Local<Context> context, SnapshotCreator* creator) { |
||
1331 |
7 |
return {fields_.Serialize(context, creator)}; |
|
1332 |
} |
||
1333 |
|||
1334 |
5566 |
void ImmediateInfo::Deserialize(Local<Context> context) { |
|
1335 |
5566 |
fields_.Deserialize(context); |
|
1336 |
5566 |
} |
|
1337 |
|||
1338 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1339 |
const ImmediateInfo::SerializeInfo& i) { |
||
1340 |
6 |
output << "{ " << i.fields << " }"; |
|
1341 |
6 |
return output; |
|
1342 |
} |
||
1343 |
|||
1344 |
25 |
void ImmediateInfo::MemoryInfo(MemoryTracker* tracker) const { |
|
1345 |
25 |
tracker->TrackField("fields", fields_); |
|
1346 |
25 |
} |
|
1347 |
|||
1348 |
7 |
TickInfo::SerializeInfo TickInfo::Serialize(Local<Context> context, |
|
1349 |
SnapshotCreator* creator) { |
||
1350 |
7 |
return {fields_.Serialize(context, creator)}; |
|
1351 |
} |
||
1352 |
|||
1353 |
5566 |
void TickInfo::Deserialize(Local<Context> context) { |
|
1354 |
5566 |
fields_.Deserialize(context); |
|
1355 |
5566 |
} |
|
1356 |
|||
1357 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1358 |
const TickInfo::SerializeInfo& i) { |
||
1359 |
6 |
output << "{ " << i.fields << " }"; |
|
1360 |
6 |
return output; |
|
1361 |
} |
||
1362 |
|||
1363 |
25 |
void TickInfo::MemoryInfo(MemoryTracker* tracker) const { |
|
1364 |
25 |
tracker->TrackField("fields", fields_); |
|
1365 |
25 |
} |
|
1366 |
|||
1367 |
6354 |
TickInfo::TickInfo(Isolate* isolate, const SerializeInfo* info) |
|
1368 |
: fields_( |
||
1369 |
✓✓ | 6354 |
isolate, kFieldsCount, info == nullptr ? nullptr : &(info->fields)) {} |
1370 |
|||
1371 |
6354 |
AsyncHooks::AsyncHooks(Isolate* isolate, const SerializeInfo* info) |
|
1372 |
: async_ids_stack_(isolate, 16 * 2, MAYBE_FIELD_PTR(info, async_ids_stack)), |
||
1373 |
fields_(isolate, kFieldsCount, MAYBE_FIELD_PTR(info, fields)), |
||
1374 |
async_id_fields_( |
||
1375 |
isolate, kUidFieldsCount, MAYBE_FIELD_PTR(info, async_id_fields)), |
||
1376 |
✓✓✓✓ ✓✓ |
6354 |
info_(info) { |
1377 |
12708 |
HandleScope handle_scope(isolate); |
|
1378 |
✓✓ | 6354 |
if (info == nullptr) { |
1379 |
788 |
clear_async_id_stack(); |
|
1380 |
|||
1381 |
// Always perform async_hooks checks, not just when async_hooks is enabled. |
||
1382 |
// TODO(AndreasMadsen): Consider removing this for LTS releases. |
||
1383 |
// See discussion in https://github.com/nodejs/node/pull/15454 |
||
1384 |
// When removing this, do it by reverting the commit. Otherwise the test |
||
1385 |
// and flag changes won't be included. |
||
1386 |
788 |
fields_[kCheck] = 1; |
|
1387 |
|||
1388 |
// kDefaultTriggerAsyncId should be -1; this indicates that there is no |
||
1389 |
// specified default value and it should fall back to the executionAsyncId. |
||
1390 |
// 0 is not used as the magic value, because that indicates a missing |
||
1391 |
// context which is different from a default context. |
||
1392 |
788 |
async_id_fields_[AsyncHooks::kDefaultTriggerAsyncId] = -1; |
|
1393 |
|||
1394 |
// kAsyncIdCounter should start at 1 because that'll be the id of the execution |
||
1395 |
// context during bootstrap (code that runs before entering uv_run()). |
||
1396 |
788 |
async_id_fields_[AsyncHooks::kAsyncIdCounter] = 1; |
|
1397 |
} |
||
1398 |
6354 |
} |
|
1399 |
|||
1400 |
5566 |
void AsyncHooks::Deserialize(Local<Context> context) { |
|
1401 |
5566 |
async_ids_stack_.Deserialize(context); |
|
1402 |
5566 |
fields_.Deserialize(context); |
|
1403 |
5566 |
async_id_fields_.Deserialize(context); |
|
1404 |
|||
1405 |
Local<Array> js_execution_async_resources; |
||
1406 |
✓✗ | 5566 |
if (info_->js_execution_async_resources != 0) { |
1407 |
js_execution_async_resources = |
||
1408 |
5566 |
context->GetDataFromSnapshotOnce<Array>( |
|
1409 |
✗✓ | 16698 |
info_->js_execution_async_resources).ToLocalChecked(); |
1410 |
} else { |
||
1411 |
js_execution_async_resources = Array::New(context->GetIsolate()); |
||
1412 |
} |
||
1413 |
5566 |
js_execution_async_resources_.Reset( |
|
1414 |
context->GetIsolate(), js_execution_async_resources); |
||
1415 |
|||
1416 |
// The native_execution_async_resources_ field requires v8::Local<> instances |
||
1417 |
// for async calls whose resources were on the stack as JS objects when they |
||
1418 |
// were entered. We cannot recreate this here; however, storing these values |
||
1419 |
// on the JS equivalent gives the same result, so we do that instead. |
||
1420 |
✗✓ | 5566 |
for (size_t i = 0; i < info_->native_execution_async_resources.size(); ++i) { |
1421 |
if (info_->native_execution_async_resources[i] == SIZE_MAX) |
||
1422 |
continue; |
||
1423 |
Local<Object> obj = context->GetDataFromSnapshotOnce<Object>( |
||
1424 |
info_->native_execution_async_resources[i]) |
||
1425 |
.ToLocalChecked(); |
||
1426 |
js_execution_async_resources->Set(context, i, obj).Check(); |
||
1427 |
} |
||
1428 |
5566 |
info_ = nullptr; |
|
1429 |
5566 |
} |
|
1430 |
|||
1431 |
6 |
std::ostream& operator<<(std::ostream& output, |
|
1432 |
const AsyncHooks::SerializeInfo& i) { |
||
1433 |
output << "{\n" |
||
1434 |
6 |
<< " " << i.async_ids_stack << ", // async_ids_stack\n" |
|
1435 |
6 |
<< " " << i.fields << ", // fields\n" |
|
1436 |
6 |
<< " " << i.async_id_fields << ", // async_id_fields\n" |
|
1437 |
6 |
<< " " << i.js_execution_async_resources |
|
1438 |
<< ", // js_execution_async_resources\n" |
||
1439 |
6 |
<< " " << i.native_execution_async_resources |
|
1440 |
<< ", // native_execution_async_resources\n" |
||
1441 |
6 |
<< "}"; |
|
1442 |
6 |
return output; |
|
1443 |
} |
||
1444 |
|||
1445 |
7 |
AsyncHooks::SerializeInfo AsyncHooks::Serialize(Local<Context> context, |
|
1446 |
SnapshotCreator* creator) { |
||
1447 |
7 |
SerializeInfo info; |
|
1448 |
// TODO(joyeecheung): some of these probably don't need to be serialized. |
||
1449 |
7 |
info.async_ids_stack = async_ids_stack_.Serialize(context, creator); |
|
1450 |
7 |
info.fields = fields_.Serialize(context, creator); |
|
1451 |
7 |
info.async_id_fields = async_id_fields_.Serialize(context, creator); |
|
1452 |
✓✗ | 7 |
if (!js_execution_async_resources_.IsEmpty()) { |
1453 |
7 |
info.js_execution_async_resources = creator->AddData( |
|
1454 |
context, js_execution_async_resources_.Get(context->GetIsolate())); |
||
1455 |
✗✓ | 7 |
CHECK_NE(info.js_execution_async_resources, 0); |
1456 |
} else { |
||
1457 |
info.js_execution_async_resources = 0; |
||
1458 |
} |
||
1459 |
|||
1460 |
7 |
info.native_execution_async_resources.resize( |
|
1461 |
native_execution_async_resources_.size()); |
||
1462 |
✗✓ | 7 |
for (size_t i = 0; i < native_execution_async_resources_.size(); i++) { |
1463 |
info.native_execution_async_resources[i] = |
||
1464 |
native_execution_async_resources_[i].IsEmpty() ? SIZE_MAX : |
||
1465 |
creator->AddData( |
||
1466 |
context, |
||
1467 |
native_execution_async_resources_[i]); |
||
1468 |
} |
||
1469 |
✗✓ | 7 |
CHECK_EQ(contexts_.size(), 1); |
1470 |
✗✓✗✓ |
14 |
CHECK_EQ(contexts_[0], env()->context()); |
1471 |
✗✓ | 7 |
CHECK(js_promise_hooks_[0].IsEmpty()); |
1472 |
✗✓ | 7 |
CHECK(js_promise_hooks_[1].IsEmpty()); |
1473 |
✗✓ | 7 |
CHECK(js_promise_hooks_[2].IsEmpty()); |
1474 |
✗✓ | 7 |
CHECK(js_promise_hooks_[3].IsEmpty()); |
1475 |
|||
1476 |
7 |
return info; |
|
1477 |
} |
||
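
Serialize() stores V8 objects in the snapshot via SnapshotCreator::AddData() and records the returned indices; Deserialize() later redeems those indices with Context::GetDataFromSnapshotOnce(). A minimal sketch of that round-trip for a single Array (the helper names SaveArray/LoadArray are hypothetical):

#include <v8.h>

// Snapshot-build time: returns an index that is persisted in SerializeInfo.
size_t SaveArray(v8::SnapshotCreator* creator,
                 v8::Local<v8::Context> context,
                 v8::Local<v8::Array> arr) {
  return creator->AddData(context, arr);
}

// Deserialization time: each stored index may be redeemed exactly once.
v8::MaybeLocal<v8::Array> LoadArray(v8::Local<v8::Context> context,
                                    size_t index) {
  return context->GetDataFromSnapshotOnce<v8::Array>(index);
}
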
1478 |
|||
1479 |
25 |
void AsyncHooks::MemoryInfo(MemoryTracker* tracker) const { |
|
1480 |
25 |
tracker->TrackField("async_ids_stack", async_ids_stack_); |
|
1481 |
25 |
tracker->TrackField("fields", fields_); |
|
1482 |
25 |
tracker->TrackField("async_id_fields", async_id_fields_); |
|
1483 |
25 |
tracker->TrackField("js_promise_hooks", js_promise_hooks_); |
|
1484 |
25 |
} |
|
1485 |
|||
1486 |
4 |
void AsyncHooks::grow_async_ids_stack() { |
|
1487 |
4 |
async_ids_stack_.reserve(async_ids_stack_.Length() * 3); |
|
1488 |
|||
1489 |
4 |
env()->async_hooks_binding()->Set( |
|
1490 |
env()->context(), |
||
1491 |
env()->async_ids_stack_string(), |
||
1492 |
12 |
async_ids_stack_.GetJSArray()).Check(); |
|
1493 |
4 |
} |
|
1494 |
|||
1495 |
4 |
void AsyncHooks::FailWithCorruptedAsyncStack(double expected_async_id) { |
|
1496 |
4 |
fprintf(stderr, |
|
1497 |
"Error: async hook stack has become corrupted (" |
||
1498 |
"actual: %.f, expected: %.f)\n", |
||
1499 |
async_id_fields_.GetValue(kExecutionAsyncId), |
||
1500 |
expected_async_id); |
||
1501 |
4 |
DumpBacktrace(stderr); |
|
1502 |
4 |
fflush(stderr); |
|
1503 |
// TODO(joyeecheung): should this exit code be more specific? |
||
1504 |
✓✗ | 4 |
if (!env()->abort_on_uncaught_exception()) Exit(ExitCode::kGenericUserError); |
1505 |
fprintf(stderr, "\n"); |
||
1506 |
fflush(stderr); |
||
1507 |
ABORT_NO_BACKTRACE(); |
||
1508 |
} |
||
1509 |
|||
1510 |
746 |
void Environment::Exit(ExitCode exit_code) { |
|
1511 |
✓✓ | 746 |
if (options()->trace_exit) { |
1512 |
4 |
HandleScope handle_scope(isolate()); |
|
1513 |
Isolate::DisallowJavascriptExecutionScope disallow_js( |
||
1514 |
4 |
isolate(), Isolate::DisallowJavascriptExecutionScope::CRASH_ON_FAILURE); |
|
1515 |
|||
1516 |
✓✓ | 2 |
if (is_main_thread()) { |
1517 |
1 |
fprintf(stderr, "(node:%d) ", uv_os_getpid()); |
|
1518 |
} else { |
||
1519 |
1 |
fprintf(stderr, "(node:%d, thread:%" PRIu64 ") ", |
|
1520 |
uv_os_getpid(), thread_id()); |
||
1521 |
} |
||
1522 |
|||
1523 |
2 |
fprintf(stderr, |
|
1524 |
"WARNING: Exited the environment with code %d\n", |
||
1525 |
static_cast<int>(exit_code)); |
||
1526 |
2 |
PrintStackTrace(isolate(), |
|
1527 |
StackTrace::CurrentStackTrace( |
||
1528 |
isolate(), stack_trace_limit(), StackTrace::kDetailed)); |
||
1529 |
} |
||
1530 |
746 |
process_exit_handler_(this, exit_code); |
|
1531 |
63 |
} |
|
1532 |
|||
1533 |
6375 |
void Environment::stop_sub_worker_contexts() { |
|
1534 |
DCHECK_EQ(Isolate::GetCurrent(), isolate()); |
||
1535 |
|||
1536 |
✓✓ | 6375 |
while (!sub_worker_contexts_.empty()) { |
1537 |
28 |
Worker* w = *sub_worker_contexts_.begin(); |
|
1538 |
28 |
remove_sub_worker_context(w); |
|
1539 |
28 |
w->Exit(ExitCode::kGenericUserError); |
|
1540 |
28 |
w->JoinThread(); |
|
1541 |
} |
||
1542 |
6347 |
} |
|
1543 |
|||
1544 |
10 |
Environment* Environment::worker_parent_env() const { |
|
1545 |
✓✗ | 10 |
if (worker_context() == nullptr) return nullptr; |
1546 |
return worker_context()->env(); |
||
1547 |
} |
||
1548 |
|||
1549 |
69280 |
void Environment::AddUnmanagedFd(int fd) { |
|
1550 |
✓✓ | 69280 |
if (!tracks_unmanaged_fds()) return; |
1551 |
2741 |
auto result = unmanaged_fds_.insert(fd); |
|
1552 |
✓✓ | 2741 |
if (!result.second) { |
1553 |
ProcessEmitWarning( |
||
1554 |
1 |
this, "File descriptor %d opened in unmanaged mode twice", fd); |
|
1555 |
} |
||
1556 |
} |
||
1557 |
|||
1558 |
68891 |
void Environment::RemoveUnmanagedFd(int fd) { |
|
1559 |
✓✓ | 68891 |
if (!tracks_unmanaged_fds()) return; |
1560 |
2738 |
size_t removed_count = unmanaged_fds_.erase(fd); |
|
1561 |
✓✓ | 2738 |
if (removed_count == 0) { |
1562 |
ProcessEmitWarning( |
||
1563 |
1 |
this, "File descriptor %d closed but not opened in unmanaged mode", fd); |
|
1564 |
} |
||
1565 |
} |
||
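
The unmanaged-fd bookkeeping above is a plain set plus warnings for double-open and unknown-close. The same logic as a standalone class (the name FdTracker is hypothetical; env.cc routes the warnings through ProcessEmitWarning() rather than stderr):

#include <cstdio>
#include <set>

class FdTracker {
 public:
  void Add(int fd) {
    if (!fds_.insert(fd).second)
      std::fprintf(stderr, "fd %d opened in unmanaged mode twice\n", fd);
  }
  void Remove(int fd) {
    if (fds_.erase(fd) == 0)
      std::fprintf(stderr,
                   "fd %d closed but not opened in unmanaged mode\n", fd);
  }
 private:
  std::set<int> fds_;  // currently open unmanaged descriptors
};
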
1566 |
|||
1567 |
5267 |
void Environment::PrintInfoForSnapshotIfDebug() { |
|
1568 |
✗✓ | 10534 |
if (enabled_debug_list()->enabled(DebugCategory::MKSNAPSHOT)) { |
1569 |
fprintf(stderr, "At the exit of the Environment:\n"); |
||
1570 |
principal_realm()->PrintInfoForSnapshot(); |
||
1571 |
fprintf(stderr, "\nNative modules without cache:\n"); |
||
1572 |
for (const auto& s : builtins_without_cache) { |
||
1573 |
fprintf(stderr, "%s\n", s.c_str()); |
||
1574 |
} |
||
1575 |
fprintf(stderr, "\nNative modules with cache:\n"); |
||
1576 |
for (const auto& s : builtins_with_cache) { |
||
1577 |
fprintf(stderr, "%s\n", s.c_str()); |
||
1578 |
} |
||
1579 |
fprintf(stderr, "\nStatic bindings (need to be registered):\n"); |
||
1580 |
for (const auto mod : internal_bindings) { |
||
1581 |
fprintf(stderr, "%s:%s\n", mod->nm_filename, mod->nm_modname); |
||
1582 |
} |
||
1583 |
} |
||
1584 |
5267 |
} |
|
1585 |
|||
1586 |
7 |
EnvSerializeInfo Environment::Serialize(SnapshotCreator* creator) { |
|
1587 |
7 |
EnvSerializeInfo info; |
|
1588 |
7 |
Local<Context> ctx = context(); |
|
1589 |
|||
1590 |
// Currently all modules are compiled without cache in builtin snapshot |
||
1591 |
// builder. |
||
1592 |
14 |
info.builtins = std::vector<std::string>(builtins_without_cache.begin(), |
|
1593 |
7 |
builtins_without_cache.end()); |
|
1594 |
|||
1595 |
7 |
info.async_hooks = async_hooks_.Serialize(ctx, creator); |
|
1596 |
7 |
info.immediate_info = immediate_info_.Serialize(ctx, creator); |
|
1597 |
7 |
info.tick_info = tick_info_.Serialize(ctx, creator); |
|
1598 |
7 |
info.performance_state = performance_state_->Serialize(ctx, creator); |
|
1599 |
7 |
info.exiting = exiting_.Serialize(ctx, creator); |
|
1600 |
7 |
info.stream_base_state = stream_base_state_.Serialize(ctx, creator); |
|
1601 |
7 |
info.should_abort_on_uncaught_toggle = |
|
1602 |
7 |
should_abort_on_uncaught_toggle_.Serialize(ctx, creator); |
|
1603 |
|||
1604 |
7 |
info.principal_realm = principal_realm_->Serialize(creator); |
|
1605 |
7 |
return info; |
|
1606 |
} |
||
1607 |
|||
1608 |
22264 |
void Environment::EnqueueDeserializeRequest(DeserializeRequestCallback cb, |
|
1609 |
Local<Object> holder, |
||
1610 |
int index, |
||
1611 |
InternalFieldInfoBase* info) { |
||
1612 |
DCHECK_EQ(index, BaseObject::kEmbedderType); |
||
1613 |
44528 |
DeserializeRequest request{cb, {isolate(), holder}, index, info}; |
|
1614 |
22264 |
deserialize_requests_.push_back(std::move(request)); |
|
1615 |
22264 |
} |
|
1616 |
|||
1617 |
5566 |
void Environment::RunDeserializeRequests() { |
|
1618 |
11132 |
HandleScope scope(isolate()); |
|
1619 |
5566 |
Local<Context> ctx = context(); |
|
1620 |
5566 |
Isolate* is = isolate(); |
|
1621 |
✓✓ | 27830 |
while (!deserialize_requests_.empty()) { |
1622 |
44528 |
DeserializeRequest request(std::move(deserialize_requests_.front())); |
|
1623 |
22264 |
deserialize_requests_.pop_front(); |
|
1624 |
22264 |
Local<Object> holder = request.holder.Get(is); |
|
1625 |
22264 |
request.cb(ctx, holder, request.index, request.info); |
|
1626 |
request.holder.Reset(); |
||
1627 |
22264 |
request.info->Delete(); |
|
1628 |
} |
||
1629 |
5566 |
} |
|
1630 |
|||
1631 |
5566 |
void Environment::DeserializeProperties(const EnvSerializeInfo* info) { |
|
1632 |
5566 |
Local<Context> ctx = context(); |
|
1633 |
|||
1634 |
5566 |
RunDeserializeRequests(); |
|
1635 |
|||
1636 |
5566 |
builtins_in_snapshot = info->builtins; |
|
1637 |
5566 |
async_hooks_.Deserialize(ctx); |
|
1638 |
5566 |
immediate_info_.Deserialize(ctx); |
|
1639 |
5566 |
tick_info_.Deserialize(ctx); |
|
1640 |
5566 |
performance_state_->Deserialize(ctx); |
|
1641 |
5566 |
exiting_.Deserialize(ctx); |
|
1642 |
5566 |
stream_base_state_.Deserialize(ctx); |
|
1643 |
5566 |
should_abort_on_uncaught_toggle_.Deserialize(ctx); |
|
1644 |
|||
1645 |
5566 |
principal_realm_->DeserializeProperties(&info->principal_realm); |
|
1646 |
|||
1647 |
✗✓ | 5566 |
if (enabled_debug_list_.enabled(DebugCategory::MKSNAPSHOT)) { |
1648 |
fprintf(stderr, "deserializing...\n"); |
||
1649 |
std::cerr << *info << "\n"; |
||
1650 |
} |
||
1651 |
5566 |
} |
|
1652 |
|||
1653 |
4 |
uint64_t GuessMemoryAvailableToTheProcess() { |
|
1654 |
4 |
uint64_t free_in_system = uv_get_free_memory(); |
|
1655 |
4 |
size_t allowed = uv_get_constrained_memory(); |
|
1656 |
✗✓ | 4 |
if (allowed == 0) { |
1657 |
return free_in_system; |
||
1658 |
} |
||
1659 |
size_t rss; |
||
1660 |
4 |
int err = uv_resident_set_memory(&rss); |
|
1661 |
✗✓ | 4 |
if (err) { |
1662 |
return free_in_system; |
||
1663 |
} |
||
1664 |
✗✓ | 4 |
if (allowed < rss) { |
1665 |
// Something is probably wrong. Fallback to the free memory. |
||
1666 |
return free_in_system; |
||
1667 |
} |
||
1668 |
// There may still be room for swap, but we leave that margin here. |
||
1669 |
4 |
return allowed - rss; |
|
1670 |
} |
||
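
The estimate prefers the constrained (cgroup/job) limit minus the resident set size and falls back to system free memory whenever the limit is unknown or inconsistent. A small demo of the same fallback chain (illustrative only, not part of env.cc):

#include <cinttypes>
#include <cstdio>
#include <uv.h>

int main() {
  uint64_t free_in_system = uv_get_free_memory();
  uint64_t constrained = uv_get_constrained_memory();  // 0 when unknown
  size_t rss = 0;
  uint64_t estimate = free_in_system;
  if (constrained != 0 &&
      uv_resident_set_memory(&rss) == 0 &&
      constrained >= rss) {
    estimate = constrained - rss;  // remaining headroom under the limit
  }
  std::printf("available ~= %" PRIu64 " bytes\n", estimate);
  return 0;
}
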
1671 |
|||
1672 |
25 |
void Environment::BuildEmbedderGraph(Isolate* isolate, |
|
1673 |
EmbedderGraph* graph, |
||
1674 |
void* data) { |
||
1675 |
50 |
MemoryTracker tracker(isolate, graph); |
|
1676 |
25 |
Environment* env = static_cast<Environment*>(data); |
|
1677 |
// Start traversing embedder objects from the root Environment object. |
||
1678 |
25 |
tracker.Track(env); |
|
1679 |
25 |
} |
|
1680 |
|||
1681 |
4 |
size_t Environment::NearHeapLimitCallback(void* data, |
|
1682 |
size_t current_heap_limit, |
||
1683 |
size_t initial_heap_limit) { |
||
1684 |
4 |
Environment* env = static_cast<Environment*>(data); |
|
1685 |
|||
1686 |
Debug(env, |
||
1687 |
DebugCategory::DIAGNOSTICS, |
||
1688 |
"Invoked NearHeapLimitCallback, processing=%d, " |
||
1689 |
"current_limit=%" PRIu64 ", " |
||
1690 |
"initial_limit=%" PRIu64 "\n", |
||
1691 |
4 |
env->is_in_heapsnapshot_heap_limit_callback_, |
|
1692 |
8 |
static_cast<uint64_t>(current_heap_limit), |
|
1693 |
4 |
static_cast<uint64_t>(initial_heap_limit)); |
|
1694 |
|||
1695 |
4 |
size_t max_young_gen_size = env->isolate_data()->max_young_gen_size; |
|
1696 |
4 |
size_t young_gen_size = 0; |
|
1697 |
4 |
size_t old_gen_size = 0; |
|
1698 |
|||
1699 |
4 |
HeapSpaceStatistics stats; |
|
1700 |
4 |
size_t num_heap_spaces = env->isolate()->NumberOfHeapSpaces(); |
|
1701 |
✓✓ | 36 |
for (size_t i = 0; i < num_heap_spaces; ++i) { |
1702 |
32 |
env->isolate()->GetHeapSpaceStatistics(&stats, i); |
|
1703 |
✓✓✓✓ |
60 |
if (strcmp(stats.space_name(), "new_space") == 0 || |
1704 |
✓✓ | 28 |
strcmp(stats.space_name(), "new_large_object_space") == 0) { |
1705 |
8 |
young_gen_size += stats.space_used_size(); |
|
1706 |
} else { |
||
1707 |
24 |
old_gen_size += stats.space_used_size(); |
|
1708 |
} |
||
1709 |
} |
||
1710 |
|||
1711 |
Debug(env, |
||
1712 |
DebugCategory::DIAGNOSTICS, |
||
1713 |
"max_young_gen_size=%" PRIu64 ", " |
||
1714 |
"young_gen_size=%" PRIu64 ", " |
||
1715 |
"old_gen_size=%" PRIu64 ", " |
||
1716 |
"total_size=%" PRIu64 "\n", |
||
1717 |
8 |
static_cast<uint64_t>(max_young_gen_size), |
|
1718 |
8 |
static_cast<uint64_t>(young_gen_size), |
|
1719 |
8 |
static_cast<uint64_t>(old_gen_size), |
|
1720 |
4 |
static_cast<uint64_t>(young_gen_size + old_gen_size)); |
|
1721 |
|||
1722 |
4 |
uint64_t available = GuessMemoryAvailableToTheProcess(); |
|
1723 |
// TODO(joyeecheung): get a better estimate about the native memory |
||
1724 |
// usage into the overhead, e.g. based on the count of objects. |
||
1725 |
4 |
uint64_t estimated_overhead = max_young_gen_size; |
|
1726 |
Debug(env, |
||
1727 |
DebugCategory::DIAGNOSTICS, |
||
1728 |
"Estimated available memory=%" PRIu64 ", " |
||
1729 |
"estimated overhead=%" PRIu64 "\n", |
||
1730 |
8 |
static_cast<uint64_t>(available), |
|
1731 |
4 |
static_cast<uint64_t>(estimated_overhead)); |
|
1732 |
|||
1733 |
// This might be hit when the snapshot is being taken in another |
||
1734 |
// NearHeapLimitCallback invocation. |
||
1735 |
// When taking the snapshot, objects in the young generation may be |
||
1736 |
// promoted to the old generation, resulting in increased heap usage, |
||
1737 |
// but it should be no more than the young generation size. |
||
1738 |
// Ideally, this should be as small as possible - the heap limit |
||
1739 |
// can only be restored when the heap usage falls down below the |
||
1740 |
// new limit, so in a heap with unbounded growth the isolate |
||
1741 |
// may eventually crash with this new limit - effectively raising |
||
1742 |
// the heap limit to the new one. |
||
1743 |
4 |
size_t new_limit = current_heap_limit + max_young_gen_size; |
|
1744 |
✓✓ | 4 |
if (env->is_in_heapsnapshot_heap_limit_callback_) { |
1745 |
Debug(env, |
||
1746 |
DebugCategory::DIAGNOSTICS, |
||
1747 |
"Not generating snapshots in nested callback. " |
||
1748 |
"new_limit=%" PRIu64 "\n", |
||
1749 |
2 |
static_cast<uint64_t>(new_limit)); |
|
1750 |
2 |
return new_limit; |
|
1751 |
} |
||
1752 |
|||
1753 |
// Estimate whether the snapshot is going to use up all the memory |
||
1754 |
// available to the process. If so, just give up to prevent the system |
||
1755 |
// from killing the process due to a system-level OOM. |
||
1756 |
✗✓ | 2 |
if (estimated_overhead > available) { |
1757 |
Debug(env, |
||
1758 |
DebugCategory::DIAGNOSTICS, |
||
1759 |
"Not generating snapshots because it's too risky.\n"); |
||
1760 |
env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
||
1761 |
// The new limit must be higher than current_heap_limit or V8 might |
||
1762 |
// crash. |
||
1763 |
return new_limit; |
||
1764 |
} |
||
1765 |
|||
1766 |
// Take the snapshot synchronously. |
||
1767 |
2 |
env->is_in_heapsnapshot_heap_limit_callback_ = true; |
|
1768 |
|||
1769 |
4 |
std::string dir = env->options()->diagnostic_dir; |
|
1770 |
✓✗ | 2 |
if (dir.empty()) { |
1771 |
2 |
dir = env->GetCwd(); |
|
1772 |
} |
||
1773 |
4 |
DiagnosticFilename name(env, "Heap", "heapsnapshot"); |
|
1774 |
2 |
std::string filename = dir + kPathSeparator + (*name); |
|
1775 |
|||
1776 |
2 |
Debug(env, DebugCategory::DIAGNOSTICS, "Start generating %s...\n", *name); |
|
1777 |
|||
1778 |
2 |
heap::WriteSnapshot(env, filename.c_str()); |
|
1779 |
2 |
env->heap_limit_snapshot_taken_ += 1; |
|
1780 |
|||
1781 |
Debug(env, |
||
1782 |
DebugCategory::DIAGNOSTICS, |
||
1783 |
"%" PRIu32 "/%" PRIu32 " snapshots taken.\n", |
||
1784 |
2 |
env->heap_limit_snapshot_taken_, |
|
1785 |
2 |
env->heap_snapshot_near_heap_limit_); |
|
1786 |
|||
1787 |
// Don't take more snapshots than the limit specified. |
||
1788 |
✓✗ | 2 |
if (env->heap_limit_snapshot_taken_ == env->heap_snapshot_near_heap_limit_) { |
1789 |
Debug(env, |
||
1790 |
DebugCategory::DIAGNOSTICS, |
||
1791 |
"Removing the near heap limit callback"); |
||
1792 |
2 |
env->RemoveHeapSnapshotNearHeapLimitCallback(0); |
|
1793 |
} |
||
1794 |
|||
1795 |
2 |
FPrintF(stderr, "Wrote snapshot to %s\n", filename.c_str()); |
|
1796 |
// Tell V8 to reset the heap limit once the heap usage falls to |
||
1797 |
// 95% of the initial limit. |
||
1798 |
2 |
env->isolate()->AutomaticallyRestoreInitialHeapLimit(0.95); |
|
1799 |
|||
1800 |
2 |
env->is_in_heapsnapshot_heap_limit_callback_ = false; |
|
1801 |
|||
1802 |
// The new limit must be higher than current_heap_limit or V8 might |
||
1803 |
// crash. |
||
1804 |
2 |
return new_limit; |
|
1805 |
} |
||
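
NearHeapLimitCallback() is installed through V8's near-heap-limit hook; the essential registration pattern, stripped of the snapshot logic, looks roughly like the sketch below (the names RaiseLimit/InstallCallback and the 32 MB margin are hypothetical examples):

#include <v8.h>

// Must return a value greater than current_heap_limit, or V8 may crash.
size_t RaiseLimit(void* /*data*/,
                  size_t current_heap_limit,
                  size_t /*initial_heap_limit*/) {
  return current_heap_limit + 32 * 1024 * 1024;
}

void InstallCallback(v8::Isolate* isolate) {
  isolate->AddNearHeapLimitCallback(RaiseLimit, nullptr);
  // Restore the original limit once usage drops to 95% of it, as env.cc does.
  isolate->AutomaticallyRestoreInitialHeapLimit(0.95);
}
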
1806 |
|||
1807 |
25 |
inline size_t Environment::SelfSize() const { |
|
1808 |
25 |
size_t size = sizeof(*this); |
|
1809 |
// Remove non-pointer fields that will be tracked in MemoryInfo() |
||
1810 |
// TODO(joyeecheung): refactor the MemoryTracker interface so |
||
1811 |
// this can be done for common types within the Track* calls automatically |
||
1812 |
// if a certain scope is entered. |
||
1813 |
25 |
size -= sizeof(async_hooks_); |
|
1814 |
25 |
size -= sizeof(cleanup_queue_); |
|
1815 |
25 |
size -= sizeof(tick_info_); |
|
1816 |
25 |
size -= sizeof(immediate_info_); |
|
1817 |
25 |
return size; |
|
1818 |
} |
||
1819 |
|||
1820 |
25 |
void Environment::MemoryInfo(MemoryTracker* tracker) const { |
|
1821 |
// Iterable STL containers have their own sizes subtracted from the parent |
||
1822 |
// by default. |
||
1823 |
25 |
tracker->TrackField("isolate_data", isolate_data_); |
|
1824 |
25 |
tracker->TrackField("builtins_with_cache", builtins_with_cache); |
|
1825 |
25 |
tracker->TrackField("builtins_without_cache", builtins_without_cache); |
|
1826 |
25 |
tracker->TrackField("destroy_async_id_list", destroy_async_id_list_); |
|
1827 |
25 |
tracker->TrackField("exec_argv", exec_argv_); |
|
1828 |
25 |
tracker->TrackField("exiting", exiting_); |
|
1829 |
25 |
tracker->TrackField("should_abort_on_uncaught_toggle", |
|
1830 |
25 |
should_abort_on_uncaught_toggle_); |
|
1831 |
25 |
tracker->TrackField("stream_base_state", stream_base_state_); |
|
1832 |
25 |
tracker->TrackField("cleanup_queue", cleanup_queue_); |
|
1833 |
25 |
tracker->TrackField("async_hooks", async_hooks_); |
|
1834 |
25 |
tracker->TrackField("immediate_info", immediate_info_); |
|
1835 |
25 |
tracker->TrackField("tick_info", tick_info_); |
|
1836 |
25 |
tracker->TrackField("principal_realm", principal_realm_); |
|
1837 |
|||
1838 |
// FIXME(joyeecheung): track other fields in Environment. |
||
1839 |
// Currently MemoryTracker is unable to track these |
||
1840 |
// correctly: |
||
1841 |
// - Internal types that do not implement MemoryRetainer yet |
||
1842 |
// - STL containers with MemoryRetainer* inside |
||
1843 |
// - STL containers with numeric types inside that should not have their |
||
1844 |
// nodes elided e.g. numeric keys in maps. |
||
1845 |
// We also need to make sure that when we add a non-pointer field as its own |
||
1846 |
// node, we shift its sizeof() size out of the Environment node. |
||
1847 |
25 |
} |
|
1848 |
|||
1849 |
771695 |
void Environment::RunWeakRefCleanup() { |
|
1850 |
771695 |
isolate()->ClearKeptObjects(); |
|
1851 |
771695 |
} |
|
1852 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |