GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "inspector_agent.h" |
||
2 |
|||
3 |
#include "env-inl.h" |
||
4 |
#include "inspector/main_thread_interface.h" |
||
5 |
#include "inspector/node_string.h" |
||
6 |
#include "inspector/runtime_agent.h" |
||
7 |
#include "inspector/tracing_agent.h" |
||
8 |
#include "inspector/worker_agent.h" |
||
9 |
#include "inspector/worker_inspector.h" |
||
10 |
#include "inspector_io.h" |
||
11 |
#include "node/inspector/protocol/Protocol.h" |
||
12 |
#include "node_errors.h" |
||
13 |
#include "node_internals.h" |
||
14 |
#include "node_options-inl.h" |
||
15 |
#include "node_process-inl.h" |
||
16 |
#include "node_url.h" |
||
17 |
#include "util-inl.h" |
||
18 |
#include "timer_wrap-inl.h" |
||
19 |
#include "v8-inspector.h" |
||
20 |
#include "v8-platform.h" |
||
21 |
|||
22 |
#include "libplatform/libplatform.h" |
||
23 |
|||
24 |
#ifdef __POSIX__ |
||
25 |
#include <pthread.h> |
||
26 |
#include <climits> // PTHREAD_STACK_MIN |
||
27 |
#endif // __POSIX__ |
||
28 |
|||
29 |
#include <algorithm> |
||
30 |
#include <cstring> |
||
31 |
#include <sstream> |
||
32 |
#include <unordered_map> |
||
33 |
#include <vector> |
||
34 |
|||
35 |
namespace node { |
||
36 |
namespace inspector { |
||
37 |
namespace { |
||
38 |
|||
39 |
using node::FatalError; |
||
40 |
|||
41 |
using v8::Context; |
||
42 |
using v8::Function; |
||
43 |
using v8::HandleScope; |
||
44 |
using v8::Isolate; |
||
45 |
using v8::Local; |
||
46 |
using v8::Message; |
||
47 |
using v8::Object; |
||
48 |
using v8::Value; |
||
49 |
|||
50 |
using v8_inspector::StringBuffer; |
||
51 |
using v8_inspector::StringView; |
||
52 |
using v8_inspector::V8Inspector; |
||
53 |
using v8_inspector::V8InspectorClient; |
||
54 |
|||
55 |
static uv_sem_t start_io_thread_semaphore; |
||
56 |
static uv_async_t start_io_thread_async; |
||
57 |
// This is just an additional check to make sure start_io_thread_async |
||
58 |
// is not accidentally re-used or used when uninitialized. |
||
59 |
static std::atomic_bool start_io_thread_async_initialized { false }; |
||
60 |
// Protects the Agent* stored in start_io_thread_async.data. |
||
61 |
static Mutex start_io_thread_async_mutex; |
||
62 |
|||
63 |
4 |
std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
|
64 |
Local<Value> value) { |
||
65 |
4 |
TwoByteValue buffer(isolate, value); |
|
66 |
4 |
return StringBuffer::create(StringView(*buffer, buffer.length())); |
|
67 |
} |
||
68 |
|||
69 |
// Called on the main thread. |
||
70 |
2 |
void StartIoThreadAsyncCallback(uv_async_t* handle) { |
|
71 |
2 |
static_cast<Agent*>(handle->data)->StartIoThread(); |
|
72 |
2 |
} |
|
73 |
|||
74 |
|||
75 |
#ifdef __POSIX__ |
||
76 |
2 |
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) { |
|
77 |
2 |
uv_sem_post(&start_io_thread_semaphore); |
|
78 |
2 |
} |
|
79 |
|||
80 |
5574 |
inline void* StartIoThreadMain(void* unused) { |
|
81 |
for (;;) { |
||
82 |
5574 |
uv_sem_wait(&start_io_thread_semaphore); |
|
83 |
4 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
84 |
|||
85 |
✗✓ | 2 |
CHECK(start_io_thread_async_initialized); |
86 |
2 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
|
87 |
✓✗ | 2 |
if (agent != nullptr) |
88 |
2 |
agent->RequestIoThreadStart(); |
|
89 |
2 |
} |
|
90 |
} |
||
91 |
|||
92 |
5572 |
static int StartDebugSignalHandler() { |
|
93 |
// Start a watchdog thread for calling v8::Debug::DebugBreak() because |
||
94 |
// it's not safe to call directly from the signal handler, it can |
||
95 |
// deadlock with the thread it interrupts. |
||
96 |
✗✓ | 5572 |
CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0)); |
97 |
pthread_attr_t attr; |
||
98 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_attr_init(&attr)); |
99 |
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
100 |
// PTHREAD_STACK_MIN is 2 KiB with musl libc, which is too small to safely |
||
101 |
// receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KiB on arm64, which |
||
102 |
// is the musl architecture with the biggest MINSIGSTKSZ so let's use that |
||
103 |
// as a lower bound and let's quadruple it just in case. The goal is to avoid |
||
104 |
// creating a big 2 or 4 MiB address space gap (problematic on 32 bits |
||
105 |
// because of fragmentation), not squeeze out every last byte. |
||
106 |
// Omitted on FreeBSD because it doesn't seem to like small stacks. |
||
107 |
5572 |
const size_t stack_size = std::max(static_cast<size_t>(4 * 8192), |
|
108 |
static_cast<size_t>(PTHREAD_STACK_MIN)); |
||
109 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
110 |
#endif // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
111 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
112 |
sigset_t sigmask; |
||
113 |
// Mask all signals. |
||
114 |
5572 |
sigfillset(&sigmask); |
|
115 |
sigset_t savemask; |
||
116 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask)); |
117 |
5572 |
sigmask = savemask; |
|
118 |
pthread_t thread; |
||
119 |
5572 |
const int err = pthread_create(&thread, &attr, |
|
120 |
StartIoThreadMain, nullptr); |
||
121 |
// Restore original mask |
||
122 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr)); |
123 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_attr_destroy(&attr)); |
124 |
✗✓ | 5572 |
if (err != 0) { |
125 |
fprintf(stderr, "node[%u]: pthread_create: %s\n", |
||
126 |
uv_os_getpid(), strerror(err)); |
||
127 |
fflush(stderr); |
||
128 |
// Leave SIGUSR1 blocked. We don't install a signal handler, |
||
129 |
// receiving the signal would terminate the process. |
||
130 |
return -err; |
||
131 |
} |
||
132 |
5572 |
RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup); |
|
133 |
// Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered. |
||
134 |
5572 |
sigemptyset(&sigmask); |
|
135 |
5572 |
sigaddset(&sigmask, SIGUSR1); |
|
136 |
✗✓ | 5572 |
CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr)); |
137 |
5572 |
return 0; |
|
138 |
} |
||
139 |
#endif // __POSIX__ |
||
140 |
|||
141 |
|||
142 |
#ifdef _WIN32 |
||
143 |
DWORD WINAPI StartIoThreadProc(void* arg) { |
||
144 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
||
145 |
CHECK(start_io_thread_async_initialized); |
||
146 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
||
147 |
if (agent != nullptr) |
||
148 |
agent->RequestIoThreadStart(); |
||
149 |
return 0; |
||
150 |
} |
||
151 |
|||
152 |
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf, |
||
153 |
size_t buf_len) { |
||
154 |
return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid); |
||
155 |
} |
||
156 |
|||
157 |
static int StartDebugSignalHandler() { |
||
158 |
wchar_t mapping_name[32]; |
||
159 |
HANDLE mapping_handle; |
||
160 |
DWORD pid; |
||
161 |
LPTHREAD_START_ROUTINE* handler; |
||
162 |
|||
163 |
pid = uv_os_getpid(); |
||
164 |
|||
165 |
if (GetDebugSignalHandlerMappingName(pid, |
||
166 |
mapping_name, |
||
167 |
arraysize(mapping_name)) < 0) { |
||
168 |
return -1; |
||
169 |
} |
||
170 |
|||
171 |
mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE, |
||
172 |
nullptr, |
||
173 |
PAGE_READWRITE, |
||
174 |
0, |
||
175 |
sizeof *handler, |
||
176 |
mapping_name); |
||
177 |
if (mapping_handle == nullptr) { |
||
178 |
return -1; |
||
179 |
} |
||
180 |
|||
181 |
handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>( |
||
182 |
MapViewOfFile(mapping_handle, |
||
183 |
FILE_MAP_ALL_ACCESS, |
||
184 |
0, |
||
185 |
0, |
||
186 |
sizeof *handler)); |
||
187 |
if (handler == nullptr) { |
||
188 |
CloseHandle(mapping_handle); |
||
189 |
return -1; |
||
190 |
} |
||
191 |
|||
192 |
*handler = StartIoThreadProc; |
||
193 |
|||
194 |
UnmapViewOfFile(static_cast<void*>(handler)); |
||
195 |
|||
196 |
return 0; |
||
197 |
} |
||
198 |
#endif // _WIN32 |
||
199 |
|||
200 |
|||
201 |
const int CONTEXT_GROUP_ID = 1; |
||
202 |
|||
203 |
721 |
std::string GetWorkerLabel(node::Environment* env) { |
|
204 |
1442 |
std::ostringstream result; |
|
205 |
721 |
result << "Worker[" << env->thread_id() << "]"; |
|
206 |
721 |
return result.str(); |
|
207 |
} |
||
208 |
|||
209 |
class ChannelImpl final : public v8_inspector::V8Inspector::Channel, |
||
210 |
public protocol::FrontendChannel { |
||
211 |
public: |
||
212 |
7265 |
explicit ChannelImpl(Environment* env, |
|
213 |
const std::unique_ptr<V8Inspector>& inspector, |
||
214 |
std::shared_ptr<WorkerManager> worker_manager, |
||
215 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
216 |
std::shared_ptr<MainThreadHandle> main_thread_, |
||
217 |
bool prevent_shutdown) |
||
218 |
14530 |
: delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown), |
|
219 |
7265 |
retaining_context_(false) { |
|
220 |
7265 |
session_ = inspector->connect(CONTEXT_GROUP_ID, this, StringView()); |
|
221 |
7265 |
node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this); |
|
222 |
tracing_agent_ = |
||
223 |
7265 |
std::make_unique<protocol::TracingAgent>(env, main_thread_); |
|
224 |
7265 |
tracing_agent_->Wire(node_dispatcher_.get()); |
|
225 |
✓✓ | 7265 |
if (worker_manager) { |
226 |
6529 |
worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager); |
|
227 |
6529 |
worker_agent_->Wire(node_dispatcher_.get()); |
|
228 |
} |
||
229 |
7265 |
runtime_agent_ = std::make_unique<protocol::RuntimeAgent>(); |
|
230 |
7265 |
runtime_agent_->Wire(node_dispatcher_.get()); |
|
231 |
7265 |
} |
|
232 |
|||
233 |
26396 |
~ChannelImpl() override { |
|
234 |
13198 |
tracing_agent_->disable(); |
|
235 |
13198 |
tracing_agent_.reset(); // Dispose before the dispatchers |
|
236 |
✓✓ | 13198 |
if (worker_agent_) { |
237 |
11726 |
worker_agent_->disable(); |
|
238 |
11726 |
worker_agent_.reset(); // Dispose before the dispatchers |
|
239 |
} |
||
240 |
13198 |
runtime_agent_->disable(); |
|
241 |
13198 |
runtime_agent_.reset(); // Dispose before the dispatchers |
|
242 |
26396 |
} |
|
243 |
|||
244 |
20320 |
void dispatchProtocolMessage(const StringView& message) { |
|
245 |
40640 |
std::string raw_message = protocol::StringUtil::StringViewToUtf8(message); |
|
246 |
std::unique_ptr<protocol::DictionaryValue> value = |
||
247 |
20320 |
protocol::DictionaryValue::cast(protocol::StringUtil::parseMessage( |
|
248 |
40640 |
raw_message, false)); |
|
249 |
int call_id; |
||
250 |
40640 |
std::string method; |
|
251 |
20320 |
node_dispatcher_->parseCommand(value.get(), &call_id, &method); |
|
252 |
20320 |
if (v8_inspector::V8InspectorSession::canDispatchMethod( |
|
253 |
✓✓ | 40640 |
Utf8ToStringView(method)->string())) { |
254 |
20281 |
session_->dispatchProtocolMessage(message); |
|
255 |
} else { |
||
256 |
39 |
node_dispatcher_->dispatch(call_id, method, std::move(value), |
|
257 |
raw_message); |
||
258 |
} |
||
259 |
20320 |
} |
|
260 |
|||
261 |
38 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
262 |
76 |
std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason); |
|
263 |
38 |
session_->schedulePauseOnNextStatement(buffer->string(), buffer->string()); |
|
264 |
38 |
} |
|
265 |
|||
266 |
6598 |
bool preventShutdown() { |
|
267 |
6598 |
return prevent_shutdown_; |
|
268 |
} |
||
269 |
|||
270 |
6339 |
bool notifyWaitingForDisconnect() { |
|
271 |
6339 |
retaining_context_ = runtime_agent_->notifyWaitingForDisconnect(); |
|
272 |
6339 |
return retaining_context_; |
|
273 |
} |
||
274 |
|||
275 |
934 |
bool retainingContext() { |
|
276 |
934 |
return retaining_context_; |
|
277 |
} |
||
278 |
|||
279 |
private: |
||
280 |
20281 |
void sendResponse( |
|
281 |
int callId, |
||
282 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
283 |
20281 |
sendMessageToFrontend(message->string()); |
|
284 |
20281 |
} |
|
285 |
|||
286 |
7865 |
void sendNotification( |
|
287 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
288 |
7865 |
sendMessageToFrontend(message->string()); |
|
289 |
7865 |
} |
|
290 |
|||
291 |
1346 |
void flushProtocolNotifications() override { } |
|
292 |
|||
293 |
28808 |
void sendMessageToFrontend(const StringView& message) { |
|
294 |
28808 |
delegate_->SendMessageToFrontend(message); |
|
295 |
28808 |
} |
|
296 |
|||
297 |
662 |
void sendMessageToFrontend(const std::string& message) { |
|
298 |
662 |
sendMessageToFrontend(Utf8ToStringView(message)->string()); |
|
299 |
662 |
} |
|
300 |
|||
301 |
using Serializable = protocol::Serializable; |
||
302 |
|||
303 |
39 |
void sendProtocolResponse(int callId, |
|
304 |
std::unique_ptr<Serializable> message) override { |
||
305 |
39 |
sendMessageToFrontend(message->serializeToJSON()); |
|
306 |
39 |
} |
|
307 |
623 |
void sendProtocolNotification( |
|
308 |
std::unique_ptr<Serializable> message) override { |
||
309 |
623 |
sendMessageToFrontend(message->serializeToJSON()); |
|
310 |
623 |
} |
|
311 |
|||
312 |
void fallThrough(int callId, |
||
313 |
const std::string& method, |
||
314 |
const std::string& message) override { |
||
315 |
DCHECK(false); |
||
316 |
} |
||
317 |
|||
318 |
std::unique_ptr<protocol::RuntimeAgent> runtime_agent_; |
||
319 |
std::unique_ptr<protocol::TracingAgent> tracing_agent_; |
||
320 |
std::unique_ptr<protocol::WorkerAgent> worker_agent_; |
||
321 |
std::unique_ptr<InspectorSessionDelegate> delegate_; |
||
322 |
std::unique_ptr<v8_inspector::V8InspectorSession> session_; |
||
323 |
std::unique_ptr<protocol::UberDispatcher> node_dispatcher_; |
||
324 |
bool prevent_shutdown_; |
||
325 |
bool retaining_context_; |
||
326 |
}; |
||
327 |
|||
328 |
class SameThreadInspectorSession : public InspectorSession { |
||
329 |
public: |
||
330 |
7265 |
SameThreadInspectorSession( |
|
331 |
int session_id, std::shared_ptr<NodeInspectorClient> client) |
||
332 |
7265 |
: session_id_(session_id), client_(client) {} |
|
333 |
~SameThreadInspectorSession() override; |
||
334 |
void Dispatch(const v8_inspector::StringView& message) override; |
||
335 |
|||
336 |
private: |
||
337 |
int session_id_; |
||
338 |
std::weak_ptr<NodeInspectorClient> client_; |
||
339 |
}; |
||
340 |
|||
341 |
112 |
void NotifyClusterWorkersDebugEnabled(Environment* env) { |
|
342 |
112 |
Isolate* isolate = env->isolate(); |
|
343 |
112 |
HandleScope handle_scope(isolate); |
|
344 |
112 |
Local<Context> context = env->context(); |
|
345 |
|||
346 |
// Send message to enable debug in cluster workers |
||
347 |
112 |
Local<Object> message = Object::New(isolate); |
|
348 |
112 |
message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"), |
|
349 |
448 |
FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check(); |
|
350 |
112 |
ProcessEmit(env, "internalMessage", message); |
|
351 |
112 |
} |
|
352 |
|||
353 |
#ifdef _WIN32 |
||
354 |
bool IsFilePath(const std::string& path) { |
||
355 |
// '\\' |
||
356 |
if (path.length() > 2 && path[0] == '\\' && path[1] == '\\') |
||
357 |
return true; |
||
358 |
// '[A-Z]:[/\\]' |
||
359 |
if (path.length() < 3) |
||
360 |
return false; |
||
361 |
if ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) |
||
362 |
return path[1] == ':' && (path[2] == '/' || path[2] == '\\'); |
||
363 |
return false; |
||
364 |
} |
||
365 |
#else |
||
366 |
716901 |
bool IsFilePath(const std::string& path) { |
|
367 |
✓✓✓✓ |
716901 |
return !path.empty() && path[0] == '/'; |
368 |
} |
||
369 |
#endif // __POSIX__ |
||
370 |
|||
371 |
void ThrowUninitializedInspectorError(Environment* env) { |
||
372 |
HandleScope scope(env->isolate()); |
||
373 |
|||
374 |
const char* msg = "This Environment was initialized without a V8::Inspector"; |
||
375 |
Local<Value> exception = |
||
376 |
v8::String::NewFromUtf8(env->isolate(), msg).ToLocalChecked(); |
||
377 |
|||
378 |
env->isolate()->ThrowException(exception); |
||
379 |
} |
||
380 |
|||
381 |
} // namespace |
||
382 |
|||
383 |
class NodeInspectorClient : public V8InspectorClient { |
||
384 |
public: |
||
385 |
6295 |
explicit NodeInspectorClient(node::Environment* env, bool is_main) |
|
386 |
6295 |
: env_(env), is_main_(is_main) { |
|
387 |
6295 |
client_ = V8Inspector::create(env->isolate(), this); |
|
388 |
// TODO(bnoordhuis) Make name configurable from src/node.cc. |
||
389 |
std::string name = |
||
390 |
✓✓ | 12590 |
is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env); |
391 |
12590 |
ContextInfo info(name); |
|
392 |
6295 |
info.is_default = true; |
|
393 |
6295 |
contextCreated(env->context(), info); |
|
394 |
6295 |
} |
|
395 |
|||
396 |
40 |
void runMessageLoopOnPause(int context_group_id) override { |
|
397 |
40 |
waiting_for_resume_ = true; |
|
398 |
40 |
runMessageLoop(); |
|
399 |
40 |
} |
|
400 |
|||
401 |
112 |
void waitForSessionsDisconnect() { |
|
402 |
112 |
waiting_for_sessions_disconnect_ = true; |
|
403 |
112 |
runMessageLoop(); |
|
404 |
112 |
} |
|
405 |
|||
406 |
20 |
void waitForFrontend() { |
|
407 |
20 |
waiting_for_frontend_ = true; |
|
408 |
20 |
runMessageLoop(); |
|
409 |
20 |
} |
|
410 |
|||
411 |
9 |
void maxAsyncCallStackDepthChanged(int depth) override { |
|
412 |
✓✓ | 9 |
if (waiting_for_sessions_disconnect_) { |
413 |
// V8 isolate is mostly done and is only letting Inspector protocol |
||
414 |
// clients gather data. |
||
415 |
4 |
return; |
|
416 |
} |
||
417 |
✓✗ | 5 |
if (auto agent = env_->inspector_agent()) { |
418 |
✓✓ | 5 |
if (depth == 0) { |
419 |
1 |
agent->DisableAsyncHook(); |
|
420 |
} else { |
||
421 |
4 |
agent->EnableAsyncHook(); |
|
422 |
} |
||
423 |
} |
||
424 |
} |
||
425 |
|||
426 |
6922 |
void contextCreated(Local<Context> context, const ContextInfo& info) { |
|
427 |
13844 |
auto name_buffer = Utf8ToStringView(info.name); |
|
428 |
13844 |
auto origin_buffer = Utf8ToStringView(info.origin); |
|
429 |
6922 |
std::unique_ptr<StringBuffer> aux_data_buffer; |
|
430 |
|||
431 |
v8_inspector::V8ContextInfo v8info( |
||
432 |
6922 |
context, CONTEXT_GROUP_ID, name_buffer->string()); |
|
433 |
6922 |
v8info.origin = origin_buffer->string(); |
|
434 |
|||
435 |
✓✓ | 6922 |
if (info.is_default) { |
436 |
6295 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}"); |
|
437 |
} else { |
||
438 |
627 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}"); |
|
439 |
} |
||
440 |
6922 |
v8info.auxData = aux_data_buffer->string(); |
|
441 |
|||
442 |
6922 |
client_->contextCreated(v8info); |
|
443 |
6922 |
} |
|
444 |
|||
445 |
6278 |
void contextDestroyed(Local<Context> context) { |
|
446 |
6278 |
client_->contextDestroyed(context); |
|
447 |
6278 |
} |
|
448 |
|||
449 |
28 |
void quitMessageLoopOnPause() override { |
|
450 |
28 |
waiting_for_resume_ = false; |
|
451 |
28 |
} |
|
452 |
|||
453 |
21 |
void runIfWaitingForDebugger(int context_group_id) override { |
|
454 |
21 |
waiting_for_frontend_ = false; |
|
455 |
21 |
} |
|
456 |
|||
457 |
7265 |
int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate, |
|
458 |
bool prevent_shutdown) { |
||
459 |
7265 |
int session_id = next_session_id_++; |
|
460 |
14530 |
channels_[session_id] = std::make_unique<ChannelImpl>(env_, |
|
461 |
7265 |
client_, |
|
462 |
14530 |
getWorkerManager(), |
|
463 |
7265 |
std::move(delegate), |
|
464 |
14530 |
getThreadHandle(), |
|
465 |
7265 |
prevent_shutdown); |
|
466 |
7265 |
return session_id; |
|
467 |
} |
||
468 |
|||
469 |
931 |
void disconnectFrontend(int session_id) { |
|
470 |
931 |
auto it = channels_.find(session_id); |
|
471 |
✗✓ | 931 |
if (it == channels_.end()) |
472 |
return; |
||
473 |
931 |
bool retaining_context = it->second->retainingContext(); |
|
474 |
931 |
channels_.erase(it); |
|
475 |
✓✓ | 931 |
if (retaining_context) { |
476 |
✓✓ | 6 |
for (const auto& id_channel : channels_) { |
477 |
✗✓ | 3 |
if (id_channel.second->retainingContext()) |
478 |
return; |
||
479 |
} |
||
480 |
3 |
contextDestroyed(env_->context()); |
|
481 |
} |
||
482 |
✓✓✓✓ |
931 |
if (waiting_for_sessions_disconnect_ && !is_main_) |
483 |
2 |
waiting_for_sessions_disconnect_ = false; |
|
484 |
} |
||
485 |
|||
486 |
20320 |
void dispatchMessageFromFrontend(int session_id, const StringView& message) { |
|
487 |
20320 |
channels_[session_id]->dispatchProtocolMessage(message); |
|
488 |
20320 |
} |
|
489 |
|||
490 |
307 |
Local<Context> ensureDefaultContextInGroup(int contextGroupId) override { |
|
491 |
307 |
return env_->context(); |
|
492 |
} |
||
493 |
|||
494 |
3 |
void installAdditionalCommandLineAPI(Local<Context> context, |
|
495 |
Local<Object> target) override { |
||
496 |
3 |
Local<Function> installer = env_->inspector_console_extension_installer(); |
|
497 |
✓✗ | 3 |
if (!installer.IsEmpty()) { |
498 |
3 |
Local<Value> argv[] = {target}; |
|
499 |
// If there is an exception, proceed in JS land |
||
500 |
3 |
USE(installer->Call(context, target, arraysize(argv), argv)); |
|
501 |
} |
||
502 |
3 |
} |
|
503 |
|||
504 |
2 |
void ReportUncaughtException(Local<Value> error, Local<Message> message) { |
|
505 |
2 |
Isolate* isolate = env_->isolate(); |
|
506 |
2 |
Local<Context> context = env_->context(); |
|
507 |
|||
508 |
4 |
int script_id = message->GetScriptOrigin().ScriptId(); |
|
509 |
|||
510 |
2 |
Local<v8::StackTrace> stack_trace = message->GetStackTrace(); |
|
511 |
|||
512 |
✓✓✓✗ ✓✗ |
4 |
if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 && |
513 |
✓✓ | 4 |
script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) { |
514 |
1 |
script_id = 0; |
|
515 |
} |
||
516 |
|||
517 |
2 |
const uint8_t DETAILS[] = "Uncaught"; |
|
518 |
|||
519 |
4 |
client_->exceptionThrown( |
|
520 |
context, |
||
521 |
StringView(DETAILS, sizeof(DETAILS) - 1), |
||
522 |
error, |
||
523 |
6 |
ToProtocolString(isolate, message->Get())->string(), |
|
524 |
4 |
ToProtocolString(isolate, message->GetScriptResourceName())->string(), |
|
525 |
✓✗ | 4 |
message->GetLineNumber(context).FromMaybe(0), |
526 |
✓✗ | 4 |
message->GetStartColumn(context).FromMaybe(0), |
527 |
4 |
client_->createStackTrace(stack_trace), |
|
528 |
2 |
script_id); |
|
529 |
2 |
} |
|
530 |
|||
531 |
2 |
void startRepeatingTimer(double interval_s, |
|
532 |
TimerCallback callback, |
||
533 |
void* data) override { |
||
534 |
auto result = |
||
535 |
2 |
timers_.emplace(std::piecewise_construct, std::make_tuple(data), |
|
536 |
7 |
std::make_tuple(env_, [=]() { callback(data); })); |
|
537 |
✗✓ | 2 |
CHECK(result.second); |
538 |
2 |
uint64_t interval = static_cast<uint64_t>(1000 * interval_s); |
|
539 |
2 |
result.first->second.Update(interval, interval); |
|
540 |
2 |
} |
|
541 |
|||
542 |
2 |
void cancelTimer(void* data) override { |
|
543 |
2 |
timers_.erase(data); |
|
544 |
2 |
} |
|
545 |
|||
546 |
// Async stack traces instrumentation. |
||
547 |
3 |
void AsyncTaskScheduled(const StringView& task_name, void* task, |
|
548 |
bool recurring) { |
||
549 |
3 |
client_->asyncTaskScheduled(task_name, task, recurring); |
|
550 |
3 |
} |
|
551 |
|||
552 |
4 |
void AsyncTaskCanceled(void* task) { |
|
553 |
4 |
client_->asyncTaskCanceled(task); |
|
554 |
4 |
} |
|
555 |
|||
556 |
6 |
void AsyncTaskStarted(void* task) { |
|
557 |
6 |
client_->asyncTaskStarted(task); |
|
558 |
6 |
} |
|
559 |
|||
560 |
10 |
void AsyncTaskFinished(void* task) { |
|
561 |
10 |
client_->asyncTaskFinished(task); |
|
562 |
10 |
} |
|
563 |
|||
564 |
void AllAsyncTasksCanceled() { |
||
565 |
client_->allAsyncTasksCanceled(); |
||
566 |
} |
||
567 |
|||
568 |
19 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
569 |
✓✓ | 57 |
for (const auto& id_channel : channels_) { |
570 |
38 |
id_channel.second->schedulePauseOnNextStatement(reason); |
|
571 |
} |
||
572 |
19 |
} |
|
573 |
|||
574 |
6470 |
bool hasConnectedSessions() { |
|
575 |
✓✓ | 12973 |
for (const auto& id_channel : channels_) { |
576 |
// Other sessions are "invisible" for most purposes |
||
577 |
✓✓ | 6598 |
if (id_channel.second->preventShutdown()) |
578 |
95 |
return true; |
|
579 |
} |
||
580 |
6375 |
return false; |
|
581 |
} |
||
582 |
|||
583 |
6278 |
bool notifyWaitingForDisconnect() { |
|
584 |
6278 |
bool retaining_context = false; |
|
585 |
✓✓ | 12617 |
for (const auto& id_channel : channels_) { |
586 |
✓✓ | 6339 |
if (id_channel.second->notifyWaitingForDisconnect()) |
587 |
3 |
retaining_context = true; |
|
588 |
} |
||
589 |
6278 |
return retaining_context; |
|
590 |
} |
||
591 |
|||
592 |
13670 |
std::shared_ptr<MainThreadHandle> getThreadHandle() { |
|
593 |
✓✓ | 13670 |
if (!interface_) { |
594 |
6292 |
interface_ = std::make_shared<MainThreadInterface>( |
|
595 |
12584 |
env_->inspector_agent()); |
|
596 |
} |
||
597 |
13670 |
return interface_->GetHandle(); |
|
598 |
} |
||
599 |
|||
600 |
9877 |
std::shared_ptr<WorkerManager> getWorkerManager() { |
|
601 |
✓✓ | 9877 |
if (!is_main_) { |
602 |
736 |
return nullptr; |
|
603 |
} |
||
604 |
✓✓ | 9141 |
if (worker_manager_ == nullptr) { |
605 |
worker_manager_ = |
||
606 |
5569 |
std::make_shared<WorkerManager>(getThreadHandle()); |
|
607 |
} |
||
608 |
9141 |
return worker_manager_; |
|
609 |
} |
||
610 |
|||
611 |
76926 |
bool IsActive() { |
|
612 |
76926 |
return !channels_.empty(); |
|
613 |
} |
||
614 |
|||
615 |
private: |
||
616 |
291 |
bool shouldRunMessageLoop() { |
|
617 |
✓✓ | 291 |
if (waiting_for_frontend_) |
618 |
49 |
return true; |
|
619 |
✓✓✓✓ |
242 |
if (waiting_for_sessions_disconnect_ || waiting_for_resume_) { |
620 |
192 |
return hasConnectedSessions(); |
|
621 |
} |
||
622 |
50 |
return false; |
|
623 |
} |
||
624 |
|||
625 |
172 |
void runMessageLoop() { |
|
626 |
✗✓ | 172 |
if (running_nested_loop_) |
627 |
return; |
||
628 |
|||
629 |
172 |
running_nested_loop_ = true; |
|
630 |
|||
631 |
✓✓ | 291 |
while (shouldRunMessageLoop()) { |
632 |
✓✗ | 119 |
if (interface_) interface_->WaitForFrontendEvent(); |
633 |
119 |
env_->RunAndClearInterrupts(); |
|
634 |
} |
||
635 |
172 |
running_nested_loop_ = false; |
|
636 |
} |
||
637 |
|||
638 |
71808 |
double currentTimeMS() override { |
|
639 |
71808 |
return env_->isolate_data()->platform()->CurrentClockTimeMillis(); |
|
640 |
} |
||
641 |
|||
642 |
716901 |
std::unique_ptr<StringBuffer> resourceNameToUrl( |
|
643 |
const StringView& resource_name_view) override { |
||
644 |
std::string resource_name = |
||
645 |
1433802 |
protocol::StringUtil::StringViewToUtf8(resource_name_view); |
|
646 |
✓✓ | 716901 |
if (!IsFilePath(resource_name)) |
647 |
678330 |
return nullptr; |
|
648 |
38571 |
node::url::URL url = node::url::URL::FromFilePath(resource_name); |
|
649 |
38571 |
return Utf8ToStringView(url.href()); |
|
650 |
} |
||
651 |
|||
652 |
node::Environment* env_; |
||
653 |
bool is_main_; |
||
654 |
bool running_nested_loop_ = false; |
||
655 |
std::unique_ptr<V8Inspector> client_; |
||
656 |
// Note: ~ChannelImpl may access timers_ so timers_ has to come first. |
||
657 |
std::unordered_map<void*, TimerWrapHandle> timers_; |
||
658 |
std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_; |
||
659 |
int next_session_id_ = 1; |
||
660 |
bool waiting_for_resume_ = false; |
||
661 |
bool waiting_for_frontend_ = false; |
||
662 |
bool waiting_for_sessions_disconnect_ = false; |
||
663 |
// Allows accessing Inspector from non-main threads |
||
664 |
std::shared_ptr<MainThreadInterface> interface_; |
||
665 |
std::shared_ptr<WorkerManager> worker_manager_; |
||
666 |
}; |
||
667 |
|||
668 |
6301 |
Agent::Agent(Environment* env) |
|
669 |
: parent_env_(env), |
||
670 |
6301 |
debug_options_(env->options()->debug_options()), |
|
671 |
12602 |
host_port_(env->inspector_host_port()) {} |
|
672 |
|||
673 |
5637 |
Agent::~Agent() {} |
|
674 |
|||
675 |
6295 |
bool Agent::Start(const std::string& path, |
|
676 |
const DebugOptions& options, |
||
677 |
std::shared_ptr<ExclusiveAccess<HostPort>> host_port, |
||
678 |
bool is_main) { |
||
679 |
✗✓ | 6295 |
if (!options.allow_attaching_debugger) { |
680 |
return false; |
||
681 |
} |
||
682 |
6295 |
path_ = path; |
|
683 |
6295 |
debug_options_ = options; |
|
684 |
✗✓ | 6295 |
CHECK_NOT_NULL(host_port); |
685 |
6295 |
host_port_ = host_port; |
|
686 |
|||
687 |
6295 |
client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main); |
|
688 |
✓✓ | 6295 |
if (parent_env_->owns_inspector()) { |
689 |
5572 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
690 |
✗✓ | 5572 |
CHECK_EQ(start_io_thread_async_initialized.exchange(true), false); |
691 |
✗✓ | 5572 |
CHECK_EQ(0, uv_async_init(parent_env_->event_loop(), |
692 |
&start_io_thread_async, |
||
693 |
StartIoThreadAsyncCallback)); |
||
694 |
5572 |
uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async)); |
|
695 |
5572 |
start_io_thread_async.data = this; |
|
696 |
// Ignore failure, SIGUSR1 won't work, but that should not block node start. |
||
697 |
5572 |
StartDebugSignalHandler(); |
|
698 |
|||
699 |
5572 |
parent_env_->AddCleanupHook([](void* data) { |
|
700 |
4908 |
Environment* env = static_cast<Environment*>(data); |
|
701 |
|||
702 |
{ |
||
703 |
4908 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
704 |
4908 |
start_io_thread_async.data = nullptr; |
|
705 |
} |
||
706 |
|||
707 |
// This is global, will never get freed |
||
708 |
4908 |
env->CloseHandle(&start_io_thread_async, [](uv_async_t*) { |
|
709 |
✗✓ | 4908 |
CHECK(start_io_thread_async_initialized.exchange(false)); |
710 |
4908 |
}); |
|
711 |
5572 |
}, parent_env_); |
|
712 |
} |
||
713 |
|||
714 |
6295 |
AtExit(parent_env_, [](void* env) { |
|
715 |
6281 |
Agent* agent = static_cast<Environment*>(env)->inspector_agent(); |
|
716 |
✓✓ | 6281 |
if (agent->IsActive()) { |
717 |
6276 |
agent->WaitForDisconnect(); |
|
718 |
} |
||
719 |
6295 |
}, parent_env_); |
|
720 |
|||
721 |
6295 |
bool wait_for_connect = options.wait_for_connect(); |
|
722 |
✓✓ | 6295 |
if (parent_handle_) { |
723 |
721 |
wait_for_connect = parent_handle_->WaitForConnect(); |
|
724 |
721 |
parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect); |
|
725 |
✓✓✓✓ ✓✓ |
5574 |
} else if (!options.inspector_enabled || !StartIoThread()) { |
726 |
5464 |
return false; |
|
727 |
} |
||
728 |
|||
729 |
// Patch the debug options to implement waitForDebuggerOnStart for |
||
730 |
// the NodeWorker.enable method. |
||
731 |
✓✓ | 831 |
if (wait_for_connect) { |
732 |
✗✓ | 20 |
CHECK(!parent_env_->has_serialized_options()); |
733 |
20 |
debug_options_.EnableBreakFirstLine(); |
|
734 |
20 |
parent_env_->options()->get_debug_options()->EnableBreakFirstLine(); |
|
735 |
20 |
client_->waitForFrontend(); |
|
736 |
} |
||
737 |
831 |
return true; |
|
738 |
} |
||
739 |
|||
740 |
115 |
bool Agent::StartIoThread() { |
|
741 |
✓✓ | 115 |
if (io_ != nullptr) |
742 |
2 |
return true; |
|
743 |
|||
744 |
✓✓✗✓ ✗✓ |
113 |
if (!parent_env_->should_create_inspector() && !client_) { |
745 |
ThrowUninitializedInspectorError(parent_env_); |
||
746 |
return false; |
||
747 |
} |
||
748 |
|||
749 |
✗✓ | 113 |
CHECK_NOT_NULL(client_); |
750 |
|||
751 |
226 |
io_ = InspectorIo::Start(client_->getThreadHandle(), |
|
752 |
113 |
path_, |
|
753 |
113 |
host_port_, |
|
754 |
226 |
debug_options_.inspect_publish_uid); |
|
755 |
✓✓ | 113 |
if (io_ == nullptr) { |
756 |
1 |
return false; |
|
757 |
} |
||
758 |
112 |
NotifyClusterWorkersDebugEnabled(parent_env_); |
|
759 |
112 |
return true; |
|
760 |
} |
||
761 |
|||
762 |
4 |
void Agent::Stop() { |
|
763 |
4 |
io_.reset(); |
|
764 |
4 |
} |
|
765 |
|||
766 |
7265 |
std::unique_ptr<InspectorSession> Agent::Connect( |
|
767 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
768 |
bool prevent_shutdown) { |
||
769 |
✓✓✗✓ ✗✓ |
7265 |
if (!parent_env_->should_create_inspector() && !client_) { |
770 |
ThrowUninitializedInspectorError(parent_env_); |
||
771 |
return std::unique_ptr<InspectorSession>{}; |
||
772 |
} |
||
773 |
|||
774 |
✗✓ | 7265 |
CHECK_NOT_NULL(client_); |
775 |
|||
776 |
7265 |
int session_id = client_->connectFrontend(std::move(delegate), |
|
777 |
prevent_shutdown); |
||
778 |
return std::unique_ptr<InspectorSession>( |
||
779 |
7265 |
new SameThreadInspectorSession(session_id, client_)); |
|
780 |
} |
||
781 |
|||
782 |
2 |
std::unique_ptr<InspectorSession> Agent::ConnectToMainThread( |
|
783 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
784 |
bool prevent_shutdown) { |
||
785 |
✗✓✗✗ ✗✓ |
2 |
if (!parent_env_->should_create_inspector() && !client_) { |
786 |
ThrowUninitializedInspectorError(parent_env_); |
||
787 |
return std::unique_ptr<InspectorSession>{}; |
||
788 |
} |
||
789 |
|||
790 |
✗✓ | 2 |
CHECK_NOT_NULL(parent_handle_); |
791 |
✗✓ | 2 |
CHECK_NOT_NULL(client_); |
792 |
auto thread_safe_delegate = |
||
793 |
4 |
client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)); |
|
794 |
2 |
return parent_handle_->Connect(std::move(thread_safe_delegate), |
|
795 |
2 |
prevent_shutdown); |
|
796 |
} |
||
797 |
|||
798 |
6278 |
// Runs during environment teardown (and after an uncaught exception is
// reported): notifies connected frontends that the context is going
// away and, where a session asked to block shutdown, waits for the
// sessions to disconnect first.
void Agent::WaitForDisconnect() {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return;
  }

  CHECK_NOT_NULL(client_);
  // Capture worker-ness before the parent handle is released below.
  bool is_worker = parent_handle_ != nullptr;
  parent_handle_.reset();
  if (client_->hasConnectedSessions() && !is_worker) {
    fprintf(stderr, "Waiting for the debugger to disconnect...\n");
    fflush(stderr);
  }
  if (!client_->notifyWaitingForDisconnect()) {
    // No session requested to block shutdown; just report the context
    // teardown to the client.
    client_->contextDestroyed(parent_env_->context());
  } else if (is_worker) {
    // Workers block here; the main thread blocks via the io_ branch below.
    client_->waitForSessionsDisconnect();
  }
  if (io_ != nullptr) {
    io_->StopAcceptingNewConnections();
    client_->waitForSessionsDisconnect();
  }
}
||
821 |
|||
822 |
274 |
void Agent::ReportUncaughtException(Local<Value> error, |
|
823 |
Local<Message> message) { |
||
824 |
✓✓ | 274 |
if (!IsListening()) |
825 |
272 |
return; |
|
826 |
2 |
client_->ReportUncaughtException(error, message); |
|
827 |
2 |
WaitForDisconnect(); |
|
828 |
} |
||
829 |
|||
830 |
19 |
// Asks the V8 inspector to break as soon as any JavaScript executes;
// `reason` is reported to the frontend with the pause event.
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) {
  client_->schedulePauseOnNextStatement(reason);
}
|
833 |
|||
834 |
6268 |
// Stores the JS-side functions that toggle inspector async tracking and
// replays any EnableAsyncHook()/DisableAsyncHook() request that arrived
// before the functions were registered.
void Agent::RegisterAsyncHook(Isolate* isolate,
                              Local<Function> enable_function,
                              Local<Function> disable_function) {
  parent_env_->set_inspector_enable_async_hooks(enable_function);
  parent_env_->set_inspector_disable_async_hooks(disable_function);
  if (pending_enable_async_hook_) {
    // The two pending flags are mutually exclusive by construction
    // (see EnableAsyncHook()/DisableAsyncHook()).
    CHECK(!pending_disable_async_hook_);
    pending_enable_async_hook_ = false;
    EnableAsyncHook();
  } else if (pending_disable_async_hook_) {
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
    DisableAsyncHook();
  }
}
|
849 |
|||
850 |
6 |
// Turns on inspector async tracking by invoking the JS-side enable
// hook. If the hook has not been registered yet, records the request so
// RegisterAsyncHook() can replay it later.
void Agent::EnableAsyncHook() {
  HandleScope scope(parent_env_->isolate());
  Local<Function> enable = parent_env_->inspector_enable_async_hooks();
  if (!enable.IsEmpty()) {
    ToggleAsyncHook(parent_env_->isolate(), enable);
  } else if (pending_disable_async_hook_) {
    // An earlier disable request is still pending; the two cancel out.
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
  } else {
    pending_enable_async_hook_ = true;
  }
}
|
862 |
|||
863 |
1 |
void Agent::DisableAsyncHook() { |
|
864 |
2 |
HandleScope scope(parent_env_->isolate()); |
|
865 |
1 |
Local<Function> disable = parent_env_->inspector_enable_async_hooks(); |
|
866 |
✓✗ | 1 |
if (!disable.IsEmpty()) { |
867 |
1 |
ToggleAsyncHook(parent_env_->isolate(), disable); |
|
868 |
} else if (pending_enable_async_hook_) { |
||
869 |
CHECK(!pending_disable_async_hook_); |
||
870 |
pending_enable_async_hook_ = false; |
||
871 |
} else { |
||
872 |
pending_disable_async_hook_ = true; |
||
873 |
} |
||
874 |
1 |
} |
|
875 |
|||
876 |
5 |
// Invokes one of the JS-side async-hook toggle functions (`fn`) with no
// arguments. Aborts the process if the call throws non-terminally.
void Agent::ToggleAsyncHook(Isolate* isolate, Local<Function> fn) {
  // Guard against running this during cleanup -- no async events will be
  // emitted anyway at that point anymore, and calling into JS is not possible.
  // This should probably not be something we're attempting in the first place,
  // Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039
  if (!parent_env_->can_call_into_js()) return;
  CHECK(parent_env_->has_run_bootstrapping_code());
  HandleScope handle_scope(isolate);
  CHECK(!fn.IsEmpty());
  auto context = parent_env_->context();
  v8::TryCatch try_catch(isolate);
  USE(fn->Call(context, Undefined(isolate), 0, nullptr));
  // A terminated exception (e.g. worker shutdown) is expected and
  // ignored; any other throw from the toggle function is a fatal bug.
  if (try_catch.HasCaught() && !try_catch.HasTerminated()) {
    PrintCaughtException(isolate, context, try_catch);
    FatalError("\nnode::inspector::Agent::ToggleAsyncHook",
               "Cannot toggle Inspector's AsyncHook, please report this.");
  }
}
||
894 |
|||
895 |
3 |
// Reports to the V8 inspector that an async task was scheduled;
// `recurring` marks tasks that may run more than once.
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task,
                               bool recurring) {
  client_->AsyncTaskScheduled(task_name, task, recurring);
}
|
899 |
|||
900 |
4 |
// Reports to the V8 inspector that a previously scheduled async task
// was canceled before running.
void Agent::AsyncTaskCanceled(void* task) {
  client_->AsyncTaskCanceled(task);
}
|
903 |
|||
904 |
6 |
// Reports to the V8 inspector that an async task began executing.
void Agent::AsyncTaskStarted(void* task) {
  client_->AsyncTaskStarted(task);
}
|
907 |
|||
908 |
10 |
// Reports to the V8 inspector that an async task finished executing.
void Agent::AsyncTaskFinished(void* task) {
  client_->AsyncTaskFinished(task);
}
|
911 |
|||
912 |
// Drops all pending async task tracking state held by the client.
void Agent::AllAsyncTasksCanceled() {
  client_->AllAsyncTasksCanceled();
}
||
915 |
|||
916 |
2 |
// Asks the main thread to run StartIoThread(), typically triggered from
// another thread (e.g. a signal handler or IPC message).
void Agent::RequestIoThreadStart() {
  // We need to attempt to interrupt V8 flow (in case Node is running
  // continuous JS code) and to wake up libuv thread (in case Node is waiting
  // for IO events)
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
  parent_env_->RequestInterrupt([this](Environment*) {
    StartIoThread();
  });

  // NOTE(review): the async handle is poked a second time after the
  // interrupt is queued -- presumably to avoid a lost wakeup if the
  // first send raced with the handler; confirm before simplifying.
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
}
|
929 |
|||
930 |
6928 |
// Reports a newly created V8 context to the inspector client so it can
// be announced to frontends. Before the client exists (which is the
// case for a main context) this is a no-op.
void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) {
  if (client_ == nullptr) return;  // happens for a main context
  client_->contextCreated(context, info);
}
||
935 |
|||
936 |
77072 |
bool Agent::IsActive() { |
|
937 |
✗✓ | 77072 |
if (client_ == nullptr) |
938 |
return false; |
||
939 |
✓✓✓✓ |
77072 |
return io_ != nullptr || client_->IsActive(); |
940 |
} |
||
941 |
|||
942 |
721 |
void Agent::SetParentHandle( |
|
943 |
std::unique_ptr<ParentInspectorHandle> parent_handle) { |
||
944 |
721 |
parent_handle_ = std::move(parent_handle); |
|
945 |
721 |
} |
|
946 |
|||
947 |
962 |
std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle( |
|
948 |
uint64_t thread_id, const std::string& url) { |
||
949 |
✗✓✗✗ ✗✓ |
962 |
if (!parent_env_->should_create_inspector() && !client_) { |
950 |
ThrowUninitializedInspectorError(parent_env_); |
||
951 |
return std::unique_ptr<ParentInspectorHandle>{}; |
||
952 |
} |
||
953 |
|||
954 |
✗✓ | 962 |
CHECK_NOT_NULL(client_); |
955 |
✓✓ | 962 |
if (!parent_handle_) { |
956 |
952 |
return client_->getWorkerManager()->NewParentHandle(thread_id, url); |
|
957 |
} else { |
||
958 |
10 |
return parent_handle_->NewParentInspectorHandle(thread_id, url); |
|
959 |
} |
||
960 |
} |
||
961 |
|||
962 |
void Agent::WaitForConnect() { |
||
963 |
if (!parent_env_->should_create_inspector() && !client_) { |
||
964 |
ThrowUninitializedInspectorError(parent_env_); |
||
965 |
return; |
||
966 |
} |
||
967 |
|||
968 |
CHECK_NOT_NULL(client_); |
||
969 |
client_->waitForFrontend(); |
||
970 |
} |
||
971 |
|||
972 |
1660 |
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() { |
|
973 |
✗✓✗✗ ✗✓ |
1660 |
if (!parent_env_->should_create_inspector() && !client_) { |
974 |
ThrowUninitializedInspectorError(parent_env_); |
||
975 |
return std::unique_ptr<WorkerManager>{}; |
||
976 |
} |
||
977 |
|||
978 |
✗✓ | 1660 |
CHECK_NOT_NULL(client_); |
979 |
1660 |
return client_->getWorkerManager(); |
|
980 |
} |
||
981 |
|||
982 |
6 |
std::string Agent::GetWsUrl() const { |
|
983 |
✓✓ | 6 |
if (io_ == nullptr) |
984 |
1 |
return ""; |
|
985 |
5 |
return io_->GetWsUrl(); |
|
986 |
} |
||
987 |
|||
988 |
26396 |
// Disconnects this session's frontend on destruction, provided the
// owning client is still alive (client_ is held weakly).
SameThreadInspectorSession::~SameThreadInspectorSession() {
  if (auto client = client_.lock())
    client->disconnectFrontend(session_id_);
}
|
993 |
|||
994 |
20320 |
void SameThreadInspectorSession::Dispatch( |
|
995 |
const v8_inspector::StringView& message) { |
||
996 |
40640 |
auto client = client_.lock(); |
|
997 |
✓✗ | 20320 |
if (client) |
998 |
20320 |
client->dispatchMessageFromFrontend(session_id_, message); |
|
999 |
20320 |
} |
|
1000 |
|||
1001 |
} // namespace inspector |
||
1002 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |