GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "inspector_agent.h" |
||
2 |
|||
3 |
#include "env-inl.h" |
||
4 |
#include "inspector/main_thread_interface.h" |
||
5 |
#include "inspector/node_string.h" |
||
6 |
#include "inspector/runtime_agent.h" |
||
7 |
#include "inspector/tracing_agent.h" |
||
8 |
#include "inspector/worker_agent.h" |
||
9 |
#include "inspector/worker_inspector.h" |
||
10 |
#include "inspector_io.h" |
||
11 |
#include "node/inspector/protocol/Protocol.h" |
||
12 |
#include "node_errors.h" |
||
13 |
#include "node_internals.h" |
||
14 |
#include "node_options-inl.h" |
||
15 |
#include "node_process-inl.h" |
||
16 |
#include "node_url.h" |
||
17 |
#include "util-inl.h" |
||
18 |
#include "timer_wrap-inl.h" |
||
19 |
#include "v8-inspector.h" |
||
20 |
#include "v8-platform.h" |
||
21 |
|||
22 |
#include "libplatform/libplatform.h" |
||
23 |
|||
24 |
#ifdef __POSIX__ |
||
25 |
#include <pthread.h> |
||
26 |
#include <climits> // PTHREAD_STACK_MIN |
||
27 |
#endif // __POSIX__ |
||
28 |
|||
29 |
#include <algorithm> |
||
30 |
#include <cstring> |
||
31 |
#include <sstream> |
||
32 |
#include <unordered_map> |
||
33 |
#include <vector> |
||
34 |
|||
35 |
namespace node { |
||
36 |
namespace inspector { |
||
37 |
namespace { |
||
38 |
|||
39 |
using node::FatalError; |
||
40 |
|||
41 |
using v8::Context; |
||
42 |
using v8::Function; |
||
43 |
using v8::HandleScope; |
||
44 |
using v8::Isolate; |
||
45 |
using v8::Local; |
||
46 |
using v8::Message; |
||
47 |
using v8::Object; |
||
48 |
using v8::Value; |
||
49 |
|||
50 |
using v8_inspector::StringBuffer; |
||
51 |
using v8_inspector::StringView; |
||
52 |
using v8_inspector::V8Inspector; |
||
53 |
using v8_inspector::V8InspectorClient; |
||
54 |
|||
55 |
#ifdef __POSIX__ |
||
56 |
static uv_sem_t start_io_thread_semaphore; |
||
57 |
#endif // __POSIX__ |
||
58 |
static uv_async_t start_io_thread_async; |
||
59 |
// This is just an additional check to make sure start_io_thread_async |
||
60 |
// is not accidentally re-used or used when uninitialized. |
||
61 |
static std::atomic_bool start_io_thread_async_initialized { false }; |
||
62 |
// Protects the Agent* stored in start_io_thread_async.data. |
||
63 |
static Mutex start_io_thread_async_mutex; |
||
64 |
|||
65 |
4 |
std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
|
66 |
Local<Value> value) { |
||
67 |
4 |
TwoByteValue buffer(isolate, value); |
|
68 |
4 |
return StringBuffer::create(StringView(*buffer, buffer.length())); |
|
69 |
} |
||
70 |
|||
71 |
// Called on the main thread. |
||
72 |
3 |
void StartIoThreadAsyncCallback(uv_async_t* handle) { |
|
73 |
3 |
static_cast<Agent*>(handle->data)->StartIoThread(); |
|
74 |
3 |
} |
|
75 |
|||
76 |
|||
77 |
#ifdef __POSIX__ |
||
78 |
2 |
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) { |
|
79 |
2 |
uv_sem_post(&start_io_thread_semaphore); |
|
80 |
2 |
} |
|
81 |
|||
82 |
5685 |
inline void* StartIoThreadMain(void* unused) { |
|
83 |
for (;;) { |
||
84 |
5685 |
uv_sem_wait(&start_io_thread_semaphore); |
|
85 |
4 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
86 |
|||
87 |
✗✓ | 2 |
CHECK(start_io_thread_async_initialized); |
88 |
2 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
|
89 |
✓✗ | 2 |
if (agent != nullptr) |
90 |
2 |
agent->RequestIoThreadStart(); |
|
91 |
2 |
} |
|
92 |
} |
||
93 |
|||
94 |
5683 |
static int StartDebugSignalHandler() { |
|
95 |
// Start a watchdog thread for calling v8::Debug::DebugBreak() because |
||
96 |
// it's not safe to call directly from the signal handler, it can |
||
97 |
// deadlock with the thread it interrupts. |
||
98 |
✗✓ | 5683 |
CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0)); |
99 |
pthread_attr_t attr; |
||
100 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_attr_init(&attr)); |
101 |
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
102 |
// PTHREAD_STACK_MIN is 2 KiB with musl libc, which is too small to safely |
||
103 |
// receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KiB on arm64, which |
||
104 |
// is the musl architecture with the biggest MINSIGSTKSZ so let's use that |
||
105 |
// as a lower bound and let's quadruple it just in case. The goal is to avoid |
||
106 |
// creating a big 2 or 4 MiB address space gap (problematic on 32 bits |
||
107 |
// because of fragmentation), not squeeze out every last byte. |
||
108 |
// Omitted on FreeBSD because it doesn't seem to like small stacks. |
||
109 |
5683 |
const size_t stack_size = std::max(static_cast<size_t>(4 * 8192), |
|
110 |
static_cast<size_t>(PTHREAD_STACK_MIN)); |
||
111 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
112 |
#endif // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
113 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
114 |
sigset_t sigmask; |
||
115 |
// Mask all signals. |
||
116 |
5683 |
sigfillset(&sigmask); |
|
117 |
sigset_t savemask; |
||
118 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask)); |
119 |
5683 |
sigmask = savemask; |
|
120 |
pthread_t thread; |
||
121 |
5683 |
const int err = pthread_create(&thread, &attr, |
|
122 |
StartIoThreadMain, nullptr); |
||
123 |
// Restore original mask |
||
124 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr)); |
125 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_attr_destroy(&attr)); |
126 |
✗✓ | 5683 |
if (err != 0) { |
127 |
fprintf(stderr, "node[%u]: pthread_create: %s\n", |
||
128 |
uv_os_getpid(), strerror(err)); |
||
129 |
fflush(stderr); |
||
130 |
// Leave SIGUSR1 blocked. We don't install a signal handler, |
||
131 |
// receiving the signal would terminate the process. |
||
132 |
return -err; |
||
133 |
} |
||
134 |
5683 |
RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup); |
|
135 |
// Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered. |
||
136 |
5683 |
sigemptyset(&sigmask); |
|
137 |
5683 |
sigaddset(&sigmask, SIGUSR1); |
|
138 |
✗✓ | 5683 |
CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr)); |
139 |
5683 |
return 0; |
|
140 |
} |
||
141 |
#endif // __POSIX__ |
||
142 |
|||
143 |
|||
144 |
#ifdef _WIN32 |
||
145 |
DWORD WINAPI StartIoThreadProc(void* arg) { |
||
146 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
||
147 |
CHECK(start_io_thread_async_initialized); |
||
148 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
||
149 |
if (agent != nullptr) |
||
150 |
agent->RequestIoThreadStart(); |
||
151 |
return 0; |
||
152 |
} |
||
153 |
|||
154 |
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf, |
||
155 |
size_t buf_len) { |
||
156 |
return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid); |
||
157 |
} |
||
158 |
|||
159 |
static int StartDebugSignalHandler() { |
||
160 |
wchar_t mapping_name[32]; |
||
161 |
HANDLE mapping_handle; |
||
162 |
DWORD pid; |
||
163 |
LPTHREAD_START_ROUTINE* handler; |
||
164 |
|||
165 |
pid = uv_os_getpid(); |
||
166 |
|||
167 |
if (GetDebugSignalHandlerMappingName(pid, |
||
168 |
mapping_name, |
||
169 |
arraysize(mapping_name)) < 0) { |
||
170 |
return -1; |
||
171 |
} |
||
172 |
|||
173 |
mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE, |
||
174 |
nullptr, |
||
175 |
PAGE_READWRITE, |
||
176 |
0, |
||
177 |
sizeof *handler, |
||
178 |
mapping_name); |
||
179 |
if (mapping_handle == nullptr) { |
||
180 |
return -1; |
||
181 |
} |
||
182 |
|||
183 |
handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>( |
||
184 |
MapViewOfFile(mapping_handle, |
||
185 |
FILE_MAP_ALL_ACCESS, |
||
186 |
0, |
||
187 |
0, |
||
188 |
sizeof *handler)); |
||
189 |
if (handler == nullptr) { |
||
190 |
CloseHandle(mapping_handle); |
||
191 |
return -1; |
||
192 |
} |
||
193 |
|||
194 |
*handler = StartIoThreadProc; |
||
195 |
|||
196 |
UnmapViewOfFile(static_cast<void*>(handler)); |
||
197 |
|||
198 |
return 0; |
||
199 |
} |
||
200 |
#endif // _WIN32 |
||
201 |
|||
202 |
|||
203 |
const int CONTEXT_GROUP_ID = 1; |
||
204 |
|||
205 |
740 |
std::string GetWorkerLabel(node::Environment* env) { |
|
206 |
1480 |
std::ostringstream result; |
|
207 |
740 |
result << "Worker[" << env->thread_id() << "]"; |
|
208 |
740 |
return result.str(); |
|
209 |
} |
||
210 |
|||
211 |
class ChannelImpl final : public v8_inspector::V8Inspector::Channel, |
||
212 |
public protocol::FrontendChannel { |
||
213 |
public: |
||
214 |
7394 |
explicit ChannelImpl(Environment* env, |
|
215 |
const std::unique_ptr<V8Inspector>& inspector, |
||
216 |
std::shared_ptr<WorkerManager> worker_manager, |
||
217 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
218 |
std::shared_ptr<MainThreadHandle> main_thread_, |
||
219 |
bool prevent_shutdown) |
||
220 |
14788 |
: delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown), |
|
221 |
7394 |
retaining_context_(false) { |
|
222 |
22182 |
session_ = inspector->connect(CONTEXT_GROUP_ID, |
|
223 |
this, |
||
224 |
StringView(), |
||
225 |
14788 |
V8Inspector::ClientTrustLevel::kFullyTrusted); |
|
226 |
7394 |
node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this); |
|
227 |
tracing_agent_ = |
||
228 |
7394 |
std::make_unique<protocol::TracingAgent>(env, main_thread_); |
|
229 |
7394 |
tracing_agent_->Wire(node_dispatcher_.get()); |
|
230 |
✓✓ | 7394 |
if (worker_manager) { |
231 |
6640 |
worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager); |
|
232 |
6640 |
worker_agent_->Wire(node_dispatcher_.get()); |
|
233 |
} |
||
234 |
7394 |
runtime_agent_ = std::make_unique<protocol::RuntimeAgent>(); |
|
235 |
7394 |
runtime_agent_->Wire(node_dispatcher_.get()); |
|
236 |
7394 |
} |
|
237 |
|||
238 |
26784 |
~ChannelImpl() override { |
|
239 |
13392 |
tracing_agent_->disable(); |
|
240 |
13392 |
tracing_agent_.reset(); // Dispose before the dispatchers |
|
241 |
✓✓ | 13392 |
if (worker_agent_) { |
242 |
11884 |
worker_agent_->disable(); |
|
243 |
11884 |
worker_agent_.reset(); // Dispose before the dispatchers |
|
244 |
} |
||
245 |
13392 |
runtime_agent_->disable(); |
|
246 |
13392 |
runtime_agent_.reset(); // Dispose before the dispatchers |
|
247 |
26784 |
} |
|
248 |
|||
249 |
20707 |
void dispatchProtocolMessage(const StringView& message) { |
|
250 |
41414 |
std::string raw_message = protocol::StringUtil::StringViewToUtf8(message); |
|
251 |
std::unique_ptr<protocol::DictionaryValue> value = |
||
252 |
20707 |
protocol::DictionaryValue::cast(protocol::StringUtil::parseMessage( |
|
253 |
41414 |
raw_message, false)); |
|
254 |
int call_id; |
||
255 |
41414 |
std::string method; |
|
256 |
20707 |
node_dispatcher_->parseCommand(value.get(), &call_id, &method); |
|
257 |
20707 |
if (v8_inspector::V8InspectorSession::canDispatchMethod( |
|
258 |
✓✓ | 41414 |
Utf8ToStringView(method)->string())) { |
259 |
20668 |
session_->dispatchProtocolMessage(message); |
|
260 |
} else { |
||
261 |
39 |
node_dispatcher_->dispatch(call_id, method, std::move(value), |
|
262 |
raw_message); |
||
263 |
} |
||
264 |
20707 |
} |
|
265 |
|||
266 |
38 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
267 |
76 |
std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason); |
|
268 |
38 |
session_->schedulePauseOnNextStatement(buffer->string(), buffer->string()); |
|
269 |
38 |
} |
|
270 |
|||
271 |
6722 |
bool preventShutdown() { |
|
272 |
6722 |
return prevent_shutdown_; |
|
273 |
} |
||
274 |
|||
275 |
6466 |
bool notifyWaitingForDisconnect() { |
|
276 |
6466 |
retaining_context_ = runtime_agent_->notifyWaitingForDisconnect(); |
|
277 |
6466 |
return retaining_context_; |
|
278 |
} |
||
279 |
|||
280 |
936 |
bool retainingContext() { |
|
281 |
936 |
return retaining_context_; |
|
282 |
} |
||
283 |
|||
284 |
private: |
||
285 |
20668 |
void sendResponse( |
|
286 |
int callId, |
||
287 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
288 |
20668 |
sendMessageToFrontend(message->string()); |
|
289 |
20668 |
} |
|
290 |
|||
291 |
8072 |
void sendNotification( |
|
292 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
293 |
8072 |
sendMessageToFrontend(message->string()); |
|
294 |
8072 |
} |
|
295 |
|||
296 |
1349 |
void flushProtocolNotifications() override { } |
|
297 |
|||
298 |
29428 |
void sendMessageToFrontend(const StringView& message) { |
|
299 |
29428 |
delegate_->SendMessageToFrontend(message); |
|
300 |
29428 |
} |
|
301 |
|||
302 |
688 |
void sendMessageToFrontend(const std::string& message) { |
|
303 |
688 |
sendMessageToFrontend(Utf8ToStringView(message)->string()); |
|
304 |
688 |
} |
|
305 |
|||
306 |
using Serializable = protocol::Serializable; |
||
307 |
|||
308 |
39 |
void sendProtocolResponse(int callId, |
|
309 |
std::unique_ptr<Serializable> message) override { |
||
310 |
39 |
sendMessageToFrontend(message->serializeToJSON()); |
|
311 |
39 |
} |
|
312 |
649 |
void sendProtocolNotification( |
|
313 |
std::unique_ptr<Serializable> message) override { |
||
314 |
649 |
sendMessageToFrontend(message->serializeToJSON()); |
|
315 |
649 |
} |
|
316 |
|||
317 |
void fallThrough(int callId, |
||
318 |
const std::string& method, |
||
319 |
const std::string& message) override { |
||
320 |
DCHECK(false); |
||
321 |
} |
||
322 |
|||
323 |
std::unique_ptr<protocol::RuntimeAgent> runtime_agent_; |
||
324 |
std::unique_ptr<protocol::TracingAgent> tracing_agent_; |
||
325 |
std::unique_ptr<protocol::WorkerAgent> worker_agent_; |
||
326 |
std::unique_ptr<InspectorSessionDelegate> delegate_; |
||
327 |
std::unique_ptr<v8_inspector::V8InspectorSession> session_; |
||
328 |
std::unique_ptr<protocol::UberDispatcher> node_dispatcher_; |
||
329 |
bool prevent_shutdown_; |
||
330 |
bool retaining_context_; |
||
331 |
}; |
||
332 |
|||
333 |
class SameThreadInspectorSession : public InspectorSession { |
||
334 |
public: |
||
335 |
7394 |
SameThreadInspectorSession( |
|
336 |
int session_id, std::shared_ptr<NodeInspectorClient> client) |
||
337 |
7394 |
: session_id_(session_id), client_(client) {} |
|
338 |
~SameThreadInspectorSession() override; |
||
339 |
void Dispatch(const v8_inspector::StringView& message) override; |
||
340 |
|||
341 |
private: |
||
342 |
int session_id_; |
||
343 |
std::weak_ptr<NodeInspectorClient> client_; |
||
344 |
}; |
||
345 |
|||
346 |
112 |
void NotifyClusterWorkersDebugEnabled(Environment* env) { |
|
347 |
112 |
Isolate* isolate = env->isolate(); |
|
348 |
112 |
HandleScope handle_scope(isolate); |
|
349 |
112 |
Local<Context> context = env->context(); |
|
350 |
|||
351 |
// Send message to enable debug in cluster workers |
||
352 |
112 |
Local<Object> message = Object::New(isolate); |
|
353 |
112 |
message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"), |
|
354 |
448 |
FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check(); |
|
355 |
112 |
ProcessEmit(env, "internalMessage", message); |
|
356 |
112 |
} |
|
357 |
|||
358 |
#ifdef _WIN32 |
||
359 |
bool IsFilePath(const std::string& path) { |
||
360 |
// '\\' |
||
361 |
if (path.length() > 2 && path[0] == '\\' && path[1] == '\\') |
||
362 |
return true; |
||
363 |
// '[A-Z]:[/\\]' |
||
364 |
if (path.length() < 3) |
||
365 |
return false; |
||
366 |
if ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) |
||
367 |
return path[1] == ':' && (path[2] == '/' || path[2] == '\\'); |
||
368 |
return false; |
||
369 |
} |
||
370 |
#else |
||
371 |
752466 |
bool IsFilePath(const std::string& path) { |
|
372 |
✓✓✓✓ |
752466 |
return !path.empty() && path[0] == '/'; |
373 |
} |
||
374 |
#endif // __POSIX__ |
||
375 |
|||
376 |
void ThrowUninitializedInspectorError(Environment* env) { |
||
377 |
HandleScope scope(env->isolate()); |
||
378 |
|||
379 |
const char* msg = "This Environment was initialized without a V8::Inspector"; |
||
380 |
Local<Value> exception = |
||
381 |
v8::String::NewFromUtf8(env->isolate(), msg).ToLocalChecked(); |
||
382 |
|||
383 |
env->isolate()->ThrowException(exception); |
||
384 |
} |
||
385 |
|||
386 |
} // namespace |
||
387 |
|||
388 |
class NodeInspectorClient : public V8InspectorClient { |
||
389 |
public: |
||
390 |
6425 |
explicit NodeInspectorClient(node::Environment* env, bool is_main) |
|
391 |
6425 |
: env_(env), is_main_(is_main) { |
|
392 |
6425 |
client_ = V8Inspector::create(env->isolate(), this); |
|
393 |
// TODO(bnoordhuis) Make name configurable from src/node.cc. |
||
394 |
std::string name = |
||
395 |
✓✓ | 12850 |
is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env); |
396 |
12850 |
ContextInfo info(name); |
|
397 |
6425 |
info.is_default = true; |
|
398 |
6425 |
contextCreated(env->context(), info); |
|
399 |
6425 |
} |
|
400 |
|||
401 |
40 |
void runMessageLoopOnPause(int context_group_id) override { |
|
402 |
40 |
waiting_for_resume_ = true; |
|
403 |
40 |
runMessageLoop(); |
|
404 |
40 |
} |
|
405 |
|||
406 |
112 |
void waitForSessionsDisconnect() { |
|
407 |
112 |
waiting_for_sessions_disconnect_ = true; |
|
408 |
112 |
runMessageLoop(); |
|
409 |
112 |
} |
|
410 |
|||
411 |
20 |
void waitForFrontend() { |
|
412 |
20 |
waiting_for_frontend_ = true; |
|
413 |
20 |
runMessageLoop(); |
|
414 |
20 |
} |
|
415 |
|||
416 |
9 |
void maxAsyncCallStackDepthChanged(int depth) override { |
|
417 |
✓✓ | 9 |
if (waiting_for_sessions_disconnect_) { |
418 |
// V8 isolate is mostly done and is only letting Inspector protocol |
||
419 |
// clients gather data. |
||
420 |
4 |
return; |
|
421 |
} |
||
422 |
✓✗ | 5 |
if (auto agent = env_->inspector_agent()) { |
423 |
✓✓ | 5 |
if (depth == 0) { |
424 |
1 |
agent->DisableAsyncHook(); |
|
425 |
} else { |
||
426 |
4 |
agent->EnableAsyncHook(); |
|
427 |
} |
||
428 |
} |
||
429 |
} |
||
430 |
|||
431 |
7045 |
void contextCreated(Local<Context> context, const ContextInfo& info) { |
|
432 |
14090 |
auto name_buffer = Utf8ToStringView(info.name); |
|
433 |
14090 |
auto origin_buffer = Utf8ToStringView(info.origin); |
|
434 |
7045 |
std::unique_ptr<StringBuffer> aux_data_buffer; |
|
435 |
|||
436 |
v8_inspector::V8ContextInfo v8info( |
||
437 |
7045 |
context, CONTEXT_GROUP_ID, name_buffer->string()); |
|
438 |
7045 |
v8info.origin = origin_buffer->string(); |
|
439 |
|||
440 |
✓✓ | 7045 |
if (info.is_default) { |
441 |
6425 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}"); |
|
442 |
} else { |
||
443 |
620 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}"); |
|
444 |
} |
||
445 |
7045 |
v8info.auxData = aux_data_buffer->string(); |
|
446 |
|||
447 |
7045 |
client_->contextCreated(v8info); |
|
448 |
7045 |
} |
|
449 |
|||
450 |
6405 |
void contextDestroyed(Local<Context> context) { |
|
451 |
6405 |
client_->contextDestroyed(context); |
|
452 |
6405 |
} |
|
453 |
|||
454 |
28 |
void quitMessageLoopOnPause() override { |
|
455 |
28 |
waiting_for_resume_ = false; |
|
456 |
28 |
} |
|
457 |
|||
458 |
21 |
void runIfWaitingForDebugger(int context_group_id) override { |
|
459 |
21 |
waiting_for_frontend_ = false; |
|
460 |
21 |
} |
|
461 |
|||
462 |
7394 |
int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate, |
|
463 |
bool prevent_shutdown) { |
||
464 |
7394 |
int session_id = next_session_id_++; |
|
465 |
14788 |
channels_[session_id] = std::make_unique<ChannelImpl>(env_, |
|
466 |
7394 |
client_, |
|
467 |
14788 |
getWorkerManager(), |
|
468 |
7394 |
std::move(delegate), |
|
469 |
14788 |
getThreadHandle(), |
|
470 |
7394 |
prevent_shutdown); |
|
471 |
7394 |
return session_id; |
|
472 |
} |
||
473 |
|||
474 |
933 |
void disconnectFrontend(int session_id) { |
|
475 |
933 |
auto it = channels_.find(session_id); |
|
476 |
✗✓ | 933 |
if (it == channels_.end()) |
477 |
return; |
||
478 |
933 |
bool retaining_context = it->second->retainingContext(); |
|
479 |
933 |
channels_.erase(it); |
|
480 |
✓✓ | 933 |
if (retaining_context) { |
481 |
✓✓ | 6 |
for (const auto& id_channel : channels_) { |
482 |
✗✓ | 3 |
if (id_channel.second->retainingContext()) |
483 |
return; |
||
484 |
} |
||
485 |
3 |
contextDestroyed(env_->context()); |
|
486 |
} |
||
487 |
✓✓✓✓ |
933 |
if (waiting_for_sessions_disconnect_ && !is_main_) |
488 |
2 |
waiting_for_sessions_disconnect_ = false; |
|
489 |
} |
||
490 |
|||
491 |
20707 |
void dispatchMessageFromFrontend(int session_id, const StringView& message) { |
|
492 |
20707 |
channels_[session_id]->dispatchProtocolMessage(message); |
|
493 |
20707 |
} |
|
494 |
|||
495 |
310 |
Local<Context> ensureDefaultContextInGroup(int contextGroupId) override { |
|
496 |
310 |
return env_->context(); |
|
497 |
} |
||
498 |
|||
499 |
3 |
void installAdditionalCommandLineAPI(Local<Context> context, |
|
500 |
Local<Object> target) override { |
||
501 |
3 |
Local<Function> installer = env_->inspector_console_extension_installer(); |
|
502 |
✓✗ | 3 |
if (!installer.IsEmpty()) { |
503 |
3 |
Local<Value> argv[] = {target}; |
|
504 |
// If there is an exception, proceed in JS land |
||
505 |
3 |
USE(installer->Call(context, target, arraysize(argv), argv)); |
|
506 |
} |
||
507 |
3 |
} |
|
508 |
|||
509 |
2 |
void ReportUncaughtException(Local<Value> error, Local<Message> message) { |
|
510 |
2 |
Isolate* isolate = env_->isolate(); |
|
511 |
2 |
Local<Context> context = env_->context(); |
|
512 |
|||
513 |
4 |
int script_id = message->GetScriptOrigin().ScriptId(); |
|
514 |
|||
515 |
2 |
Local<v8::StackTrace> stack_trace = message->GetStackTrace(); |
|
516 |
|||
517 |
✓✓✓✗ ✓✗ |
4 |
if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 && |
518 |
✓✓ | 4 |
script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) { |
519 |
1 |
script_id = 0; |
|
520 |
} |
||
521 |
|||
522 |
2 |
const uint8_t DETAILS[] = "Uncaught"; |
|
523 |
|||
524 |
4 |
client_->exceptionThrown( |
|
525 |
context, |
||
526 |
StringView(DETAILS, sizeof(DETAILS) - 1), |
||
527 |
error, |
||
528 |
6 |
ToProtocolString(isolate, message->Get())->string(), |
|
529 |
4 |
ToProtocolString(isolate, message->GetScriptResourceName())->string(), |
|
530 |
✓✗ | 4 |
message->GetLineNumber(context).FromMaybe(0), |
531 |
✓✗ | 4 |
message->GetStartColumn(context).FromMaybe(0), |
532 |
4 |
client_->createStackTrace(stack_trace), |
|
533 |
2 |
script_id); |
|
534 |
2 |
} |
|
535 |
|||
536 |
2 |
void startRepeatingTimer(double interval_s, |
|
537 |
TimerCallback callback, |
||
538 |
void* data) override { |
||
539 |
auto result = |
||
540 |
2 |
timers_.emplace(std::piecewise_construct, std::make_tuple(data), |
|
541 |
8 |
std::make_tuple(env_, [=]() { callback(data); })); |
|
542 |
✗✓ | 2 |
CHECK(result.second); |
543 |
2 |
uint64_t interval = static_cast<uint64_t>(1000 * interval_s); |
|
544 |
2 |
result.first->second.Update(interval, interval); |
|
545 |
2 |
} |
|
546 |
|||
547 |
2 |
void cancelTimer(void* data) override { |
|
548 |
2 |
timers_.erase(data); |
|
549 |
2 |
} |
|
550 |
|||
551 |
// Async stack traces instrumentation. |
||
552 |
3 |
void AsyncTaskScheduled(const StringView& task_name, void* task, |
|
553 |
bool recurring) { |
||
554 |
3 |
client_->asyncTaskScheduled(task_name, task, recurring); |
|
555 |
3 |
} |
|
556 |
|||
557 |
4 |
void AsyncTaskCanceled(void* task) { |
|
558 |
4 |
client_->asyncTaskCanceled(task); |
|
559 |
4 |
} |
|
560 |
|||
561 |
6 |
void AsyncTaskStarted(void* task) { |
|
562 |
6 |
client_->asyncTaskStarted(task); |
|
563 |
6 |
} |
|
564 |
|||
565 |
10 |
void AsyncTaskFinished(void* task) { |
|
566 |
10 |
client_->asyncTaskFinished(task); |
|
567 |
10 |
} |
|
568 |
|||
569 |
void AllAsyncTasksCanceled() { |
||
570 |
client_->allAsyncTasksCanceled(); |
||
571 |
} |
||
572 |
|||
573 |
19 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
574 |
✓✓ | 57 |
for (const auto& id_channel : channels_) { |
575 |
38 |
id_channel.second->schedulePauseOnNextStatement(reason); |
|
576 |
} |
||
577 |
19 |
} |
|
578 |
|||
579 |
6594 |
bool hasConnectedSessions() { |
|
580 |
✓✓ | 13224 |
for (const auto& id_channel : channels_) { |
581 |
// Other sessions are "invisible" for most purposes |
||
582 |
✓✓ | 6722 |
if (id_channel.second->preventShutdown()) |
583 |
92 |
return true; |
|
584 |
} |
||
585 |
6502 |
return false; |
|
586 |
} |
||
587 |
|||
588 |
6405 |
bool notifyWaitingForDisconnect() { |
|
589 |
6405 |
bool retaining_context = false; |
|
590 |
✓✓ | 12871 |
for (const auto& id_channel : channels_) { |
591 |
✓✓ | 6466 |
if (id_channel.second->notifyWaitingForDisconnect()) |
592 |
3 |
retaining_context = true; |
|
593 |
} |
||
594 |
6405 |
return retaining_context; |
|
595 |
} |
||
596 |
|||
597 |
13929 |
std::shared_ptr<MainThreadHandle> getThreadHandle() { |
|
598 |
✓✓ | 13929 |
if (!interface_) { |
599 |
6422 |
interface_ = std::make_shared<MainThreadInterface>( |
|
600 |
12844 |
env_->inspector_agent()); |
|
601 |
} |
||
602 |
13929 |
return interface_->GetHandle(); |
|
603 |
} |
||
604 |
|||
605 |
10055 |
std::shared_ptr<WorkerManager> getWorkerManager() { |
|
606 |
✓✓ | 10055 |
if (!is_main_) { |
607 |
754 |
return nullptr; |
|
608 |
} |
||
609 |
✓✓ | 9301 |
if (worker_manager_ == nullptr) { |
610 |
worker_manager_ = |
||
611 |
5678 |
std::make_shared<WorkerManager>(getThreadHandle()); |
|
612 |
} |
||
613 |
9301 |
return worker_manager_; |
|
614 |
} |
||
615 |
|||
616 |
80605 |
bool IsActive() { |
|
617 |
80605 |
return !channels_.empty(); |
|
618 |
} |
||
619 |
|||
620 |
private: |
||
621 |
289 |
bool shouldRunMessageLoop() { |
|
622 |
✓✓ | 289 |
if (waiting_for_frontend_) |
623 |
50 |
return true; |
|
624 |
✓✓✓✓ |
239 |
if (waiting_for_sessions_disconnect_ || waiting_for_resume_) { |
625 |
189 |
return hasConnectedSessions(); |
|
626 |
} |
||
627 |
50 |
return false; |
|
628 |
} |
||
629 |
|||
630 |
172 |
void runMessageLoop() { |
|
631 |
✗✓ | 172 |
if (running_nested_loop_) |
632 |
return; |
||
633 |
|||
634 |
172 |
running_nested_loop_ = true; |
|
635 |
|||
636 |
✓✓ | 289 |
while (shouldRunMessageLoop()) { |
637 |
✓✗ | 117 |
if (interface_) interface_->WaitForFrontendEvent(); |
638 |
117 |
env_->RunAndClearInterrupts(); |
|
639 |
} |
||
640 |
172 |
running_nested_loop_ = false; |
|
641 |
} |
||
642 |
|||
643 |
75358 |
double currentTimeMS() override { |
|
644 |
75358 |
return env_->isolate_data()->platform()->CurrentClockTimeMillis(); |
|
645 |
} |
||
646 |
|||
647 |
752466 |
std::unique_ptr<StringBuffer> resourceNameToUrl( |
|
648 |
const StringView& resource_name_view) override { |
||
649 |
std::string resource_name = |
||
650 |
1504932 |
protocol::StringUtil::StringViewToUtf8(resource_name_view); |
|
651 |
✓✓ | 752466 |
if (!IsFilePath(resource_name)) |
652 |
714616 |
return nullptr; |
|
653 |
37850 |
node::url::URL url = node::url::URL::FromFilePath(resource_name); |
|
654 |
37850 |
return Utf8ToStringView(url.href()); |
|
655 |
} |
||
656 |
|||
657 |
node::Environment* env_; |
||
658 |
bool is_main_; |
||
659 |
bool running_nested_loop_ = false; |
||
660 |
std::unique_ptr<V8Inspector> client_; |
||
661 |
// Note: ~ChannelImpl may access timers_ so timers_ has to come first. |
||
662 |
std::unordered_map<void*, TimerWrapHandle> timers_; |
||
663 |
std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_; |
||
664 |
int next_session_id_ = 1; |
||
665 |
bool waiting_for_resume_ = false; |
||
666 |
bool waiting_for_frontend_ = false; |
||
667 |
bool waiting_for_sessions_disconnect_ = false; |
||
668 |
// Allows accessing Inspector from non-main threads |
||
669 |
std::shared_ptr<MainThreadInterface> interface_; |
||
670 |
std::shared_ptr<WorkerManager> worker_manager_; |
||
671 |
}; |
||
672 |
|||
673 |
6431 |
Agent::Agent(Environment* env) |
|
674 |
: parent_env_(env), |
||
675 |
6431 |
debug_options_(env->options()->debug_options()), |
|
676 |
12862 |
host_port_(env->inspector_host_port()) {} |
|
677 |
|||
678 |
5735 |
Agent::~Agent() {} |
|
679 |
|||
680 |
6425 |
bool Agent::Start(const std::string& path, |
|
681 |
const DebugOptions& options, |
||
682 |
std::shared_ptr<ExclusiveAccess<HostPort>> host_port, |
||
683 |
bool is_main) { |
||
684 |
6425 |
path_ = path; |
|
685 |
6425 |
debug_options_ = options; |
|
686 |
✗✓ | 6425 |
CHECK_NOT_NULL(host_port); |
687 |
6425 |
host_port_ = host_port; |
|
688 |
|||
689 |
6425 |
client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main); |
|
690 |
✓✓ | 6425 |
if (parent_env_->owns_inspector()) { |
691 |
5683 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
692 |
✗✓ | 5683 |
CHECK_EQ(start_io_thread_async_initialized.exchange(true), false); |
693 |
✗✓ | 5683 |
CHECK_EQ(0, uv_async_init(parent_env_->event_loop(), |
694 |
&start_io_thread_async, |
||
695 |
StartIoThreadAsyncCallback)); |
||
696 |
5683 |
uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async)); |
|
697 |
5683 |
start_io_thread_async.data = this; |
|
698 |
// Ignore failure, SIGUSR1 won't work, but that should not block node start. |
||
699 |
5683 |
StartDebugSignalHandler(); |
|
700 |
|||
701 |
5683 |
parent_env_->AddCleanupHook([](void* data) { |
|
702 |
4987 |
Environment* env = static_cast<Environment*>(data); |
|
703 |
|||
704 |
{ |
||
705 |
4987 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
706 |
4987 |
start_io_thread_async.data = nullptr; |
|
707 |
} |
||
708 |
|||
709 |
// This is global, will never get freed |
||
710 |
4987 |
env->CloseHandle(&start_io_thread_async, [](uv_async_t*) { |
|
711 |
✗✓ | 4987 |
CHECK(start_io_thread_async_initialized.exchange(false)); |
712 |
4987 |
}); |
|
713 |
5683 |
}, parent_env_); |
|
714 |
} |
||
715 |
|||
716 |
6425 |
AtExit(parent_env_, [](void* env) { |
|
717 |
6411 |
Agent* agent = static_cast<Environment*>(env)->inspector_agent(); |
|
718 |
✓✓ | 6411 |
if (agent->IsActive()) { |
719 |
6403 |
agent->WaitForDisconnect(); |
|
720 |
} |
||
721 |
6425 |
}, parent_env_); |
|
722 |
|||
723 |
6425 |
bool wait_for_connect = options.wait_for_connect(); |
|
724 |
✓✓ | 6425 |
if (parent_handle_) { |
725 |
740 |
wait_for_connect = parent_handle_->WaitForConnect(); |
|
726 |
740 |
parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect); |
|
727 |
✓✓✓✗ ✓✓ |
5798 |
} else if (!options.inspector_enabled || !options.allow_attaching_debugger || |
728 |
✓✓ | 113 |
!StartIoThread()) { |
729 |
5575 |
return false; |
|
730 |
} |
||
731 |
|||
732 |
// Patch the debug options to implement waitForDebuggerOnStart for |
||
733 |
// the NodeWorker.enable method. |
||
734 |
✓✓ | 850 |
if (wait_for_connect) { |
735 |
✗✓ | 20 |
CHECK(!parent_env_->has_serialized_options()); |
736 |
20 |
debug_options_.EnableBreakFirstLine(); |
|
737 |
20 |
parent_env_->options()->get_debug_options()->EnableBreakFirstLine(); |
|
738 |
20 |
client_->waitForFrontend(); |
|
739 |
} |
||
740 |
850 |
return true; |
|
741 |
} |
||
742 |
|||
743 |
118 |
bool Agent::StartIoThread() { |
|
744 |
✓✓ | 118 |
if (io_ != nullptr) |
745 |
3 |
return true; |
|
746 |
|||
747 |
✓✓✗✓ ✗✓ |
115 |
if (!parent_env_->should_create_inspector() && !client_) { |
748 |
ThrowUninitializedInspectorError(parent_env_); |
||
749 |
return false; |
||
750 |
} |
||
751 |
|||
752 |
✗✓ | 115 |
CHECK_NOT_NULL(client_); |
753 |
|||
754 |
230 |
io_ = InspectorIo::Start(client_->getThreadHandle(), |
|
755 |
115 |
path_, |
|
756 |
115 |
host_port_, |
|
757 |
230 |
debug_options_.inspect_publish_uid); |
|
758 |
✓✓ | 115 |
if (io_ == nullptr) { |
759 |
3 |
return false; |
|
760 |
} |
||
761 |
112 |
NotifyClusterWorkersDebugEnabled(parent_env_); |
|
762 |
112 |
return true; |
|
763 |
} |
||
764 |
|||
765 |
4 |
void Agent::Stop() { |
|
766 |
4 |
io_.reset(); |
|
767 |
4 |
} |
|
768 |
|||
769 |
7394 |
std::unique_ptr<InspectorSession> Agent::Connect( |
|
770 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
771 |
bool prevent_shutdown) { |
||
772 |
✓✓✗✓ ✗✓ |
7394 |
if (!parent_env_->should_create_inspector() && !client_) { |
773 |
ThrowUninitializedInspectorError(parent_env_); |
||
774 |
return std::unique_ptr<InspectorSession>{}; |
||
775 |
} |
||
776 |
|||
777 |
✗✓ | 7394 |
CHECK_NOT_NULL(client_); |
778 |
|||
779 |
7394 |
int session_id = client_->connectFrontend(std::move(delegate), |
|
780 |
prevent_shutdown); |
||
781 |
return std::unique_ptr<InspectorSession>( |
||
782 |
7394 |
new SameThreadInspectorSession(session_id, client_)); |
|
783 |
} |
||
784 |
|||
785 |
2 |
std::unique_ptr<InspectorSession> Agent::ConnectToMainThread( |
|
786 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
787 |
bool prevent_shutdown) { |
||
788 |
✗✓✗✗ ✗✓ |
2 |
if (!parent_env_->should_create_inspector() && !client_) { |
789 |
ThrowUninitializedInspectorError(parent_env_); |
||
790 |
return std::unique_ptr<InspectorSession>{}; |
||
791 |
} |
||
792 |
|||
793 |
✗✓ | 2 |
CHECK_NOT_NULL(parent_handle_); |
794 |
✗✓ | 2 |
CHECK_NOT_NULL(client_); |
795 |
auto thread_safe_delegate = |
||
796 |
4 |
client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)); |
|
797 |
2 |
return parent_handle_->Connect(std::move(thread_safe_delegate), |
|
798 |
2 |
prevent_shutdown); |
|
799 |
} |
||
800 |
|||
801 |
6405 |
// Blocks environment teardown until connected inspector sessions have had a
// chance to detach. NOTE(review): statement order is load-bearing here —
// parent_handle_ must be reset before the "waiting" notice is decided, and
// contextDestroyed() is only reported when no session asked us to wait.
void Agent::WaitForDisconnect() {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return;
  }

  CHECK_NOT_NULL(client_);
  // Remember whether this agent belongs to a worker before dropping the
  // handle; workers do not print the notice below.
  bool is_worker = parent_handle_ != nullptr;
  parent_handle_.reset();
  if (client_->hasConnectedSessions() && !is_worker) {
    fprintf(stderr, "Waiting for the debugger to disconnect...\n");
    fflush(stderr);
  }
  if (!client_->notifyWaitingForDisconnect()) {
    // No session requested a wait: report context teardown immediately.
    client_->contextDestroyed(parent_env_->context());
  } else if (is_worker) {
    client_->waitForSessionsDisconnect();
  }
  if (io_ != nullptr) {
    // Stop the websocket endpoint from taking new clients, then wait for
    // existing ones to go away.
    io_->StopAcceptingNewConnections();
    client_->waitForSessionsDisconnect();
  }
}
||
824 |
|||
825 |
299 |
void Agent::ReportUncaughtException(Local<Value> error, |
|
826 |
Local<Message> message) { |
||
827 |
✓✓ | 299 |
if (!IsListening()) |
828 |
297 |
return; |
|
829 |
2 |
client_->ReportUncaughtException(error, message); |
|
830 |
2 |
WaitForDisconnect(); |
|
831 |
} |
||
832 |
|||
833 |
19 |
// Schedules a debugger pause before the next JS statement executes, tagged
// with `reason` for the frontend. NOTE(review): unlike most Agent methods,
// this does not guard against a null client_ — presumably callers only
// invoke it when the inspector is initialized; verify at call sites.
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) {
  client_->schedulePauseOnNextStatement(reason);
}
|
836 |
|||
837 |
6396 |
// Stores the JS functions that enable/disable inspector async-hooks
// tracking, then replays any toggle that was requested before the functions
// existed (see the pending_*_async_hook_ flags set by
// EnableAsyncHook/DisableAsyncHook).
void Agent::RegisterAsyncHook(Isolate* isolate,
                              Local<Function> enable_function,
                              Local<Function> disable_function) {
  parent_env_->set_inspector_enable_async_hooks(enable_function);
  parent_env_->set_inspector_disable_async_hooks(disable_function);
  if (pending_enable_async_hook_) {
    // Only one direction can be pending at a time.
    CHECK(!pending_disable_async_hook_);
    pending_enable_async_hook_ = false;
    EnableAsyncHook();
  } else if (pending_disable_async_hook_) {
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
    DisableAsyncHook();
  }
}
|
852 |
|||
853 |
6 |
// Invokes the JS-side "enable async hooks" function, or records the request
// for later replay when the function has not been registered yet (see
// RegisterAsyncHook).
void Agent::EnableAsyncHook() {
  HandleScope scope(parent_env_->isolate());
  Local<Function> enable = parent_env_->inspector_enable_async_hooks();
  if (!enable.IsEmpty()) {
    ToggleAsyncHook(parent_env_->isolate(), enable);
  } else if (pending_disable_async_hook_) {
    // A disable request was still queued; the two requests cancel out.
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
  } else {
    // Hook functions not registered yet; defer until RegisterAsyncHook().
    pending_enable_async_hook_ = true;
  }
}
|
865 |
|||
866 |
1 |
void Agent::DisableAsyncHook() { |
|
867 |
2 |
HandleScope scope(parent_env_->isolate()); |
|
868 |
1 |
Local<Function> disable = parent_env_->inspector_enable_async_hooks(); |
|
869 |
✓✗ | 1 |
if (!disable.IsEmpty()) { |
870 |
1 |
ToggleAsyncHook(parent_env_->isolate(), disable); |
|
871 |
} else if (pending_enable_async_hook_) { |
||
872 |
CHECK(!pending_disable_async_hook_); |
||
873 |
pending_enable_async_hook_ = false; |
||
874 |
} else { |
||
875 |
pending_disable_async_hook_ = true; |
||
876 |
} |
||
877 |
1 |
} |
|
878 |
|||
879 |
5 |
// Calls `fn` (one of the registered enable/disable async-hook functions)
// with no arguments in the parent environment's context. Aborts the process
// if the call throws for any reason other than termination.
void Agent::ToggleAsyncHook(Isolate* isolate, Local<Function> fn) {
  // Guard against running this during cleanup -- no async events will be
  // emitted anyway at that point anymore, and calling into JS is not possible.
  // This should probably not be something we're attempting in the first place,
  // Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039
  if (!parent_env_->can_call_into_js()) return;
  CHECK(parent_env_->has_run_bootstrapping_code());
  HandleScope handle_scope(isolate);
  CHECK(!fn.IsEmpty());
  auto context = parent_env_->context();
  v8::TryCatch try_catch(isolate);
  // The return value is intentionally discarded; only failure matters.
  USE(fn->Call(context, Undefined(isolate), 0, nullptr));
  // HasTerminated() means the isolate is being torn down, which is not an
  // error on our side — only report genuine JS exceptions.
  if (try_catch.HasCaught() && !try_catch.HasTerminated()) {
    PrintCaughtException(isolate, context, try_catch);
    FatalError("\nnode::inspector::Agent::ToggleAsyncHook",
               "Cannot toggle Inspector's AsyncHook, please report this.");
  }
}
||
897 |
|||
898 |
3 |
// Forwards an async-task scheduling event to the inspector client
// (pure delegation; `task` is an opaque identifier).
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task,
                               bool recurring) {
  client_->AsyncTaskScheduled(task_name, task, recurring);
}
|
902 |
|||
903 |
4 |
// Forwards an async-task cancellation event to the inspector client.
void Agent::AsyncTaskCanceled(void* task) {
  client_->AsyncTaskCanceled(task);
}
|
906 |
|||
907 |
6 |
// Forwards an async-task start event to the inspector client.
void Agent::AsyncTaskStarted(void* task) {
  client_->AsyncTaskStarted(task);
}
|
910 |
|||
911 |
10 |
// Forwards an async-task completion event to the inspector client.
void Agent::AsyncTaskFinished(void* task) {
  client_->AsyncTaskFinished(task);
}
|
914 |
|||
915 |
// Forwards a bulk async-task cancellation to the inspector client.
void Agent::AllAsyncTasksCanceled() {
  client_->AllAsyncTasksCanceled();
}
||
918 |
|||
919 |
2 |
// Requests (from any thread / signal context) that the inspector IO thread
// be started on the main thread. No-op when attaching a debugger is not
// allowed by policy.
void Agent::RequestIoThreadStart() {
  // We need to attempt to interrupt V8 flow (in case Node is running
  // continuous JS code) and to wake up libuv thread (in case Node is waiting
  // for IO events)
  if (!options().allow_attaching_debugger) {
    return;
  }
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
  // Actually start the IO thread from the main thread via a V8 interrupt.
  parent_env_->RequestInterrupt([this](Environment*) {
    StartIoThread();
  });

  // NOTE(review): the second send appears deliberate — it wakes the libuv
  // loop again after the interrupt is queued, in case the first wakeup was
  // consumed before the interrupt was registered. Do not "deduplicate".
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
}
||
935 |
|||
936 |
7051 |
// Reports a newly created V8 context to the inspector client.
void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) {
  // A null client_ happens for a main context; there is nothing to report.
  if (client_ != nullptr)
    client_->contextCreated(context, info);
}
||
941 |
|||
942 |
80751 |
bool Agent::IsActive() { |
|
943 |
✗✓ | 80751 |
if (client_ == nullptr) |
944 |
return false; |
||
945 |
✓✓✓✓ |
80751 |
return io_ != nullptr || client_->IsActive(); |
946 |
} |
||
947 |
|||
948 |
740 |
// Takes ownership of the handle linking this (worker) agent to its parent
// thread's inspector.
void Agent::SetParentHandle(
    std::unique_ptr<ParentInspectorHandle> parent_handle) {
  parent_handle_ = std::move(parent_handle);
}
|
952 |
|||
953 |
977 |
std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle( |
|
954 |
uint64_t thread_id, const std::string& url) { |
||
955 |
✗✓✗✗ ✗✓ |
977 |
if (!parent_env_->should_create_inspector() && !client_) { |
956 |
ThrowUninitializedInspectorError(parent_env_); |
||
957 |
return std::unique_ptr<ParentInspectorHandle>{}; |
||
958 |
} |
||
959 |
|||
960 |
✗✓ | 977 |
CHECK_NOT_NULL(client_); |
961 |
✓✓ | 977 |
if (!parent_handle_) { |
962 |
967 |
return client_->getWorkerManager()->NewParentHandle(thread_id, url); |
|
963 |
} else { |
||
964 |
10 |
return parent_handle_->NewParentInspectorHandle(thread_id, url); |
|
965 |
} |
||
966 |
} |
||
967 |
|||
968 |
void Agent::WaitForConnect() { |
||
969 |
if (!parent_env_->should_create_inspector() && !client_) { |
||
970 |
ThrowUninitializedInspectorError(parent_env_); |
||
971 |
return; |
||
972 |
} |
||
973 |
|||
974 |
CHECK_NOT_NULL(client_); |
||
975 |
client_->waitForFrontend(); |
||
976 |
} |
||
977 |
|||
978 |
1694 |
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() { |
|
979 |
✗✓✗✗ ✗✓ |
1694 |
if (!parent_env_->should_create_inspector() && !client_) { |
980 |
ThrowUninitializedInspectorError(parent_env_); |
||
981 |
return std::unique_ptr<WorkerManager>{}; |
||
982 |
} |
||
983 |
|||
984 |
✗✓ | 1694 |
CHECK_NOT_NULL(client_); |
985 |
1694 |
return client_->getWorkerManager(); |
|
986 |
} |
||
987 |
|||
988 |
6 |
std::string Agent::GetWsUrl() const { |
|
989 |
✓✓ | 6 |
if (io_ == nullptr) |
990 |
1 |
return ""; |
|
991 |
5 |
return io_->GetWsUrl(); |
|
992 |
} |
||
993 |
|||
994 |
26784 |
// Disconnects this session's frontend if the client is still alive
// (client_ is a weak_ptr, so the client may already be gone at teardown).
SameThreadInspectorSession::~SameThreadInspectorSession() {
  if (auto client = client_.lock())
    client->disconnectFrontend(session_id_);
}
|
999 |
|||
1000 |
20707 |
void SameThreadInspectorSession::Dispatch( |
|
1001 |
const v8_inspector::StringView& message) { |
||
1002 |
41414 |
auto client = client_.lock(); |
|
1003 |
✓✗ | 20707 |
if (client) |
1004 |
20707 |
client->dispatchMessageFromFrontend(session_id_, message); |
|
1005 |
20707 |
} |
|
1006 |
|||
1007 |
} // namespace inspector |
||
1008 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |