GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "inspector_agent.h" |
||
2 |
|||
3 |
#include "env-inl.h" |
||
4 |
#include "inspector/main_thread_interface.h" |
||
5 |
#include "inspector/node_string.h" |
||
6 |
#include "inspector/runtime_agent.h" |
||
7 |
#include "inspector/tracing_agent.h" |
||
8 |
#include "inspector/worker_agent.h" |
||
9 |
#include "inspector/worker_inspector.h" |
||
10 |
#include "inspector_io.h" |
||
11 |
#include "node/inspector/protocol/Protocol.h" |
||
12 |
#include "node_errors.h" |
||
13 |
#include "node_internals.h" |
||
14 |
#include "node_options-inl.h" |
||
15 |
#include "node_process.h" |
||
16 |
#include "node_url.h" |
||
17 |
#include "util-inl.h" |
||
18 |
#include "timer_wrap.h" |
||
19 |
#include "v8-inspector.h" |
||
20 |
#include "v8-platform.h" |
||
21 |
|||
22 |
#include "libplatform/libplatform.h" |
||
23 |
|||
24 |
#ifdef __POSIX__ |
||
25 |
#include <pthread.h> |
||
26 |
#include <climits> // PTHREAD_STACK_MIN |
||
27 |
#endif // __POSIX__ |
||
28 |
|||
29 |
#include <algorithm> |
||
30 |
#include <cstring> |
||
31 |
#include <sstream> |
||
32 |
#include <unordered_map> |
||
33 |
#include <vector> |
||
34 |
|||
35 |
namespace node { |
||
36 |
namespace inspector { |
||
37 |
namespace { |
||
38 |
|||
39 |
using node::FatalError; |
||
40 |
|||
41 |
using v8::Context; |
||
42 |
using v8::Function; |
||
43 |
using v8::Global; |
||
44 |
using v8::HandleScope; |
||
45 |
using v8::Isolate; |
||
46 |
using v8::Local; |
||
47 |
using v8::Message; |
||
48 |
using v8::Object; |
||
49 |
using v8::Value; |
||
50 |
|||
51 |
using v8_inspector::StringBuffer; |
||
52 |
using v8_inspector::StringView; |
||
53 |
using v8_inspector::V8Inspector; |
||
54 |
using v8_inspector::V8InspectorClient; |
||
55 |
|||
56 |
static uv_sem_t start_io_thread_semaphore; |
||
57 |
static uv_async_t start_io_thread_async; |
||
58 |
// This is just an additional check to make sure start_io_thread_async |
||
59 |
// is not accidentally re-used or used when uninitialized. |
||
60 |
static std::atomic_bool start_io_thread_async_initialized { false }; |
||
61 |
// Protects the Agent* stored in start_io_thread_async.data. |
||
62 |
4678 |
static Mutex start_io_thread_async_mutex; |
|
63 |
|||
64 |
4 |
std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
|
65 |
Local<Value> value) { |
||
66 |
8 |
TwoByteValue buffer(isolate, value); |
|
67 |
8 |
return StringBuffer::create(StringView(*buffer, buffer.length())); |
|
68 |
} |
||
69 |
|||
70 |
// Called on the main thread. |
||
71 |
void StartIoThreadAsyncCallback(uv_async_t* handle) { |
||
72 |
static_cast<Agent*>(handle->data)->StartIoThread(); |
||
73 |
} |
||
74 |
|||
75 |
|||
76 |
#ifdef __POSIX__ |
||
77 |
1 |
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) { |
|
78 |
1 |
uv_sem_post(&start_io_thread_semaphore); |
|
79 |
1 |
} |
|
80 |
|||
81 |
4635 |
inline void* StartIoThreadMain(void* unused) { |
|
82 |
for (;;) { |
||
83 |
4635 |
uv_sem_wait(&start_io_thread_semaphore); |
|
84 |
2 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
85 |
|||
86 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
87 |
1 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
|
88 |
✓✗ | 1 |
if (agent != nullptr) |
89 |
1 |
agent->RequestIoThreadStart(); |
|
90 |
1 |
} |
|
91 |
return nullptr; |
||
92 |
} |
||
93 |
|||
94 |
4634 |
static int StartDebugSignalHandler() { |
|
95 |
// Start a watchdog thread for calling v8::Debug::DebugBreak() because |
||
96 |
// it's not safe to call directly from the signal handler, it can |
||
97 |
// deadlock with the thread it interrupts. |
||
98 |
✗✓ | 4634 |
CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0)); |
99 |
pthread_attr_t attr; |
||
100 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_attr_init(&attr)); |
101 |
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
102 |
// PTHREAD_STACK_MIN is 2 KB with musl libc, which is too small to safely |
||
103 |
// receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KB on arm64, which |
||
104 |
// is the musl architecture with the biggest MINSIGSTKSZ so let's use that |
||
105 |
// as a lower bound and let's quadruple it just in case. The goal is to avoid |
||
106 |
// creating a big 2 or 4 MB address space gap (problematic on 32 bits |
||
107 |
// because of fragmentation), not squeeze out every last byte. |
||
108 |
// Omitted on FreeBSD because it doesn't seem to like small stacks. |
||
109 |
const size_t stack_size = std::max(static_cast<size_t>(4 * 8192), |
||
110 |
4634 |
static_cast<size_t>(PTHREAD_STACK_MIN)); |
|
111 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
112 |
#endif // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
113 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
114 |
sigset_t sigmask; |
||
115 |
// Mask all signals. |
||
116 |
4634 |
sigfillset(&sigmask); |
|
117 |
sigset_t savemask; |
||
118 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask)); |
119 |
4634 |
sigmask = savemask; |
|
120 |
pthread_t thread; |
||
121 |
const int err = pthread_create(&thread, &attr, |
||
122 |
4634 |
StartIoThreadMain, nullptr); |
|
123 |
// Restore original mask |
||
124 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr)); |
125 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_attr_destroy(&attr)); |
126 |
✗✓ | 4634 |
if (err != 0) { |
127 |
fprintf(stderr, "node[%u]: pthread_create: %s\n", |
||
128 |
uv_os_getpid(), strerror(err)); |
||
129 |
fflush(stderr); |
||
130 |
// Leave SIGUSR1 blocked. We don't install a signal handler, |
||
131 |
// receiving the signal would terminate the process. |
||
132 |
return -err; |
||
133 |
} |
||
134 |
4634 |
RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup); |
|
135 |
// Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered. |
||
136 |
4634 |
sigemptyset(&sigmask); |
|
137 |
4634 |
sigaddset(&sigmask, SIGUSR1); |
|
138 |
✗✓ | 4634 |
CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr)); |
139 |
4634 |
return 0; |
|
140 |
} |
||
141 |
#endif // __POSIX__ |
||
142 |
|||
143 |
|||
144 |
#ifdef _WIN32 |
||
145 |
DWORD WINAPI StartIoThreadProc(void* arg) { |
||
146 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
||
147 |
CHECK(start_io_thread_async_initialized); |
||
148 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
||
149 |
if (agent != nullptr) |
||
150 |
agent->RequestIoThreadStart(); |
||
151 |
return 0; |
||
152 |
} |
||
153 |
|||
154 |
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf, |
||
155 |
size_t buf_len) { |
||
156 |
return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid); |
||
157 |
} |
||
158 |
|||
159 |
static int StartDebugSignalHandler() { |
||
160 |
wchar_t mapping_name[32]; |
||
161 |
HANDLE mapping_handle; |
||
162 |
DWORD pid; |
||
163 |
LPTHREAD_START_ROUTINE* handler; |
||
164 |
|||
165 |
pid = uv_os_getpid(); |
||
166 |
|||
167 |
if (GetDebugSignalHandlerMappingName(pid, |
||
168 |
mapping_name, |
||
169 |
arraysize(mapping_name)) < 0) { |
||
170 |
return -1; |
||
171 |
} |
||
172 |
|||
173 |
mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE, |
||
174 |
nullptr, |
||
175 |
PAGE_READWRITE, |
||
176 |
0, |
||
177 |
sizeof *handler, |
||
178 |
mapping_name); |
||
179 |
if (mapping_handle == nullptr) { |
||
180 |
return -1; |
||
181 |
} |
||
182 |
|||
183 |
handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>( |
||
184 |
MapViewOfFile(mapping_handle, |
||
185 |
FILE_MAP_ALL_ACCESS, |
||
186 |
0, |
||
187 |
0, |
||
188 |
sizeof *handler)); |
||
189 |
if (handler == nullptr) { |
||
190 |
CloseHandle(mapping_handle); |
||
191 |
return -1; |
||
192 |
} |
||
193 |
|||
194 |
*handler = StartIoThreadProc; |
||
195 |
|||
196 |
UnmapViewOfFile(static_cast<void*>(handler)); |
||
197 |
|||
198 |
return 0; |
||
199 |
} |
||
200 |
#endif // _WIN32 |
||
201 |
|||
202 |
|||
203 |
const int CONTEXT_GROUP_ID = 1; |
||
204 |
|||
205 |
394 |
std::string GetWorkerLabel(node::Environment* env) { |
|
206 |
788 |
std::ostringstream result; |
|
207 |
394 |
result << "Worker[" << env->thread_id() << "]"; |
|
208 |
788 |
return result.str(); |
|
209 |
} |
||
210 |
|||
211 |
class ChannelImpl final : public v8_inspector::V8Inspector::Channel, |
||
212 |
public protocol::FrontendChannel { |
||
213 |
public: |
||
214 |
5843 |
explicit ChannelImpl(Environment* env, |
|
215 |
const std::unique_ptr<V8Inspector>& inspector, |
||
216 |
std::shared_ptr<WorkerManager> worker_manager, |
||
217 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
218 |
std::shared_ptr<MainThreadHandle> main_thread_, |
||
219 |
bool prevent_shutdown) |
||
220 |
11686 |
: delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown), |
|
221 |
11686 |
retaining_context_(false) { |
|
222 |
5843 |
session_ = inspector->connect(CONTEXT_GROUP_ID, this, StringView()); |
|
223 |
5843 |
node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this); |
|
224 |
tracing_agent_ = |
||
225 |
5843 |
std::make_unique<protocol::TracingAgent>(env, main_thread_); |
|
226 |
5843 |
tracing_agent_->Wire(node_dispatcher_.get()); |
|
227 |
✓✓ | 5843 |
if (worker_manager) { |
228 |
5434 |
worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager); |
|
229 |
5434 |
worker_agent_->Wire(node_dispatcher_.get()); |
|
230 |
} |
||
231 |
5843 |
runtime_agent_ = std::make_unique<protocol::RuntimeAgent>(); |
|
232 |
5843 |
runtime_agent_->Wire(node_dispatcher_.get()); |
|
233 |
5843 |
} |
|
234 |
|||
235 |
16113 |
~ChannelImpl() override { |
|
236 |
5371 |
tracing_agent_->disable(); |
|
237 |
5371 |
tracing_agent_.reset(); // Dispose before the dispatchers |
|
238 |
✓✓ | 5370 |
if (worker_agent_) { |
239 |
4962 |
worker_agent_->disable(); |
|
240 |
4962 |
worker_agent_.reset(); // Dispose before the dispatchers |
|
241 |
} |
||
242 |
5370 |
runtime_agent_->disable(); |
|
243 |
5369 |
runtime_agent_.reset(); // Dispose before the dispatchers |
|
244 |
10742 |
} |
|
245 |
|||
246 |
16312 |
void dispatchProtocolMessage(const StringView& message) { |
|
247 |
32626 |
std::string raw_message = protocol::StringUtil::StringViewToUtf8(message); |
|
248 |
std::unique_ptr<protocol::DictionaryValue> value = |
||
249 |
32624 |
protocol::DictionaryValue::cast(protocol::StringUtil::parseMessage( |
|
250 |
32626 |
raw_message, false)); |
|
251 |
int call_id; |
||
252 |
32626 |
std::string method; |
|
253 |
16312 |
node_dispatcher_->parseCommand(value.get(), &call_id, &method); |
|
254 |
✓✓ | 32627 |
if (v8_inspector::V8InspectorSession::canDispatchMethod( |
255 |
32627 |
Utf8ToStringView(method)->string())) { |
|
256 |
16274 |
session_->dispatchProtocolMessage(message); |
|
257 |
} else { |
||
258 |
117 |
node_dispatcher_->dispatch(call_id, method, std::move(value), |
|
259 |
78 |
raw_message); |
|
260 |
} |
||
261 |
16314 |
} |
|
262 |
|||
263 |
38 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
264 |
76 |
std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason); |
|
265 |
38 |
session_->schedulePauseOnNextStatement(buffer->string(), buffer->string()); |
|
266 |
38 |
} |
|
267 |
|||
268 |
5300 |
bool preventShutdown() { |
|
269 |
5300 |
return prevent_shutdown_; |
|
270 |
} |
||
271 |
|||
272 |
5076 |
bool notifyWaitingForDisconnect() { |
|
273 |
5076 |
retaining_context_ = runtime_agent_->notifyWaitingForDisconnect(); |
|
274 |
5076 |
return retaining_context_; |
|
275 |
} |
||
276 |
|||
277 |
779 |
bool retainingContext() { |
|
278 |
779 |
return retaining_context_; |
|
279 |
} |
||
280 |
|||
281 |
private: |
||
282 |
16275 |
void sendResponse( |
|
283 |
int callId, |
||
284 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
285 |
16275 |
sendMessageToFrontend(message->string()); |
|
286 |
16275 |
} |
|
287 |
|||
288 |
5686 |
void sendNotification( |
|
289 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
290 |
5686 |
sendMessageToFrontend(message->string()); |
|
291 |
5686 |
} |
|
292 |
|||
293 |
1310 |
void flushProtocolNotifications() override { } |
|
294 |
|||
295 |
22249 |
void sendMessageToFrontend(const StringView& message) { |
|
296 |
22249 |
delegate_->SendMessageToFrontend(message); |
|
297 |
22249 |
} |
|
298 |
|||
299 |
288 |
void sendMessageToFrontend(const std::string& message) { |
|
300 |
288 |
sendMessageToFrontend(Utf8ToStringView(message)->string()); |
|
301 |
288 |
} |
|
302 |
|||
303 |
using Serializable = protocol::Serializable; |
||
304 |
|||
305 |
39 |
void sendProtocolResponse(int callId, |
|
306 |
std::unique_ptr<Serializable> message) override { |
||
307 |
39 |
sendMessageToFrontend(message->serializeToJSON()); |
|
308 |
39 |
} |
|
309 |
249 |
void sendProtocolNotification( |
|
310 |
std::unique_ptr<Serializable> message) override { |
||
311 |
249 |
sendMessageToFrontend(message->serializeToJSON()); |
|
312 |
249 |
} |
|
313 |
|||
314 |
void fallThrough(int callId, |
||
315 |
const std::string& method, |
||
316 |
const std::string& message) override { |
||
317 |
DCHECK(false); |
||
318 |
} |
||
319 |
|||
320 |
std::unique_ptr<protocol::RuntimeAgent> runtime_agent_; |
||
321 |
std::unique_ptr<protocol::TracingAgent> tracing_agent_; |
||
322 |
std::unique_ptr<protocol::WorkerAgent> worker_agent_; |
||
323 |
std::unique_ptr<InspectorSessionDelegate> delegate_; |
||
324 |
std::unique_ptr<v8_inspector::V8InspectorSession> session_; |
||
325 |
std::unique_ptr<protocol::UberDispatcher> node_dispatcher_; |
||
326 |
bool prevent_shutdown_; |
||
327 |
bool retaining_context_; |
||
328 |
}; |
||
329 |
|||
330 |
class SameThreadInspectorSession : public InspectorSession { |
||
331 |
public: |
||
332 |
5843 |
SameThreadInspectorSession( |
|
333 |
int session_id, std::shared_ptr<NodeInspectorClient> client) |
||
334 |
5843 |
: session_id_(session_id), client_(client) {} |
|
335 |
~SameThreadInspectorSession() override; |
||
336 |
void Dispatch(const v8_inspector::StringView& message) override; |
||
337 |
|||
338 |
private: |
||
339 |
int session_id_; |
||
340 |
std::weak_ptr<NodeInspectorClient> client_; |
||
341 |
}; |
||
342 |
|||
343 |
77 |
void NotifyClusterWorkersDebugEnabled(Environment* env) { |
|
344 |
77 |
Isolate* isolate = env->isolate(); |
|
345 |
154 |
HandleScope handle_scope(isolate); |
|
346 |
77 |
Local<Context> context = env->context(); |
|
347 |
|||
348 |
// Send message to enable debug in cluster workers |
||
349 |
77 |
Local<Object> message = Object::New(isolate); |
|
350 |
154 |
message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"), |
|
351 |
308 |
FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check(); |
|
352 |
77 |
ProcessEmit(env, "internalMessage", message); |
|
353 |
77 |
} |
|
354 |
|||
355 |
#ifdef _WIN32 |
||
356 |
bool IsFilePath(const std::string& path) { |
||
357 |
// '\\' |
||
358 |
if (path.length() > 2 && path[0] == '\\' && path[1] == '\\') |
||
359 |
return true; |
||
360 |
// '[A-Z]:[/\\]' |
||
361 |
if (path.length() < 3) |
||
362 |
return false; |
||
363 |
if ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) |
||
364 |
return path[1] == ':' && (path[2] == '/' || path[2] == '\\'); |
||
365 |
return false; |
||
366 |
} |
||
367 |
#else |
||
368 |
459340 |
bool IsFilePath(const std::string& path) { |
|
369 |
✓✓✓✓ |
459340 |
return !path.empty() && path[0] == '/'; |
370 |
} |
||
371 |
#endif // __POSIX__ |
||
372 |
|||
373 |
} // namespace |
||
374 |
|||
375 |
4558 |
class NodeInspectorClient : public V8InspectorClient { |
|
376 |
public: |
||
377 |
5030 |
explicit NodeInspectorClient(node::Environment* env, bool is_main) |
|
378 |
5030 |
: env_(env), is_main_(is_main) { |
|
379 |
5030 |
client_ = V8Inspector::create(env->isolate(), this); |
|
380 |
// TODO(bnoordhuis) Make name configurable from src/node.cc. |
||
381 |
std::string name = |
||
382 |
✓✓ | 10060 |
is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env); |
383 |
10060 |
ContextInfo info(name); |
|
384 |
5030 |
info.is_default = true; |
|
385 |
5030 |
contextCreated(env->context(), info); |
|
386 |
5030 |
} |
|
387 |
|||
388 |
40 |
void runMessageLoopOnPause(int context_group_id) override { |
|
389 |
40 |
waiting_for_resume_ = true; |
|
390 |
40 |
runMessageLoop(); |
|
391 |
40 |
} |
|
392 |
|||
393 |
77 |
void waitForSessionsDisconnect() { |
|
394 |
77 |
waiting_for_sessions_disconnect_ = true; |
|
395 |
77 |
runMessageLoop(); |
|
396 |
77 |
} |
|
397 |
|||
398 |
19 |
void waitForFrontend() { |
|
399 |
19 |
waiting_for_frontend_ = true; |
|
400 |
19 |
runMessageLoop(); |
|
401 |
19 |
} |
|
402 |
|||
403 |
13 |
void maxAsyncCallStackDepthChanged(int depth) override { |
|
404 |
✓✓ | 13 |
if (waiting_for_sessions_disconnect_) { |
405 |
// V8 isolate is mostly done and is only letting Inspector protocol |
||
406 |
// clients gather data. |
||
407 |
4 |
return; |
|
408 |
} |
||
409 |
✓✗ | 9 |
if (auto agent = env_->inspector_agent()) { |
410 |
✓✓ | 9 |
if (depth == 0) { |
411 |
3 |
agent->DisableAsyncHook(); |
|
412 |
} else { |
||
413 |
6 |
agent->EnableAsyncHook(); |
|
414 |
} |
||
415 |
} |
||
416 |
} |
||
417 |
|||
418 |
5554 |
void contextCreated(Local<Context> context, const ContextInfo& info) { |
|
419 |
11108 |
auto name_buffer = Utf8ToStringView(info.name); |
|
420 |
11108 |
auto origin_buffer = Utf8ToStringView(info.origin); |
|
421 |
11108 |
std::unique_ptr<StringBuffer> aux_data_buffer; |
|
422 |
|||
423 |
v8_inspector::V8ContextInfo v8info( |
||
424 |
5554 |
context, CONTEXT_GROUP_ID, name_buffer->string()); |
|
425 |
5554 |
v8info.origin = origin_buffer->string(); |
|
426 |
|||
427 |
✓✓ | 5554 |
if (info.is_default) { |
428 |
5030 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}"); |
|
429 |
} else { |
||
430 |
524 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}"); |
|
431 |
} |
||
432 |
5554 |
v8info.auxData = aux_data_buffer->string(); |
|
433 |
|||
434 |
5554 |
client_->contextCreated(v8info); |
|
435 |
5554 |
} |
|
436 |
|||
437 |
5016 |
void contextDestroyed(Local<Context> context) { |
|
438 |
5016 |
client_->contextDestroyed(context); |
|
439 |
5016 |
} |
|
440 |
|||
441 |
28 |
void quitMessageLoopOnPause() override { |
|
442 |
28 |
waiting_for_resume_ = false; |
|
443 |
28 |
} |
|
444 |
|||
445 |
20 |
void runIfWaitingForDebugger(int context_group_id) override { |
|
446 |
20 |
waiting_for_frontend_ = false; |
|
447 |
20 |
} |
|
448 |
|||
449 |
5843 |
int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate, |
|
450 |
bool prevent_shutdown) { |
||
451 |
5843 |
int session_id = next_session_id_++; |
|
452 |
11686 |
channels_[session_id] = std::make_unique<ChannelImpl>(env_, |
|
453 |
client_, |
||
454 |
11686 |
getWorkerManager(), |
|
455 |
5843 |
std::move(delegate), |
|
456 |
11686 |
getThreadHandle(), |
|
457 |
5843 |
prevent_shutdown); |
|
458 |
5843 |
return session_id; |
|
459 |
} |
||
460 |
|||
461 |
776 |
void disconnectFrontend(int session_id) { |
|
462 |
776 |
auto it = channels_.find(session_id); |
|
463 |
✗✓ | 776 |
if (it == channels_.end()) |
464 |
return; |
||
465 |
776 |
bool retaining_context = it->second->retainingContext(); |
|
466 |
776 |
channels_.erase(it); |
|
467 |
✓✓ | 776 |
if (retaining_context) { |
468 |
✓✓ | 6 |
for (const auto& id_channel : channels_) { |
469 |
✗✓ | 3 |
if (id_channel.second->retainingContext()) |
470 |
return; |
||
471 |
} |
||
472 |
3 |
contextDestroyed(env_->context()); |
|
473 |
} |
||
474 |
✓✓✓✓ |
776 |
if (waiting_for_sessions_disconnect_ && !is_main_) |
475 |
2 |
waiting_for_sessions_disconnect_ = false; |
|
476 |
} |
||
477 |
|||
478 |
16312 |
void dispatchMessageFromFrontend(int session_id, const StringView& message) { |
|
479 |
16312 |
channels_[session_id]->dispatchProtocolMessage(message); |
|
480 |
16314 |
} |
|
481 |
|||
482 |
200 |
Local<Context> ensureDefaultContextInGroup(int contextGroupId) override { |
|
483 |
200 |
return env_->context(); |
|
484 |
} |
||
485 |
|||
486 |
3 |
void installAdditionalCommandLineAPI(Local<Context> context, |
|
487 |
Local<Object> target) override { |
||
488 |
3 |
Local<Function> installer = env_->inspector_console_extension_installer(); |
|
489 |
✓✗ | 3 |
if (!installer.IsEmpty()) { |
490 |
6 |
Local<Value> argv[] = {target}; |
|
491 |
// If there is an exception, proceed in JS land |
||
492 |
6 |
USE(installer->Call(context, target, arraysize(argv), argv)); |
|
493 |
} |
||
494 |
3 |
} |
|
495 |
|||
496 |
2 |
void ReportUncaughtException(Local<Value> error, Local<Message> message) { |
|
497 |
2 |
Isolate* isolate = env_->isolate(); |
|
498 |
2 |
Local<Context> context = env_->context(); |
|
499 |
|||
500 |
6 |
int script_id = message->GetScriptOrigin().ScriptID()->Value(); |
|
501 |
|||
502 |
2 |
Local<v8::StackTrace> stack_trace = message->GetStackTrace(); |
|
503 |
|||
504 |
✓✓✓✗ ✓✗✓✓ |
6 |
if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 && |
505 |
4 |
script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) { |
|
506 |
1 |
script_id = 0; |
|
507 |
} |
||
508 |
|||
509 |
2 |
const uint8_t DETAILS[] = "Uncaught"; |
|
510 |
|||
511 |
12 |
client_->exceptionThrown( |
|
512 |
context, |
||
513 |
StringView(DETAILS, sizeof(DETAILS) - 1), |
||
514 |
error, |
||
515 |
6 |
ToProtocolString(isolate, message->Get())->string(), |
|
516 |
4 |
ToProtocolString(isolate, message->GetScriptResourceName())->string(), |
|
517 |
8 |
message->GetLineNumber(context).FromMaybe(0), |
|
518 |
8 |
message->GetStartColumn(context).FromMaybe(0), |
|
519 |
4 |
client_->createStackTrace(stack_trace), |
|
520 |
4 |
script_id); |
|
521 |
2 |
} |
|
522 |
|||
523 |
2 |
void startRepeatingTimer(double interval_s, |
|
524 |
TimerCallback callback, |
||
525 |
void* data) override { |
||
526 |
auto result = |
||
527 |
2 |
timers_.emplace(std::piecewise_construct, std::make_tuple(data), |
|
528 |
9 |
std::make_tuple(env_, [=]() { callback(data); })); |
|
529 |
✗✓ | 2 |
CHECK(result.second); |
530 |
2 |
uint64_t interval = 1000 * interval_s; |
|
531 |
2 |
result.first->second.Update(interval, interval); |
|
532 |
2 |
} |
|
533 |
|||
534 |
2 |
void cancelTimer(void* data) override { |
|
535 |
2 |
timers_.erase(data); |
|
536 |
2 |
} |
|
537 |
|||
538 |
// Async stack traces instrumentation. |
||
539 |
1 |
void AsyncTaskScheduled(const StringView& task_name, void* task, |
|
540 |
bool recurring) { |
||
541 |
1 |
client_->asyncTaskScheduled(task_name, task, recurring); |
|
542 |
1 |
} |
|
543 |
|||
544 |
1 |
void AsyncTaskCanceled(void* task) { |
|
545 |
1 |
client_->asyncTaskCanceled(task); |
|
546 |
1 |
} |
|
547 |
|||
548 |
4 |
void AsyncTaskStarted(void* task) { |
|
549 |
4 |
client_->asyncTaskStarted(task); |
|
550 |
4 |
} |
|
551 |
|||
552 |
1 |
void AsyncTaskFinished(void* task) { |
|
553 |
1 |
client_->asyncTaskFinished(task); |
|
554 |
1 |
} |
|
555 |
|||
556 |
void AllAsyncTasksCanceled() { |
||
557 |
client_->allAsyncTasksCanceled(); |
||
558 |
} |
||
559 |
|||
560 |
19 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
561 |
✓✓ | 57 |
for (const auto& id_channel : channels_) { |
562 |
38 |
id_channel.second->schedulePauseOnNextStatement(reason); |
|
563 |
} |
||
564 |
19 |
} |
|
565 |
|||
566 |
5173 |
bool hasConnectedSessions() { |
|
567 |
✓✓ | 10378 |
for (const auto& id_channel : channels_) { |
568 |
// Other sessions are "invisible" more most purposes |
||
569 |
✓✓ | 5300 |
if (id_channel.second->preventShutdown()) |
570 |
95 |
return true; |
|
571 |
} |
||
572 |
5078 |
return false; |
|
573 |
} |
||
574 |
|||
575 |
5016 |
bool notifyWaitingForDisconnect() { |
|
576 |
5016 |
bool retaining_context = false; |
|
577 |
✓✓ | 10092 |
for (const auto& id_channel : channels_) { |
578 |
✓✓ | 5076 |
if (id_channel.second->notifyWaitingForDisconnect()) |
579 |
3 |
retaining_context = true; |
|
580 |
} |
||
581 |
5016 |
return retaining_context; |
|
582 |
} |
||
583 |
|||
584 |
10947 |
std::shared_ptr<MainThreadHandle> getThreadHandle() { |
|
585 |
✓✓ | 10947 |
if (!interface_) { |
586 |
10052 |
interface_ = std::make_shared<MainThreadInterface>( |
|
587 |
15078 |
env_->inspector_agent()); |
|
588 |
} |
||
589 |
10947 |
return interface_->GetHandle(); |
|
590 |
} |
||
591 |
|||
592 |
7472 |
std::shared_ptr<WorkerManager> getWorkerManager() { |
|
593 |
✓✓ | 7472 |
if (!is_main_) { |
594 |
409 |
return nullptr; |
|
595 |
} |
||
596 |
✓✓ | 7063 |
if (worker_manager_ == nullptr) { |
597 |
worker_manager_ = |
||
598 |
4630 |
std::make_shared<WorkerManager>(getThreadHandle()); |
|
599 |
} |
||
600 |
7063 |
return worker_manager_; |
|
601 |
} |
||
602 |
|||
603 |
22387 |
bool IsActive() { |
|
604 |
22387 |
return !channels_.empty(); |
|
605 |
} |
||
606 |
|||
607 |
private: |
||
608 |
266 |
bool shouldRunMessageLoop() { |
|
609 |
✓✓ | 266 |
if (waiting_for_frontend_) |
610 |
60 |
return true; |
|
611 |
✓✓✓✓ |
206 |
if (waiting_for_sessions_disconnect_ || waiting_for_resume_) { |
612 |
157 |
return hasConnectedSessions(); |
|
613 |
} |
||
614 |
49 |
return false; |
|
615 |
} |
||
616 |
|||
617 |
136 |
void runMessageLoop() { |
|
618 |
✗✓ | 136 |
if (running_nested_loop_) |
619 |
return; |
||
620 |
|||
621 |
136 |
running_nested_loop_ = true; |
|
622 |
|||
623 |
✓✓ | 396 |
while (shouldRunMessageLoop()) { |
624 |
✓✗ | 130 |
if (interface_) interface_->WaitForFrontendEvent(); |
625 |
130 |
env_->RunAndClearInterrupts(); |
|
626 |
} |
||
627 |
136 |
running_nested_loop_ = false; |
|
628 |
} |
||
629 |
|||
630 |
17464 |
double currentTimeMS() override { |
|
631 |
17464 |
return env_->isolate_data()->platform()->CurrentClockTimeMillis(); |
|
632 |
} |
||
633 |
|||
634 |
459344 |
std::unique_ptr<StringBuffer> resourceNameToUrl( |
|
635 |
const StringView& resource_name_view) override { |
||
636 |
std::string resource_name = |
||
637 |
918691 |
protocol::StringUtil::StringViewToUtf8(resource_name_view); |
|
638 |
✓✓ | 459349 |
if (!IsFilePath(resource_name)) |
639 |
428193 |
return nullptr; |
|
640 |
62318 |
node::url::URL url = node::url::URL::FromFilePath(resource_name); |
|
641 |
// TODO(ak239spb): replace this code with url.href(). |
||
642 |
// Refs: https://github.com/nodejs/node/issues/22610 |
||
643 |
31159 |
return Utf8ToStringView(url.protocol() + "/" + url.path()); |
|
644 |
} |
||
645 |
|||
646 |
node::Environment* env_; |
||
647 |
bool is_main_; |
||
648 |
bool running_nested_loop_ = false; |
||
649 |
std::unique_ptr<V8Inspector> client_; |
||
650 |
// Note: ~ChannelImpl may access timers_ so timers_ has to come first. |
||
651 |
std::unordered_map<void*, TimerWrapHandle> timers_; |
||
652 |
std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_; |
||
653 |
int next_session_id_ = 1; |
||
654 |
bool waiting_for_resume_ = false; |
||
655 |
bool waiting_for_frontend_ = false; |
||
656 |
bool waiting_for_sessions_disconnect_ = false; |
||
657 |
// Allows accessing Inspector from non-main threads |
||
658 |
std::shared_ptr<MainThreadInterface> interface_; |
||
659 |
std::shared_ptr<WorkerManager> worker_manager_; |
||
660 |
}; |
||
661 |
|||
662 |
5038 |
Agent::Agent(Environment* env) |
|
663 |
: parent_env_(env), |
||
664 |
10076 |
debug_options_(env->options()->debug_options()), |
|
665 |
25190 |
host_port_(env->inspector_host_port()) {} |
|
666 |
|||
667 |
13701 |
Agent::~Agent() {} |
|
668 |
|||
669 |
5030 |
bool Agent::Start(const std::string& path, |
|
670 |
const DebugOptions& options, |
||
671 |
std::shared_ptr<ExclusiveAccess<HostPort>> host_port, |
||
672 |
bool is_main) { |
||
673 |
5030 |
path_ = path; |
|
674 |
5030 |
debug_options_ = options; |
|
675 |
✗✓ | 5030 |
CHECK_NOT_NULL(host_port); |
676 |
5030 |
host_port_ = host_port; |
|
677 |
|||
678 |
5030 |
client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main); |
|
679 |
✓✓ | 5030 |
if (parent_env_->owns_inspector()) { |
680 |
9268 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
681 |
✗✓ | 4634 |
CHECK_EQ(start_io_thread_async_initialized.exchange(true), false); |
682 |
✗✓ | 4634 |
CHECK_EQ(0, uv_async_init(parent_env_->event_loop(), |
683 |
&start_io_thread_async, |
||
684 |
StartIoThreadAsyncCallback)); |
||
685 |
4634 |
uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async)); |
|
686 |
4634 |
start_io_thread_async.data = this; |
|
687 |
// Ignore failure, SIGUSR1 won't work, but that should not block node start. |
||
688 |
4634 |
StartDebugSignalHandler(); |
|
689 |
|||
690 |
22228 |
parent_env_->AddCleanupHook([](void* data) { |
|
691 |
4163 |
Environment* env = static_cast<Environment*>(data); |
|
692 |
|||
693 |
{ |
||
694 |
8326 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
695 |
4163 |
start_io_thread_async.data = nullptr; |
|
696 |
} |
||
697 |
|||
698 |
// This is global, will never get freed |
||
699 |
8326 |
env->CloseHandle(&start_io_thread_async, [](uv_async_t*) { |
|
700 |
✗✓ | 4163 |
CHECK(start_io_thread_async_initialized.exchange(false)); |
701 |
8326 |
}); |
|
702 |
22228 |
}, parent_env_); |
|
703 |
} |
||
704 |
|||
705 |
25130 |
AtExit(parent_env_, [](void* env) { |
|
706 |
5020 |
Agent* agent = static_cast<Environment*>(env)->inspector_agent(); |
|
707 |
✓✓ | 5020 |
if (agent->IsActive()) { |
708 |
5014 |
agent->WaitForDisconnect(); |
|
709 |
} |
||
710 |
25130 |
}, parent_env_); |
|
711 |
|||
712 |
5030 |
bool wait_for_connect = options.wait_for_connect(); |
|
713 |
✓✓ | 5030 |
if (parent_handle_) { |
714 |
394 |
wait_for_connect = parent_handle_->WaitForConnect(); |
|
715 |
394 |
parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect); |
|
716 |
✓✓✓✓ ✓✓ |
4636 |
} else if (!options.inspector_enabled || !StartIoThread()) { |
717 |
4560 |
return false; |
|
718 |
} |
||
719 |
|||
720 |
// Patch the debug options to implement waitForDebuggerOnStart for |
||
721 |
// the NodeWorker.enable method. |
||
722 |
✓✓ | 470 |
if (wait_for_connect) { |
723 |
✗✓ | 19 |
CHECK(!parent_env_->has_serialized_options()); |
724 |
19 |
debug_options_.EnableBreakFirstLine(); |
|
725 |
19 |
parent_env_->options()->get_debug_options()->EnableBreakFirstLine(); |
|
726 |
19 |
client_->waitForFrontend(); |
|
727 |
} |
||
728 |
470 |
return true; |
|
729 |
} |
||
730 |
|||
731 |
78 |
bool Agent::StartIoThread() { |
|
732 |
✗✓ | 78 |
if (io_ != nullptr) |
733 |
return true; |
||
734 |
|||
735 |
✗✓ | 78 |
CHECK_NOT_NULL(client_); |
736 |
|||
737 |
156 |
io_ = InspectorIo::Start(client_->getThreadHandle(), |
|
738 |
path_, |
||
739 |
host_port_, |
||
740 |
78 |
debug_options_.inspect_publish_uid); |
|
741 |
✓✓ | 78 |
if (io_ == nullptr) { |
742 |
1 |
return false; |
|
743 |
} |
||
744 |
77 |
NotifyClusterWorkersDebugEnabled(parent_env_); |
|
745 |
77 |
return true; |
|
746 |
} |
||
747 |
|||
748 |
4 |
void Agent::Stop() { |
|
749 |
4 |
io_.reset(); |
|
750 |
4 |
} |
|
751 |
|||
752 |
5843 |
std::unique_ptr<InspectorSession> Agent::Connect( |
|
753 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
754 |
bool prevent_shutdown) { |
||
755 |
✗✓ | 5843 |
CHECK_NOT_NULL(client_); |
756 |
11686 |
int session_id = client_->connectFrontend(std::move(delegate), |
|
757 |
5843 |
prevent_shutdown); |
|
758 |
return std::unique_ptr<InspectorSession>( |
||
759 |
5843 |
new SameThreadInspectorSession(session_id, client_)); |
|
760 |
} |
||
761 |
|||
762 |
2 |
std::unique_ptr<InspectorSession> Agent::ConnectToMainThread( |
|
763 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
764 |
bool prevent_shutdown) { |
||
765 |
✗✓ | 2 |
CHECK_NOT_NULL(parent_handle_); |
766 |
✗✓ | 2 |
CHECK_NOT_NULL(client_); |
767 |
auto thread_safe_delegate = |
||
768 |
4 |
client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)); |
|
769 |
2 |
return parent_handle_->Connect(std::move(thread_safe_delegate), |
|
770 |
6 |
prevent_shutdown); |
|
771 |
} |
||
772 |
|||
773 |
5016 |
void Agent::WaitForDisconnect() { |
|
774 |
✗✓ | 5016 |
CHECK_NOT_NULL(client_); |
775 |
5016 |
bool is_worker = parent_handle_ != nullptr; |
|
776 |
5016 |
parent_handle_.reset(); |
|
777 |
✓✓✓✓ ✓✓ |
5016 |
if (client_->hasConnectedSessions() && !is_worker) { |
778 |
13 |
fprintf(stderr, "Waiting for the debugger to disconnect...\n"); |
|
779 |
13 |
fflush(stderr); |
|
780 |
} |
||
781 |
✓✓ | 5016 |
if (!client_->notifyWaitingForDisconnect()) { |
782 |
5013 |
client_->contextDestroyed(parent_env_->context()); |
|
783 |
✓✓ | 3 |
} else if (is_worker) { |
784 |
2 |
client_->waitForSessionsDisconnect(); |
|
785 |
} |
||
786 |
✓✓ | 5016 |
if (io_ != nullptr) { |
787 |
75 |
io_->StopAcceptingNewConnections(); |
|
788 |
75 |
client_->waitForSessionsDisconnect(); |
|
789 |
} |
||
790 |
5016 |
} |
|
791 |
|||
792 |
207 |
void Agent::ReportUncaughtException(Local<Value> error, |
|
793 |
Local<Message> message) { |
||
794 |
✓✓ | 207 |
if (!IsListening()) |
795 |
205 |
return; |
|
796 |
2 |
client_->ReportUncaughtException(error, message); |
|
797 |
2 |
WaitForDisconnect(); |
|
798 |
} |
||
799 |
|||
800 |
19 |
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) { |
|
801 |
19 |
client_->schedulePauseOnNextStatement(reason); |
|
802 |
19 |
} |
|
803 |
|||
804 |
5006 |
void Agent::RegisterAsyncHook(Isolate* isolate, |
|
805 |
Local<Function> enable_function, |
||
806 |
Local<Function> disable_function) { |
||
807 |
5006 |
enable_async_hook_function_.Reset(isolate, enable_function); |
|
808 |
5006 |
disable_async_hook_function_.Reset(isolate, disable_function); |
|
809 |
✓✓ | 5006 |
if (pending_enable_async_hook_) { |
810 |
✗✓ | 2 |
CHECK(!pending_disable_async_hook_); |
811 |
2 |
pending_enable_async_hook_ = false; |
|
812 |
2 |
EnableAsyncHook(); |
|
813 |
✗✓ | 5004 |
} else if (pending_disable_async_hook_) { |
814 |
CHECK(!pending_enable_async_hook_); |
||
815 |
pending_disable_async_hook_ = false; |
||
816 |
DisableAsyncHook(); |
||
817 |
} |
||
818 |
5006 |
} |
|
819 |
|||
820 |
8 |
void Agent::EnableAsyncHook() { |
|
821 |
✓✓ | 16 |
if (!enable_async_hook_function_.IsEmpty()) { |
822 |
6 |
ToggleAsyncHook(parent_env_->isolate(), enable_async_hook_function_); |
|
823 |
✗✓ | 2 |
} else if (pending_disable_async_hook_) { |
824 |
CHECK(!pending_enable_async_hook_); |
||
825 |
pending_disable_async_hook_ = false; |
||
826 |
} else { |
||
827 |
2 |
pending_enable_async_hook_ = true; |
|
828 |
} |
||
829 |
8 |
} |
|
830 |
|||
831 |
3 |
void Agent::DisableAsyncHook() { |
|
832 |
✓✗ | 6 |
if (!disable_async_hook_function_.IsEmpty()) { |
833 |
3 |
ToggleAsyncHook(parent_env_->isolate(), disable_async_hook_function_); |
|
834 |
} else if (pending_enable_async_hook_) { |
||
835 |
CHECK(!pending_disable_async_hook_); |
||
836 |
pending_enable_async_hook_ = false; |
||
837 |
} else { |
||
838 |
pending_disable_async_hook_ = true; |
||
839 |
} |
||
840 |
3 |
} |
|
841 |
|||
842 |
9 |
void Agent::ToggleAsyncHook(Isolate* isolate, |
|
843 |
const Global<Function>& fn) { |
||
844 |
// Guard against running this during cleanup -- no async events will be |
||
845 |
// emitted anyway at that point anymore, and calling into JS is not possible. |
||
846 |
// This should probably not be something we're attempting in the first place, |
||
847 |
// Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039 |
||
848 |
✗✓ | 9 |
if (!parent_env_->can_call_into_js()) return; |
849 |
✗✓ | 9 |
CHECK(parent_env_->has_run_bootstrapping_code()); |
850 |
18 |
HandleScope handle_scope(isolate); |
|
851 |
✗✓ | 18 |
CHECK(!fn.IsEmpty()); |
852 |
9 |
auto context = parent_env_->context(); |
|
853 |
18 |
v8::TryCatch try_catch(isolate); |
|
854 |
27 |
USE(fn.Get(isolate)->Call(context, Undefined(isolate), 0, nullptr)); |
|
855 |
✗✓✗✗ ✗✓ |
9 |
if (try_catch.HasCaught() && !try_catch.HasTerminated()) { |
856 |
PrintCaughtException(isolate, context, try_catch); |
||
857 |
FatalError("\nnode::inspector::Agent::ToggleAsyncHook", |
||
858 |
"Cannot toggle Inspector's AsyncHook, please report this."); |
||
859 |
} |
||
860 |
} |
||
861 |
|||
862 |
1 |
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task, |
|
863 |
bool recurring) { |
||
864 |
1 |
client_->AsyncTaskScheduled(task_name, task, recurring); |
|
865 |
1 |
} |
|
866 |
|||
867 |
1 |
void Agent::AsyncTaskCanceled(void* task) { |
|
868 |
1 |
client_->AsyncTaskCanceled(task); |
|
869 |
1 |
} |
|
870 |
|||
871 |
4 |
void Agent::AsyncTaskStarted(void* task) { |
|
872 |
4 |
client_->AsyncTaskStarted(task); |
|
873 |
4 |
} |
|
874 |
|||
875 |
1 |
void Agent::AsyncTaskFinished(void* task) { |
|
876 |
1 |
client_->AsyncTaskFinished(task); |
|
877 |
1 |
} |
|
878 |
|||
879 |
void Agent::AllAsyncTasksCanceled() { |
||
880 |
client_->AllAsyncTasksCanceled(); |
||
881 |
} |
||
882 |
|||
883 |
1 |
void Agent::RequestIoThreadStart() { |
|
884 |
// We need to attempt to interrupt V8 flow (in case Node is running |
||
885 |
// continuous JS code) and to wake up libuv thread (in case Node is waiting |
||
886 |
// for IO events) |
||
887 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
888 |
1 |
uv_async_send(&start_io_thread_async); |
|
889 |
3 |
parent_env_->RequestInterrupt([this](Environment*) { |
|
890 |
1 |
StartIoThread(); |
|
891 |
2 |
}); |
|
892 |
|||
893 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
894 |
1 |
uv_async_send(&start_io_thread_async); |
|
895 |
1 |
} |
|
896 |
|||
897 |
5563 |
void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) { |
|
898 |
✓✓ | 5563 |
if (client_ == nullptr) // This happens for a main context |
899 |
5039 |
return; |
|
900 |
524 |
client_->contextCreated(context, info); |
|
901 |
} |
||
902 |
|||
903 |
bool Agent::WillWaitForConnect() { |
||
904 |
if (debug_options_.wait_for_connect()) return true; |
||
905 |
if (parent_handle_) |
||
906 |
return parent_handle_->WaitForConnect(); |
||
907 |
return false; |
||
908 |
} |
||
909 |
|||
910 |
22497 |
bool Agent::IsActive() { |
|
911 |
✗✓ | 22497 |
if (client_ == nullptr) |
912 |
return false; |
||
913 |
✓✓✓✓ |
22497 |
return io_ != nullptr || client_->IsActive(); |
914 |
} |
||
915 |
|||
916 |
394 |
void Agent::SetParentHandle( |
|
917 |
std::unique_ptr<ParentInspectorHandle> parent_handle) { |
||
918 |
394 |
parent_handle_ = std::move(parent_handle); |
|
919 |
394 |
} |
|
920 |
|||
921 |
632 |
std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle( |
|
922 |
int thread_id, const std::string& url) { |
||
923 |
✓✓ | 632 |
if (!parent_handle_) { |
924 |
625 |
return client_->getWorkerManager()->NewParentHandle(thread_id, url); |
|
925 |
} else { |
||
926 |
7 |
return parent_handle_->NewParentInspectorHandle(thread_id, url); |
|
927 |
} |
||
928 |
} |
||
929 |
|||
930 |
void Agent::WaitForConnect() { |
||
931 |
CHECK_NOT_NULL(client_); |
||
932 |
client_->waitForFrontend(); |
||
933 |
} |
||
934 |
|||
935 |
1004 |
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() { |
|
936 |
✗✓ | 1004 |
CHECK_NOT_NULL(client_); |
937 |
1004 |
return client_->getWorkerManager(); |
|
938 |
} |
||
939 |
|||
940 |
6 |
std::string Agent::GetWsUrl() const { |
|
941 |
✓✓ | 6 |
if (io_ == nullptr) |
942 |
1 |
return ""; |
|
943 |
5 |
return io_->GetWsUrl(); |
|
944 |
} |
||
945 |
|||
946 |
16112 |
SameThreadInspectorSession::~SameThreadInspectorSession() { |
|
947 |
10741 |
auto client = client_.lock(); |
|
948 |
✓✓ | 5370 |
if (client) |
949 |
776 |
client->disconnectFrontend(session_id_); |
|
950 |
10742 |
} |
|
951 |
|||
952 |
16312 |
void SameThreadInspectorSession::Dispatch( |
|
953 |
const v8_inspector::StringView& message) { |
||
954 |
32626 |
auto client = client_.lock(); |
|
955 |
✓✗ | 16314 |
if (client) |
956 |
16314 |
client->dispatchMessageFromFrontend(session_id_, message); |
|
957 |
16314 |
} |
|
958 |
|||
959 |
} // namespace inspector |
||
960 |
✓✗✓✗ |
14034 |
} // namespace node |
Generated by: GCOVR (Version 3.4) |