GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "inspector_agent.h" |
||
2 |
|||
3 |
#include "env-inl.h" |
||
4 |
#include "inspector/main_thread_interface.h" |
||
5 |
#include "inspector/node_string.h" |
||
6 |
#include "inspector/runtime_agent.h" |
||
7 |
#include "inspector/tracing_agent.h" |
||
8 |
#include "inspector/worker_agent.h" |
||
9 |
#include "inspector/worker_inspector.h" |
||
10 |
#include "inspector_io.h" |
||
11 |
#include "node/inspector/protocol/Protocol.h" |
||
12 |
#include "node_errors.h" |
||
13 |
#include "node_internals.h" |
||
14 |
#include "node_options-inl.h" |
||
15 |
#include "node_process-inl.h" |
||
16 |
#include "node_url.h" |
||
17 |
#include "util-inl.h" |
||
18 |
#include "timer_wrap-inl.h" |
||
19 |
#include "v8-inspector.h" |
||
20 |
#include "v8-platform.h" |
||
21 |
|||
22 |
#include "libplatform/libplatform.h" |
||
23 |
|||
24 |
#ifdef __POSIX__ |
||
25 |
#include <pthread.h> |
||
26 |
#include <climits> // PTHREAD_STACK_MIN |
||
27 |
#endif // __POSIX__ |
||
28 |
|||
29 |
#include <algorithm> |
||
30 |
#include <cstring> |
||
31 |
#include <sstream> |
||
32 |
#include <unordered_map> |
||
33 |
#include <vector> |
||
34 |
|||
35 |
namespace node { |
||
36 |
namespace inspector { |
||
37 |
namespace { |
||
38 |
|||
39 |
using node::FatalError; |
||
40 |
|||
41 |
using v8::Context; |
||
42 |
using v8::Function; |
||
43 |
using v8::HandleScope; |
||
44 |
using v8::Isolate; |
||
45 |
using v8::Local; |
||
46 |
using v8::Message; |
||
47 |
using v8::Object; |
||
48 |
using v8::Value; |
||
49 |
|||
50 |
using v8_inspector::StringBuffer; |
||
51 |
using v8_inspector::StringView; |
||
52 |
using v8_inspector::V8Inspector; |
||
53 |
using v8_inspector::V8InspectorClient; |
||
54 |
|||
55 |
static uv_sem_t start_io_thread_semaphore; |
||
56 |
static uv_async_t start_io_thread_async; |
||
57 |
// This is just an additional check to make sure start_io_thread_async |
||
58 |
// is not accidentally re-used or used when uninitialized. |
||
59 |
static std::atomic_bool start_io_thread_async_initialized { false }; |
||
60 |
// Protects the Agent* stored in start_io_thread_async.data. |
||
61 |
static Mutex start_io_thread_async_mutex; |
||
62 |
|||
63 |
4 |
std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
|
64 |
Local<Value> value) { |
||
65 |
4 |
TwoByteValue buffer(isolate, value); |
|
66 |
4 |
return StringBuffer::create(StringView(*buffer, buffer.length())); |
|
67 |
} |
||
68 |
|||
69 |
// Called on the main thread. |
||
70 |
3 |
void StartIoThreadAsyncCallback(uv_async_t* handle) { |
|
71 |
3 |
static_cast<Agent*>(handle->data)->StartIoThread(); |
|
72 |
3 |
} |
|
73 |
|||
74 |
|||
75 |
#ifdef __POSIX__ |
||
76 |
2 |
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) { |
|
77 |
2 |
uv_sem_post(&start_io_thread_semaphore); |
|
78 |
2 |
} |
|
79 |
|||
80 |
5647 |
inline void* StartIoThreadMain(void* unused) { |
|
81 |
for (;;) { |
||
82 |
5647 |
uv_sem_wait(&start_io_thread_semaphore); |
|
83 |
4 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
84 |
|||
85 |
✗✓ | 2 |
CHECK(start_io_thread_async_initialized); |
86 |
2 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
|
87 |
✓✗ | 2 |
if (agent != nullptr) |
88 |
2 |
agent->RequestIoThreadStart(); |
|
89 |
2 |
} |
|
90 |
} |
||
91 |
|||
92 |
5645 |
static int StartDebugSignalHandler() { |
|
93 |
// Start a watchdog thread for calling v8::Debug::DebugBreak() because |
||
94 |
// it's not safe to call directly from the signal handler, it can |
||
95 |
// deadlock with the thread it interrupts. |
||
96 |
✗✓ | 5645 |
CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0)); |
97 |
pthread_attr_t attr; |
||
98 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_attr_init(&attr)); |
99 |
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
100 |
// PTHREAD_STACK_MIN is 2 KiB with musl libc, which is too small to safely |
||
101 |
// receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KiB on arm64, which |
||
102 |
// is the musl architecture with the biggest MINSIGSTKSZ so let's use that |
||
103 |
// as a lower bound and let's quadruple it just in case. The goal is to avoid |
||
104 |
// creating a big 2 or 4 MiB address space gap (problematic on 32 bits |
||
105 |
// because of fragmentation), not squeeze out every last byte. |
||
106 |
// Omitted on FreeBSD because it doesn't seem to like small stacks. |
||
107 |
5645 |
const size_t stack_size = std::max(static_cast<size_t>(4 * 8192), |
|
108 |
static_cast<size_t>(PTHREAD_STACK_MIN)); |
||
109 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
110 |
#endif // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
111 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
112 |
sigset_t sigmask; |
||
113 |
// Mask all signals. |
||
114 |
5645 |
sigfillset(&sigmask); |
|
115 |
sigset_t savemask; |
||
116 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask)); |
117 |
5645 |
sigmask = savemask; |
|
118 |
pthread_t thread; |
||
119 |
5645 |
const int err = pthread_create(&thread, &attr, |
|
120 |
StartIoThreadMain, nullptr); |
||
121 |
// Restore original mask |
||
122 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr)); |
123 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_attr_destroy(&attr)); |
124 |
✗✓ | 5645 |
if (err != 0) { |
125 |
fprintf(stderr, "node[%u]: pthread_create: %s\n", |
||
126 |
uv_os_getpid(), strerror(err)); |
||
127 |
fflush(stderr); |
||
128 |
// Leave SIGUSR1 blocked. We don't install a signal handler, |
||
129 |
// receiving the signal would terminate the process. |
||
130 |
return -err; |
||
131 |
} |
||
132 |
5645 |
RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup); |
|
133 |
// Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered. |
||
134 |
5645 |
sigemptyset(&sigmask); |
|
135 |
5645 |
sigaddset(&sigmask, SIGUSR1); |
|
136 |
✗✓ | 5645 |
CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr)); |
137 |
5645 |
return 0; |
|
138 |
} |
||
139 |
#endif // __POSIX__ |
||
140 |
|||
141 |
|||
142 |
#ifdef _WIN32 |
||
143 |
DWORD WINAPI StartIoThreadProc(void* arg) { |
||
144 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
||
145 |
CHECK(start_io_thread_async_initialized); |
||
146 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
||
147 |
if (agent != nullptr) |
||
148 |
agent->RequestIoThreadStart(); |
||
149 |
return 0; |
||
150 |
} |
||
151 |
|||
152 |
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf, |
||
153 |
size_t buf_len) { |
||
154 |
return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid); |
||
155 |
} |
||
156 |
|||
157 |
static int StartDebugSignalHandler() { |
||
158 |
wchar_t mapping_name[32]; |
||
159 |
HANDLE mapping_handle; |
||
160 |
DWORD pid; |
||
161 |
LPTHREAD_START_ROUTINE* handler; |
||
162 |
|||
163 |
pid = uv_os_getpid(); |
||
164 |
|||
165 |
if (GetDebugSignalHandlerMappingName(pid, |
||
166 |
mapping_name, |
||
167 |
arraysize(mapping_name)) < 0) { |
||
168 |
return -1; |
||
169 |
} |
||
170 |
|||
171 |
mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE, |
||
172 |
nullptr, |
||
173 |
PAGE_READWRITE, |
||
174 |
0, |
||
175 |
sizeof *handler, |
||
176 |
mapping_name); |
||
177 |
if (mapping_handle == nullptr) { |
||
178 |
return -1; |
||
179 |
} |
||
180 |
|||
181 |
handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>( |
||
182 |
MapViewOfFile(mapping_handle, |
||
183 |
FILE_MAP_ALL_ACCESS, |
||
184 |
0, |
||
185 |
0, |
||
186 |
sizeof *handler)); |
||
187 |
if (handler == nullptr) { |
||
188 |
CloseHandle(mapping_handle); |
||
189 |
return -1; |
||
190 |
} |
||
191 |
|||
192 |
*handler = StartIoThreadProc; |
||
193 |
|||
194 |
UnmapViewOfFile(static_cast<void*>(handler)); |
||
195 |
|||
196 |
return 0; |
||
197 |
} |
||
198 |
#endif // _WIN32 |
||
199 |
|||
200 |
|||
201 |
const int CONTEXT_GROUP_ID = 1; |
||
202 |
|||
203 |
732 |
std::string GetWorkerLabel(node::Environment* env) { |
|
204 |
1464 |
std::ostringstream result; |
|
205 |
732 |
result << "Worker[" << env->thread_id() << "]"; |
|
206 |
732 |
return result.str(); |
|
207 |
} |
||
208 |
|||
209 |
class ChannelImpl final : public v8_inspector::V8Inspector::Channel, |
||
210 |
public protocol::FrontendChannel { |
||
211 |
public: |
||
212 |
7348 |
explicit ChannelImpl(Environment* env, |
|
213 |
const std::unique_ptr<V8Inspector>& inspector, |
||
214 |
std::shared_ptr<WorkerManager> worker_manager, |
||
215 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
216 |
std::shared_ptr<MainThreadHandle> main_thread_, |
||
217 |
bool prevent_shutdown) |
||
218 |
14696 |
: delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown), |
|
219 |
7348 |
retaining_context_(false) { |
|
220 |
22044 |
session_ = inspector->connect(CONTEXT_GROUP_ID, |
|
221 |
this, |
||
222 |
StringView(), |
||
223 |
14696 |
V8Inspector::ClientTrustLevel::kFullyTrusted); |
|
224 |
7348 |
node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this); |
|
225 |
tracing_agent_ = |
||
226 |
7348 |
std::make_unique<protocol::TracingAgent>(env, main_thread_); |
|
227 |
7348 |
tracing_agent_->Wire(node_dispatcher_.get()); |
|
228 |
✓✓ | 7348 |
if (worker_manager) { |
229 |
6602 |
worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager); |
|
230 |
6602 |
worker_agent_->Wire(node_dispatcher_.get()); |
|
231 |
} |
||
232 |
7348 |
runtime_agent_ = std::make_unique<protocol::RuntimeAgent>(); |
|
233 |
7348 |
runtime_agent_->Wire(node_dispatcher_.get()); |
|
234 |
7348 |
} |
|
235 |
|||
236 |
26624 |
~ChannelImpl() override { |
|
237 |
13312 |
tracing_agent_->disable(); |
|
238 |
13312 |
tracing_agent_.reset(); // Dispose before the dispatchers |
|
239 |
✓✓ | 13312 |
if (worker_agent_) { |
240 |
11820 |
worker_agent_->disable(); |
|
241 |
11820 |
worker_agent_.reset(); // Dispose before the dispatchers |
|
242 |
} |
||
243 |
13312 |
runtime_agent_->disable(); |
|
244 |
13312 |
runtime_agent_.reset(); // Dispose before the dispatchers |
|
245 |
26624 |
} |
|
246 |
|||
247 |
20569 |
void dispatchProtocolMessage(const StringView& message) { |
|
248 |
41138 |
std::string raw_message = protocol::StringUtil::StringViewToUtf8(message); |
|
249 |
std::unique_ptr<protocol::DictionaryValue> value = |
||
250 |
20569 |
protocol::DictionaryValue::cast(protocol::StringUtil::parseMessage( |
|
251 |
41138 |
raw_message, false)); |
|
252 |
int call_id; |
||
253 |
41138 |
std::string method; |
|
254 |
20569 |
node_dispatcher_->parseCommand(value.get(), &call_id, &method); |
|
255 |
20569 |
if (v8_inspector::V8InspectorSession::canDispatchMethod( |
|
256 |
✓✓ | 41138 |
Utf8ToStringView(method)->string())) { |
257 |
20530 |
session_->dispatchProtocolMessage(message); |
|
258 |
} else { |
||
259 |
39 |
node_dispatcher_->dispatch(call_id, method, std::move(value), |
|
260 |
raw_message); |
||
261 |
} |
||
262 |
20569 |
} |
|
263 |
|||
264 |
38 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
265 |
76 |
std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason); |
|
266 |
38 |
session_->schedulePauseOnNextStatement(buffer->string(), buffer->string()); |
|
267 |
38 |
} |
|
268 |
|||
269 |
6679 |
bool preventShutdown() { |
|
270 |
6679 |
return prevent_shutdown_; |
|
271 |
} |
||
272 |
|||
273 |
6420 |
bool notifyWaitingForDisconnect() { |
|
274 |
6420 |
retaining_context_ = runtime_agent_->notifyWaitingForDisconnect(); |
|
275 |
6420 |
return retaining_context_; |
|
276 |
} |
||
277 |
|||
278 |
936 |
bool retainingContext() { |
|
279 |
936 |
return retaining_context_; |
|
280 |
} |
||
281 |
|||
282 |
private: |
||
283 |
20530 |
void sendResponse( |
|
284 |
int callId, |
||
285 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
286 |
20530 |
sendMessageToFrontend(message->string()); |
|
287 |
20530 |
} |
|
288 |
|||
289 |
8046 |
void sendNotification( |
|
290 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
291 |
8046 |
sendMessageToFrontend(message->string()); |
|
292 |
8046 |
} |
|
293 |
|||
294 |
1349 |
void flushProtocolNotifications() override { } |
|
295 |
|||
296 |
29260 |
void sendMessageToFrontend(const StringView& message) { |
|
297 |
29260 |
delegate_->SendMessageToFrontend(message); |
|
298 |
29260 |
} |
|
299 |
|||
300 |
684 |
void sendMessageToFrontend(const std::string& message) { |
|
301 |
684 |
sendMessageToFrontend(Utf8ToStringView(message)->string()); |
|
302 |
684 |
} |
|
303 |
|||
304 |
using Serializable = protocol::Serializable; |
||
305 |
|||
306 |
39 |
void sendProtocolResponse(int callId, |
|
307 |
std::unique_ptr<Serializable> message) override { |
||
308 |
39 |
sendMessageToFrontend(message->serializeToJSON()); |
|
309 |
39 |
} |
|
310 |
645 |
void sendProtocolNotification( |
|
311 |
std::unique_ptr<Serializable> message) override { |
||
312 |
645 |
sendMessageToFrontend(message->serializeToJSON()); |
|
313 |
645 |
} |
|
314 |
|||
315 |
void fallThrough(int callId, |
||
316 |
const std::string& method, |
||
317 |
const std::string& message) override { |
||
318 |
DCHECK(false); |
||
319 |
} |
||
320 |
|||
321 |
std::unique_ptr<protocol::RuntimeAgent> runtime_agent_; |
||
322 |
std::unique_ptr<protocol::TracingAgent> tracing_agent_; |
||
323 |
std::unique_ptr<protocol::WorkerAgent> worker_agent_; |
||
324 |
std::unique_ptr<InspectorSessionDelegate> delegate_; |
||
325 |
std::unique_ptr<v8_inspector::V8InspectorSession> session_; |
||
326 |
std::unique_ptr<protocol::UberDispatcher> node_dispatcher_; |
||
327 |
bool prevent_shutdown_; |
||
328 |
bool retaining_context_; |
||
329 |
}; |
||
330 |
|||
331 |
class SameThreadInspectorSession : public InspectorSession { |
||
332 |
public: |
||
333 |
7348 |
SameThreadInspectorSession( |
|
334 |
int session_id, std::shared_ptr<NodeInspectorClient> client) |
||
335 |
7348 |
: session_id_(session_id), client_(client) {} |
|
336 |
~SameThreadInspectorSession() override; |
||
337 |
void Dispatch(const v8_inspector::StringView& message) override; |
||
338 |
|||
339 |
private: |
||
340 |
int session_id_; |
||
341 |
std::weak_ptr<NodeInspectorClient> client_; |
||
342 |
}; |
||
343 |
|||
344 |
112 |
void NotifyClusterWorkersDebugEnabled(Environment* env) { |
|
345 |
112 |
Isolate* isolate = env->isolate(); |
|
346 |
112 |
HandleScope handle_scope(isolate); |
|
347 |
112 |
Local<Context> context = env->context(); |
|
348 |
|||
349 |
// Send message to enable debug in cluster workers |
||
350 |
112 |
Local<Object> message = Object::New(isolate); |
|
351 |
112 |
message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"), |
|
352 |
448 |
FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check(); |
|
353 |
112 |
ProcessEmit(env, "internalMessage", message); |
|
354 |
112 |
} |
|
355 |
|||
356 |
#ifdef _WIN32 |
||
357 |
bool IsFilePath(const std::string& path) { |
||
358 |
// '\\' |
||
359 |
if (path.length() > 2 && path[0] == '\\' && path[1] == '\\') |
||
360 |
return true; |
||
361 |
// '[A-Z]:[/\\]' |
||
362 |
if (path.length() < 3) |
||
363 |
return false; |
||
364 |
if ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) |
||
365 |
return path[1] == ':' && (path[2] == '/' || path[2] == '\\'); |
||
366 |
return false; |
||
367 |
} |
||
368 |
#else |
||
369 |
746341 |
bool IsFilePath(const std::string& path) { |
|
370 |
✓✓✓✓ |
746341 |
return !path.empty() && path[0] == '/'; |
371 |
} |
||
372 |
#endif // __POSIX__ |
||
373 |
|||
374 |
void ThrowUninitializedInspectorError(Environment* env) { |
||
375 |
HandleScope scope(env->isolate()); |
||
376 |
|||
377 |
const char* msg = "This Environment was initialized without a V8::Inspector"; |
||
378 |
Local<Value> exception = |
||
379 |
v8::String::NewFromUtf8(env->isolate(), msg).ToLocalChecked(); |
||
380 |
|||
381 |
env->isolate()->ThrowException(exception); |
||
382 |
} |
||
383 |
|||
384 |
} // namespace |
||
385 |
|||
386 |
class NodeInspectorClient : public V8InspectorClient { |
||
387 |
public: |
||
388 |
6379 |
explicit NodeInspectorClient(node::Environment* env, bool is_main) |
|
389 |
6379 |
: env_(env), is_main_(is_main) { |
|
390 |
6379 |
client_ = V8Inspector::create(env->isolate(), this); |
|
391 |
// TODO(bnoordhuis) Make name configurable from src/node.cc. |
||
392 |
std::string name = |
||
393 |
✓✓ | 12758 |
is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env); |
394 |
12758 |
ContextInfo info(name); |
|
395 |
6379 |
info.is_default = true; |
|
396 |
6379 |
contextCreated(env->context(), info); |
|
397 |
6379 |
} |
|
398 |
|||
399 |
40 |
void runMessageLoopOnPause(int context_group_id) override { |
|
400 |
40 |
waiting_for_resume_ = true; |
|
401 |
40 |
runMessageLoop(); |
|
402 |
40 |
} |
|
403 |
|||
404 |
112 |
void waitForSessionsDisconnect() { |
|
405 |
112 |
waiting_for_sessions_disconnect_ = true; |
|
406 |
112 |
runMessageLoop(); |
|
407 |
112 |
} |
|
408 |
|||
409 |
20 |
void waitForFrontend() { |
|
410 |
20 |
waiting_for_frontend_ = true; |
|
411 |
20 |
runMessageLoop(); |
|
412 |
20 |
} |
|
413 |
|||
414 |
9 |
void maxAsyncCallStackDepthChanged(int depth) override { |
|
415 |
✓✓ | 9 |
if (waiting_for_sessions_disconnect_) { |
416 |
// V8 isolate is mostly done and is only letting Inspector protocol |
||
417 |
// clients gather data. |
||
418 |
4 |
return; |
|
419 |
} |
||
420 |
✓✗ | 5 |
if (auto agent = env_->inspector_agent()) { |
421 |
✓✓ | 5 |
if (depth == 0) { |
422 |
1 |
agent->DisableAsyncHook(); |
|
423 |
} else { |
||
424 |
4 |
agent->EnableAsyncHook(); |
|
425 |
} |
||
426 |
} |
||
427 |
} |
||
428 |
|||
429 |
7003 |
void contextCreated(Local<Context> context, const ContextInfo& info) { |
|
430 |
14006 |
auto name_buffer = Utf8ToStringView(info.name); |
|
431 |
14006 |
auto origin_buffer = Utf8ToStringView(info.origin); |
|
432 |
7003 |
std::unique_ptr<StringBuffer> aux_data_buffer; |
|
433 |
|||
434 |
v8_inspector::V8ContextInfo v8info( |
||
435 |
7003 |
context, CONTEXT_GROUP_ID, name_buffer->string()); |
|
436 |
7003 |
v8info.origin = origin_buffer->string(); |
|
437 |
|||
438 |
✓✓ | 7003 |
if (info.is_default) { |
439 |
6379 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}"); |
|
440 |
} else { |
||
441 |
624 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}"); |
|
442 |
} |
||
443 |
7003 |
v8info.auxData = aux_data_buffer->string(); |
|
444 |
|||
445 |
7003 |
client_->contextCreated(v8info); |
|
446 |
7003 |
} |
|
447 |
|||
448 |
6359 |
void contextDestroyed(Local<Context> context) { |
|
449 |
6359 |
client_->contextDestroyed(context); |
|
450 |
6359 |
} |
|
451 |
|||
452 |
28 |
void quitMessageLoopOnPause() override { |
|
453 |
28 |
waiting_for_resume_ = false; |
|
454 |
28 |
} |
|
455 |
|||
456 |
21 |
void runIfWaitingForDebugger(int context_group_id) override { |
|
457 |
21 |
waiting_for_frontend_ = false; |
|
458 |
21 |
} |
|
459 |
|||
460 |
7348 |
int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate, |
|
461 |
bool prevent_shutdown) { |
||
462 |
7348 |
int session_id = next_session_id_++; |
|
463 |
14696 |
channels_[session_id] = std::make_unique<ChannelImpl>(env_, |
|
464 |
7348 |
client_, |
|
465 |
14696 |
getWorkerManager(), |
|
466 |
7348 |
std::move(delegate), |
|
467 |
14696 |
getThreadHandle(), |
|
468 |
7348 |
prevent_shutdown); |
|
469 |
7348 |
return session_id; |
|
470 |
} |
||
471 |
|||
472 |
933 |
void disconnectFrontend(int session_id) { |
|
473 |
933 |
auto it = channels_.find(session_id); |
|
474 |
✗✓ | 933 |
if (it == channels_.end()) |
475 |
return; |
||
476 |
933 |
bool retaining_context = it->second->retainingContext(); |
|
477 |
933 |
channels_.erase(it); |
|
478 |
✓✓ | 933 |
if (retaining_context) { |
479 |
✓✓ | 6 |
for (const auto& id_channel : channels_) { |
480 |
✗✓ | 3 |
if (id_channel.second->retainingContext()) |
481 |
return; |
||
482 |
} |
||
483 |
3 |
contextDestroyed(env_->context()); |
|
484 |
} |
||
485 |
✓✓✓✓ |
933 |
if (waiting_for_sessions_disconnect_ && !is_main_) |
486 |
2 |
waiting_for_sessions_disconnect_ = false; |
|
487 |
} |
||
488 |
|||
489 |
20569 |
void dispatchMessageFromFrontend(int session_id, const StringView& message) { |
|
490 |
20569 |
channels_[session_id]->dispatchProtocolMessage(message); |
|
491 |
20569 |
} |
|
492 |
|||
493 |
310 |
Local<Context> ensureDefaultContextInGroup(int contextGroupId) override { |
|
494 |
310 |
return env_->context(); |
|
495 |
} |
||
496 |
|||
497 |
3 |
void installAdditionalCommandLineAPI(Local<Context> context, |
|
498 |
Local<Object> target) override { |
||
499 |
3 |
Local<Function> installer = env_->inspector_console_extension_installer(); |
|
500 |
✓✗ | 3 |
if (!installer.IsEmpty()) { |
501 |
3 |
Local<Value> argv[] = {target}; |
|
502 |
// If there is an exception, proceed in JS land |
||
503 |
3 |
USE(installer->Call(context, target, arraysize(argv), argv)); |
|
504 |
} |
||
505 |
3 |
} |
|
506 |
|||
507 |
2 |
void ReportUncaughtException(Local<Value> error, Local<Message> message) { |
|
508 |
2 |
Isolate* isolate = env_->isolate(); |
|
509 |
2 |
Local<Context> context = env_->context(); |
|
510 |
|||
511 |
4 |
int script_id = message->GetScriptOrigin().ScriptId(); |
|
512 |
|||
513 |
2 |
Local<v8::StackTrace> stack_trace = message->GetStackTrace(); |
|
514 |
|||
515 |
✓✓✓✗ ✓✗ |
4 |
if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 && |
516 |
✓✓ | 4 |
script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) { |
517 |
1 |
script_id = 0; |
|
518 |
} |
||
519 |
|||
520 |
2 |
const uint8_t DETAILS[] = "Uncaught"; |
|
521 |
|||
522 |
4 |
client_->exceptionThrown( |
|
523 |
context, |
||
524 |
StringView(DETAILS, sizeof(DETAILS) - 1), |
||
525 |
error, |
||
526 |
6 |
ToProtocolString(isolate, message->Get())->string(), |
|
527 |
4 |
ToProtocolString(isolate, message->GetScriptResourceName())->string(), |
|
528 |
✓✗ | 4 |
message->GetLineNumber(context).FromMaybe(0), |
529 |
✓✗ | 4 |
message->GetStartColumn(context).FromMaybe(0), |
530 |
4 |
client_->createStackTrace(stack_trace), |
|
531 |
2 |
script_id); |
|
532 |
2 |
} |
|
533 |
|||
534 |
2 |
void startRepeatingTimer(double interval_s, |
|
535 |
TimerCallback callback, |
||
536 |
void* data) override { |
||
537 |
auto result = |
||
538 |
2 |
timers_.emplace(std::piecewise_construct, std::make_tuple(data), |
|
539 |
8 |
std::make_tuple(env_, [=]() { callback(data); })); |
|
540 |
✗✓ | 2 |
CHECK(result.second); |
541 |
2 |
uint64_t interval = static_cast<uint64_t>(1000 * interval_s); |
|
542 |
2 |
result.first->second.Update(interval, interval); |
|
543 |
2 |
} |
|
544 |
|||
545 |
2 |
void cancelTimer(void* data) override { |
|
546 |
2 |
timers_.erase(data); |
|
547 |
2 |
} |
|
548 |
|||
549 |
// Async stack traces instrumentation. |
||
550 |
3 |
void AsyncTaskScheduled(const StringView& task_name, void* task, |
|
551 |
bool recurring) { |
||
552 |
3 |
client_->asyncTaskScheduled(task_name, task, recurring); |
|
553 |
3 |
} |
|
554 |
|||
555 |
4 |
void AsyncTaskCanceled(void* task) { |
|
556 |
4 |
client_->asyncTaskCanceled(task); |
|
557 |
4 |
} |
|
558 |
|||
559 |
6 |
void AsyncTaskStarted(void* task) { |
|
560 |
6 |
client_->asyncTaskStarted(task); |
|
561 |
6 |
} |
|
562 |
|||
563 |
10 |
void AsyncTaskFinished(void* task) { |
|
564 |
10 |
client_->asyncTaskFinished(task); |
|
565 |
10 |
} |
|
566 |
|||
567 |
void AllAsyncTasksCanceled() { |
||
568 |
client_->allAsyncTasksCanceled(); |
||
569 |
} |
||
570 |
|||
571 |
19 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
572 |
✓✓ | 57 |
for (const auto& id_channel : channels_) { |
573 |
38 |
id_channel.second->schedulePauseOnNextStatement(reason); |
|
574 |
} |
||
575 |
19 |
} |
|
576 |
|||
577 |
6551 |
bool hasConnectedSessions() { |
|
578 |
✓✓ | 13135 |
for (const auto& id_channel : channels_) { |
579 |
// Other sessions are "invisible" more most purposes |
||
580 |
✓✓ | 6679 |
if (id_channel.second->preventShutdown()) |
581 |
95 |
return true; |
|
582 |
} |
||
583 |
6456 |
return false; |
|
584 |
} |
||
585 |
|||
586 |
6359 |
bool notifyWaitingForDisconnect() { |
|
587 |
6359 |
bool retaining_context = false; |
|
588 |
✓✓ | 12779 |
for (const auto& id_channel : channels_) { |
589 |
✓✓ | 6420 |
if (id_channel.second->notifyWaitingForDisconnect()) |
590 |
3 |
retaining_context = true; |
|
591 |
} |
||
592 |
6359 |
return retaining_context; |
|
593 |
} |
||
594 |
|||
595 |
13837 |
std::shared_ptr<MainThreadHandle> getThreadHandle() { |
|
596 |
✓✓ | 13837 |
if (!interface_) { |
597 |
6376 |
interface_ = std::make_shared<MainThreadInterface>( |
|
598 |
12752 |
env_->inspector_agent()); |
|
599 |
} |
||
600 |
13837 |
return interface_->GetHandle(); |
|
601 |
} |
||
602 |
|||
603 |
9988 |
std::shared_ptr<WorkerManager> getWorkerManager() { |
|
604 |
✓✓ | 9988 |
if (!is_main_) { |
605 |
746 |
return nullptr; |
|
606 |
} |
||
607 |
✓✓ | 9242 |
if (worker_manager_ == nullptr) { |
608 |
worker_manager_ = |
||
609 |
5640 |
std::make_shared<WorkerManager>(getThreadHandle()); |
|
610 |
} |
||
611 |
9242 |
return worker_manager_; |
|
612 |
} |
||
613 |
|||
614 |
78575 |
bool IsActive() { |
|
615 |
78575 |
return !channels_.empty(); |
|
616 |
} |
||
617 |
|||
618 |
private: |
||
619 |
292 |
bool shouldRunMessageLoop() { |
|
620 |
✓✓ | 292 |
if (waiting_for_frontend_) |
621 |
50 |
return true; |
|
622 |
✓✓✓✓ |
242 |
if (waiting_for_sessions_disconnect_ || waiting_for_resume_) { |
623 |
192 |
return hasConnectedSessions(); |
|
624 |
} |
||
625 |
50 |
return false; |
|
626 |
} |
||
627 |
|||
628 |
172 |
void runMessageLoop() { |
|
629 |
✗✓ | 172 |
if (running_nested_loop_) |
630 |
return; |
||
631 |
|||
632 |
172 |
running_nested_loop_ = true; |
|
633 |
|||
634 |
✓✓ | 292 |
while (shouldRunMessageLoop()) { |
635 |
✓✗ | 120 |
if (interface_) interface_->WaitForFrontendEvent(); |
636 |
120 |
env_->RunAndClearInterrupts(); |
|
637 |
} |
||
638 |
172 |
running_nested_loop_ = false; |
|
639 |
} |
||
640 |
|||
641 |
73374 |
double currentTimeMS() override { |
|
642 |
73374 |
return env_->isolate_data()->platform()->CurrentClockTimeMillis(); |
|
643 |
} |
||
644 |
|||
645 |
746341 |
std::unique_ptr<StringBuffer> resourceNameToUrl( |
|
646 |
const StringView& resource_name_view) override { |
||
647 |
std::string resource_name = |
||
648 |
1492682 |
protocol::StringUtil::StringViewToUtf8(resource_name_view); |
|
649 |
✓✓ | 746341 |
if (!IsFilePath(resource_name)) |
650 |
708581 |
return nullptr; |
|
651 |
37760 |
node::url::URL url = node::url::URL::FromFilePath(resource_name); |
|
652 |
37760 |
return Utf8ToStringView(url.href()); |
|
653 |
} |
||
654 |
|||
655 |
node::Environment* env_; |
||
656 |
bool is_main_; |
||
657 |
bool running_nested_loop_ = false; |
||
658 |
std::unique_ptr<V8Inspector> client_; |
||
659 |
// Note: ~ChannelImpl may access timers_ so timers_ has to come first. |
||
660 |
std::unordered_map<void*, TimerWrapHandle> timers_; |
||
661 |
std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_; |
||
662 |
int next_session_id_ = 1; |
||
663 |
bool waiting_for_resume_ = false; |
||
664 |
bool waiting_for_frontend_ = false; |
||
665 |
bool waiting_for_sessions_disconnect_ = false; |
||
666 |
// Allows accessing Inspector from non-main threads |
||
667 |
std::shared_ptr<MainThreadInterface> interface_; |
||
668 |
std::shared_ptr<WorkerManager> worker_manager_; |
||
669 |
}; |
||
670 |
|||
671 |
6385 |
Agent::Agent(Environment* env) |
|
672 |
: parent_env_(env), |
||
673 |
6385 |
debug_options_(env->options()->debug_options()), |
|
674 |
12770 |
host_port_(env->inspector_host_port()) {} |
|
675 |
|||
676 |
5695 |
Agent::~Agent() {} |
|
677 |
|||
678 |
6379 |
bool Agent::Start(const std::string& path, |
|
679 |
const DebugOptions& options, |
||
680 |
std::shared_ptr<ExclusiveAccess<HostPort>> host_port, |
||
681 |
bool is_main) { |
||
682 |
6379 |
path_ = path; |
|
683 |
6379 |
debug_options_ = options; |
|
684 |
✗✓ | 6379 |
CHECK_NOT_NULL(host_port); |
685 |
6379 |
host_port_ = host_port; |
|
686 |
|||
687 |
6379 |
client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main); |
|
688 |
✓✓ | 6379 |
if (parent_env_->owns_inspector()) { |
689 |
5645 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
690 |
✗✓ | 5645 |
CHECK_EQ(start_io_thread_async_initialized.exchange(true), false); |
691 |
✗✓ | 5645 |
CHECK_EQ(0, uv_async_init(parent_env_->event_loop(), |
692 |
&start_io_thread_async, |
||
693 |
StartIoThreadAsyncCallback)); |
||
694 |
5645 |
uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async)); |
|
695 |
5645 |
start_io_thread_async.data = this; |
|
696 |
// Ignore failure, SIGUSR1 won't work, but that should not block node start. |
||
697 |
5645 |
StartDebugSignalHandler(); |
|
698 |
|||
699 |
5645 |
parent_env_->AddCleanupHook([](void* data) { |
|
700 |
4955 |
Environment* env = static_cast<Environment*>(data); |
|
701 |
|||
702 |
{ |
||
703 |
4955 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
704 |
4955 |
start_io_thread_async.data = nullptr; |
|
705 |
} |
||
706 |
|||
707 |
// This is global, will never get freed |
||
708 |
4955 |
env->CloseHandle(&start_io_thread_async, [](uv_async_t*) { |
|
709 |
✗✓ | 4955 |
CHECK(start_io_thread_async_initialized.exchange(false)); |
710 |
4955 |
}); |
|
711 |
5645 |
}, parent_env_); |
|
712 |
} |
||
713 |
|||
714 |
6379 |
AtExit(parent_env_, [](void* env) { |
|
715 |
6365 |
Agent* agent = static_cast<Environment*>(env)->inspector_agent(); |
|
716 |
✓✓ | 6365 |
if (agent->IsActive()) { |
717 |
6357 |
agent->WaitForDisconnect(); |
|
718 |
} |
||
719 |
6379 |
}, parent_env_); |
|
720 |
|||
721 |
6379 |
bool wait_for_connect = options.wait_for_connect(); |
|
722 |
✓✓ | 6379 |
if (parent_handle_) { |
723 |
732 |
wait_for_connect = parent_handle_->WaitForConnect(); |
|
724 |
732 |
parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect); |
|
725 |
✓✓✓✗ ✓✓ |
5760 |
} else if (!options.inspector_enabled || !options.allow_attaching_debugger || |
726 |
✓✓ | 113 |
!StartIoThread()) { |
727 |
5537 |
return false; |
|
728 |
} |
||
729 |
|||
730 |
// Patch the debug options to implement waitForDebuggerOnStart for |
||
731 |
// the NodeWorker.enable method. |
||
732 |
✓✓ | 842 |
if (wait_for_connect) { |
733 |
✗✓ | 20 |
CHECK(!parent_env_->has_serialized_options()); |
734 |
20 |
debug_options_.EnableBreakFirstLine(); |
|
735 |
20 |
parent_env_->options()->get_debug_options()->EnableBreakFirstLine(); |
|
736 |
20 |
client_->waitForFrontend(); |
|
737 |
} |
||
738 |
842 |
return true; |
|
739 |
} |
||
740 |
|||
741 |
118 |
bool Agent::StartIoThread() { |
|
742 |
✓✓ | 118 |
if (io_ != nullptr) |
743 |
3 |
return true; |
|
744 |
|||
745 |
✓✓✗✓ ✗✓ |
115 |
if (!parent_env_->should_create_inspector() && !client_) { |
746 |
ThrowUninitializedInspectorError(parent_env_); |
||
747 |
return false; |
||
748 |
} |
||
749 |
|||
750 |
✗✓ | 115 |
CHECK_NOT_NULL(client_); |
751 |
|||
752 |
230 |
io_ = InspectorIo::Start(client_->getThreadHandle(), |
|
753 |
115 |
path_, |
|
754 |
115 |
host_port_, |
|
755 |
230 |
debug_options_.inspect_publish_uid); |
|
756 |
✓✓ | 115 |
if (io_ == nullptr) { |
757 |
3 |
return false; |
|
758 |
} |
||
759 |
112 |
NotifyClusterWorkersDebugEnabled(parent_env_); |
|
760 |
112 |
return true; |
|
761 |
} |
||
762 |
|||
763 |
4 |
void Agent::Stop() { |
|
764 |
4 |
io_.reset(); |
|
765 |
4 |
} |
|
766 |
|||
767 |
7348 |
// Opens an inspector session on the current thread. `delegate` receives
// messages from the inspector; `prevent_shutdown` keeps the process alive
// while the session is connected. Returns an empty pointer (after throwing
// a JS error) when the inspector client was never created.
std::unique_ptr<InspectorSession> Agent::Connect(
    std::unique_ptr<InspectorSessionDelegate> delegate,
    bool prevent_shutdown) {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return nullptr;
  }

  CHECK_NOT_NULL(client_);

  const int session_id =
      client_->connectFrontend(std::move(delegate), prevent_shutdown);
  return std::make_unique<SameThreadInspectorSession>(session_id, client_);
}
||
782 |
|||
783 |
2 |
// Opens a session against the parent (main) thread's inspector from a
// worker. The delegate is wrapped so it can be invoked safely across
// threads. Requires a parent handle, i.e. this must be a worker agent.
std::unique_ptr<InspectorSession> Agent::ConnectToMainThread(
    std::unique_ptr<InspectorSessionDelegate> delegate,
    bool prevent_shutdown) {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return nullptr;
  }

  CHECK_NOT_NULL(parent_handle_);
  CHECK_NOT_NULL(client_);
  return parent_handle_->Connect(
      client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)),
      prevent_shutdown);
}
||
798 |
|||
799 |
6359 |
// Called on shutdown. Notifies connected frontends that the process is
// going away and, where appropriate, blocks until they disconnect so that
// e.g. pending Debugger/Profiler data can be flushed.
void Agent::WaitForDisconnect() {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return;
  }

  CHECK_NOT_NULL(client_);
  // Remember whether this agent belonged to a worker before dropping the
  // parent handle; the flag steers the wait behavior below.
  bool is_worker = parent_handle_ != nullptr;
  parent_handle_.reset();
  // Only the main thread prints the "waiting" notice for attached sessions.
  if (client_->hasConnectedSessions() && !is_worker) {
    fprintf(stderr, "Waiting for the debugger to disconnect...\n");
    fflush(stderr);
  }
  if (!client_->notifyWaitingForDisconnect()) {
    // No session asked us to wait: report the context as destroyed right away.
    client_->contextDestroyed(parent_env_->context());
  } else if (is_worker) {
    // A worker blocks here until its sessions have disconnected.
    client_->waitForSessionsDisconnect();
  }
  // With an I/O thread running, stop taking new connections and wait for
  // the existing ones to go away.
  if (io_ != nullptr) {
    io_->StopAcceptingNewConnections();
    client_->waitForSessionsDisconnect();
  }
}
||
822 |
|||
823 |
299 |
void Agent::ReportUncaughtException(Local<Value> error, |
|
824 |
Local<Message> message) { |
||
825 |
✓✓ | 299 |
if (!IsListening()) |
826 |
297 |
return; |
|
827 |
2 |
client_->ReportUncaughtException(error, message); |
|
828 |
2 |
WaitForDisconnect(); |
|
829 |
} |
||
830 |
|||
831 |
19 |
// Schedules a debugger pause at the next JavaScript statement, tagged with
// `reason`. Note: dereferences client_ unconditionally — callers must only
// invoke this once the inspector client exists.
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) {
  client_->schedulePauseOnNextStatement(reason);
}
|
834 |
|||
835 |
6348 |
// Stores the JS callbacks that toggle inspector async-hook tracking and
// immediately applies any toggle that was requested before the callbacks
// were registered (the pending_*_async_hook_ flags).
void Agent::RegisterAsyncHook(Isolate* isolate,
                              Local<Function> enable_function,
                              Local<Function> disable_function) {
  parent_env_->set_inspector_enable_async_hooks(enable_function);
  parent_env_->set_inspector_disable_async_hooks(disable_function);
  // At most one of the two pending flags can be set at any time.
  if (pending_enable_async_hook_) {
    CHECK(!pending_disable_async_hook_);
    pending_enable_async_hook_ = false;
    EnableAsyncHook();
    return;
  }
  if (pending_disable_async_hook_) {
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
    DisableAsyncHook();
  }
}
|
850 |
|||
851 |
6 |
// Enables inspector async-hook tracking by calling into JS. If the enable
// callback has not been registered yet, either cancels out a pending
// disable request or records a pending enable for RegisterAsyncHook() to
// apply later.
void Agent::EnableAsyncHook() {
  HandleScope scope(parent_env_->isolate());
  Local<Function> enable = parent_env_->inspector_enable_async_hooks();
  if (!enable.IsEmpty()) {
    ToggleAsyncHook(parent_env_->isolate(), enable);
  } else if (pending_disable_async_hook_) {
    // An earlier disable request is still pending; the two cancel out.
    CHECK(!pending_enable_async_hook_);
    pending_disable_async_hook_ = false;
  } else {
    // Callback not registered yet — remember the request.
    pending_enable_async_hook_ = true;
  }
}
|
863 |
|||
864 |
1 |
void Agent::DisableAsyncHook() { |
|
865 |
2 |
HandleScope scope(parent_env_->isolate()); |
|
866 |
1 |
Local<Function> disable = parent_env_->inspector_enable_async_hooks(); |
|
867 |
✓✗ | 1 |
if (!disable.IsEmpty()) { |
868 |
1 |
ToggleAsyncHook(parent_env_->isolate(), disable); |
|
869 |
} else if (pending_enable_async_hook_) { |
||
870 |
CHECK(!pending_disable_async_hook_); |
||
871 |
pending_enable_async_hook_ = false; |
||
872 |
} else { |
||
873 |
pending_disable_async_hook_ = true; |
||
874 |
} |
||
875 |
1 |
} |
|
876 |
|||
877 |
5 |
// Invokes the given JS callback (the enable/disable async-hook toggle) with
// no arguments. A failing call that is not a termination is treated as a
// fatal internal error.
void Agent::ToggleAsyncHook(Isolate* isolate, Local<Function> fn) {
  // Guard against running this during cleanup -- no async events will be
  // emitted anyway at that point anymore, and calling into JS is not possible.
  // This should probably not be something we're attempting in the first place,
  // Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039
  if (!parent_env_->can_call_into_js()) return;
  CHECK(parent_env_->has_run_bootstrapping_code());
  HandleScope handle_scope(isolate);
  CHECK(!fn.IsEmpty());
  auto context = parent_env_->context();
  v8::TryCatch try_catch(isolate);
  // The return value is intentionally ignored; only failure matters.
  USE(fn->Call(context, Undefined(isolate), 0, nullptr));
  // HasTerminated() means the isolate is being torn down — not an error here.
  if (try_catch.HasCaught() && !try_catch.HasTerminated()) {
    PrintCaughtException(isolate, context, try_catch);
    FatalError("\nnode::inspector::Agent::ToggleAsyncHook",
               "Cannot toggle Inspector's AsyncHook, please report this.");
  }
}
||
895 |
|||
896 |
3 |
// Forwards async-task scheduling to the inspector client so the debugger
// can correlate async stack traces. `task` is an opaque identity pointer.
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task,
                               bool recurring) {
  client_->AsyncTaskScheduled(task_name, task, recurring);
}
|
900 |
|||
901 |
4 |
// Forwards cancellation of a previously scheduled async task to the client.
void Agent::AsyncTaskCanceled(void* task) {
  client_->AsyncTaskCanceled(task);
}
|
904 |
|||
905 |
6 |
// Notifies the client that execution of an async task has begun.
void Agent::AsyncTaskStarted(void* task) {
  client_->AsyncTaskStarted(task);
}
|
908 |
|||
909 |
10 |
// Notifies the client that execution of an async task has completed.
void Agent::AsyncTaskFinished(void* task) {
  client_->AsyncTaskFinished(task);
}
|
912 |
|||
913 |
// Cancels every pending inspector async task via the client.
void Agent::AllAsyncTasksCanceled() {
  client_->AllAsyncTasksCanceled();
}
||
916 |
|||
917 |
2 |
// Requests (from any thread, e.g. the SIGUSR1 watcher) that the inspector
// I/O thread be started on the main thread.
void Agent::RequestIoThreadStart() {
  // We need to attempt to interrupt V8 flow (in case Node is running
  // continuous JS code) and to wake up libuv thread (in case Node is waiting
  // for IO events)
  if (!options().allow_attaching_debugger) {
    return;
  }
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
  parent_env_->RequestInterrupt([this](Environment*) {
    StartIoThread();
  });

  // Sent a second time deliberately: the interrupt above may have been
  // scheduled after the first wakeup was consumed.
  CHECK(start_io_thread_async_initialized);
  uv_async_send(&start_io_thread_async);
}
||
933 |
|||
934 |
7009 |
// Reports a newly created JS context to the inspector client. The main
// context is created before the client exists; in that case this is a no-op.
void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) {
  if (client_ != nullptr)
    client_->contextCreated(context, info);
}
||
939 |
|||
940 |
78721 |
bool Agent::IsActive() { |
|
941 |
✗✓ | 78721 |
if (client_ == nullptr) |
942 |
return false; |
||
943 |
✓✓✓✓ |
78721 |
return io_ != nullptr || client_->IsActive(); |
944 |
} |
||
945 |
|||
946 |
732 |
// Takes ownership of the handle linking this (worker) agent to its parent
// thread's inspector.
void Agent::SetParentHandle(
    std::unique_ptr<ParentInspectorHandle> parent_handle) {
  parent_handle_ = std::move(parent_handle);
}
|
950 |
|||
951 |
970 |
// Creates a ParentInspectorHandle for a new worker thread identified by
// `thread_id`/`url`. Returns an empty pointer (after throwing a JS error)
// when the inspector client was never created.
std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle(
    uint64_t thread_id, const std::string& url) {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    return nullptr;
  }

  CHECK_NOT_NULL(client_);
  // A worker agent chains through its own parent handle; the main thread
  // mints a fresh handle from the worker manager.
  if (parent_handle_)
    return parent_handle_->NewParentInspectorHandle(thread_id, url);
  return client_->getWorkerManager()->NewParentHandle(thread_id, url);
}
||
965 |
|||
966 |
void Agent::WaitForConnect() { |
||
967 |
if (!parent_env_->should_create_inspector() && !client_) { |
||
968 |
ThrowUninitializedInspectorError(parent_env_); |
||
969 |
return; |
||
970 |
} |
||
971 |
|||
972 |
CHECK_NOT_NULL(client_); |
||
973 |
client_->waitForFrontend(); |
||
974 |
} |
||
975 |
|||
976 |
1680 |
// Returns the worker manager used to wire new Worker threads into the
// inspector, or an empty pointer (after throwing a JS error) when the
// inspector client was never created.
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() {
  if (!parent_env_->should_create_inspector() && !client_) {
    ThrowUninitializedInspectorError(parent_env_);
    // Previously returned std::unique_ptr<WorkerManager>{}, relying on the
    // implicit unique_ptr -> shared_ptr conversion; return the declared
    // type directly for clarity.
    return nullptr;
  }

  CHECK_NOT_NULL(client_);
  return client_->getWorkerManager();
}
||
985 |
|||
986 |
6 |
// Returns the ws:// endpoint served by the I/O thread, or an empty string
// when the I/O thread is not running.
std::string Agent::GetWsUrl() const {
  return io_ == nullptr ? std::string() : io_->GetWsUrl();
}
||
991 |
|||
992 |
26624 |
// Disconnects this session's frontend, but only if the client (held via a
// weak_ptr) is still alive.
SameThreadInspectorSession::~SameThreadInspectorSession() {
  if (auto client = client_.lock())
    client->disconnectFrontend(session_id_);
}
|
997 |
|||
998 |
20569 |
void SameThreadInspectorSession::Dispatch( |
|
999 |
const v8_inspector::StringView& message) { |
||
1000 |
41138 |
auto client = client_.lock(); |
|
1001 |
✓✗ | 20569 |
if (client) |
1002 |
20569 |
client->dispatchMessageFromFrontend(session_id_, message); |
|
1003 |
20569 |
} |
|
1004 |
|||
1005 |
} // namespace inspector |
||
1006 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |