GCC Code Coverage Report | |||||||||||||||||||||
|
|||||||||||||||||||||
Line | Branch | Exec | Source |
1 |
#include "inspector_agent.h" |
||
2 |
|||
3 |
#include "env-inl.h" |
||
4 |
#include "inspector/main_thread_interface.h" |
||
5 |
#include "inspector/node_string.h" |
||
6 |
#include "inspector/runtime_agent.h" |
||
7 |
#include "inspector/tracing_agent.h" |
||
8 |
#include "inspector/worker_agent.h" |
||
9 |
#include "inspector/worker_inspector.h" |
||
10 |
#include "inspector_io.h" |
||
11 |
#include "node/inspector/protocol/Protocol.h" |
||
12 |
#include "node_errors.h" |
||
13 |
#include "node_internals.h" |
||
14 |
#include "node_options-inl.h" |
||
15 |
#include "node_process-inl.h" |
||
16 |
#include "node_url.h" |
||
17 |
#include "util-inl.h" |
||
18 |
#include "timer_wrap-inl.h" |
||
19 |
#include "v8-inspector.h" |
||
20 |
#include "v8-platform.h" |
||
21 |
|||
22 |
#include "libplatform/libplatform.h" |
||
23 |
|||
24 |
#ifdef __POSIX__ |
||
25 |
#include <pthread.h> |
||
26 |
#include <climits> // PTHREAD_STACK_MIN |
||
27 |
#endif // __POSIX__ |
||
28 |
|||
29 |
#include <algorithm> |
||
30 |
#include <cstring> |
||
31 |
#include <sstream> |
||
32 |
#include <unordered_map> |
||
33 |
#include <vector> |
||
34 |
|||
35 |
namespace node { |
||
36 |
namespace inspector { |
||
37 |
namespace { |
||
38 |
|||
39 |
using node::FatalError; |
||
40 |
|||
41 |
using v8::Context; |
||
42 |
using v8::Function; |
||
43 |
using v8::HandleScope; |
||
44 |
using v8::Isolate; |
||
45 |
using v8::Local; |
||
46 |
using v8::Message; |
||
47 |
using v8::Object; |
||
48 |
using v8::Value; |
||
49 |
|||
50 |
using v8_inspector::StringBuffer; |
||
51 |
using v8_inspector::StringView; |
||
52 |
using v8_inspector::V8Inspector; |
||
53 |
using v8_inspector::V8InspectorClient; |
||
54 |
|||
55 |
static uv_sem_t start_io_thread_semaphore; |
||
56 |
static uv_async_t start_io_thread_async; |
||
57 |
// This is just an additional check to make sure start_io_thread_async |
||
58 |
// is not accidentally re-used or used when uninitialized. |
||
59 |
static std::atomic_bool start_io_thread_async_initialized { false }; |
||
60 |
// Protects the Agent* stored in start_io_thread_async.data. |
||
61 |
static Mutex start_io_thread_async_mutex; |
||
62 |
|||
63 |
4 |
std::unique_ptr<StringBuffer> ToProtocolString(Isolate* isolate, |
|
64 |
Local<Value> value) { |
||
65 |
4 |
TwoByteValue buffer(isolate, value); |
|
66 |
4 |
return StringBuffer::create(StringView(*buffer, buffer.length())); |
|
67 |
} |
||
68 |
|||
69 |
// Called on the main thread. |
||
70 |
1 |
void StartIoThreadAsyncCallback(uv_async_t* handle) { |
|
71 |
1 |
static_cast<Agent*>(handle->data)->StartIoThread(); |
|
72 |
1 |
} |
|
73 |
|||
74 |
|||
75 |
#ifdef __POSIX__ |
||
76 |
1 |
static void StartIoThreadWakeup(int signo, siginfo_t* info, void* ucontext) { |
|
77 |
1 |
uv_sem_post(&start_io_thread_semaphore); |
|
78 |
1 |
} |
|
79 |
|||
80 |
5362 |
inline void* StartIoThreadMain(void* unused) { |
|
81 |
for (;;) { |
||
82 |
5362 |
uv_sem_wait(&start_io_thread_semaphore); |
|
83 |
2 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
84 |
|||
85 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
86 |
1 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
|
87 |
✓✗ | 1 |
if (agent != nullptr) |
88 |
1 |
agent->RequestIoThreadStart(); |
|
89 |
1 |
} |
|
90 |
} |
||
91 |
|||
92 |
5361 |
static int StartDebugSignalHandler() { |
|
93 |
// Start a watchdog thread for calling v8::Debug::DebugBreak() because |
||
94 |
// it's not safe to call directly from the signal handler, it can |
||
95 |
// deadlock with the thread it interrupts. |
||
96 |
✗✓ | 5361 |
CHECK_EQ(0, uv_sem_init(&start_io_thread_semaphore, 0)); |
97 |
pthread_attr_t attr; |
||
98 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_attr_init(&attr)); |
99 |
#if defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
100 |
// PTHREAD_STACK_MIN is 2 KiB with musl libc, which is too small to safely |
||
101 |
// receive signals. PTHREAD_STACK_MIN + MINSIGSTKSZ is 8 KiB on arm64, which |
||
102 |
// is the musl architecture with the biggest MINSIGSTKSZ so let's use that |
||
103 |
// as a lower bound and let's quadruple it just in case. The goal is to avoid |
||
104 |
// creating a big 2 or 4 MiB address space gap (problematic on 32 bits |
||
105 |
// because of fragmentation), not squeeze out every last byte. |
||
106 |
// Omitted on FreeBSD because it doesn't seem to like small stacks. |
||
107 |
5361 |
const size_t stack_size = std::max(static_cast<size_t>(4 * 8192), |
|
108 |
static_cast<size_t>(PTHREAD_STACK_MIN)); |
||
109 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_attr_setstacksize(&attr, stack_size)); |
110 |
#endif // defined(PTHREAD_STACK_MIN) && !defined(__FreeBSD__) |
||
111 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED)); |
112 |
sigset_t sigmask; |
||
113 |
// Mask all signals. |
||
114 |
5361 |
sigfillset(&sigmask); |
|
115 |
sigset_t savemask; |
||
116 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, &savemask)); |
117 |
5361 |
sigmask = savemask; |
|
118 |
pthread_t thread; |
||
119 |
5361 |
const int err = pthread_create(&thread, &attr, |
|
120 |
StartIoThreadMain, nullptr); |
||
121 |
// Restore original mask |
||
122 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_sigmask(SIG_SETMASK, &sigmask, nullptr)); |
123 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_attr_destroy(&attr)); |
124 |
✗✓ | 5361 |
if (err != 0) { |
125 |
fprintf(stderr, "node[%u]: pthread_create: %s\n", |
||
126 |
uv_os_getpid(), strerror(err)); |
||
127 |
fflush(stderr); |
||
128 |
// Leave SIGUSR1 blocked. We don't install a signal handler, |
||
129 |
// receiving the signal would terminate the process. |
||
130 |
return -err; |
||
131 |
} |
||
132 |
5361 |
RegisterSignalHandler(SIGUSR1, StartIoThreadWakeup); |
|
133 |
// Unblock SIGUSR1. A pending SIGUSR1 signal will now be delivered. |
||
134 |
5361 |
sigemptyset(&sigmask); |
|
135 |
5361 |
sigaddset(&sigmask, SIGUSR1); |
|
136 |
✗✓ | 5361 |
CHECK_EQ(0, pthread_sigmask(SIG_UNBLOCK, &sigmask, nullptr)); |
137 |
5361 |
return 0; |
|
138 |
} |
||
139 |
#endif // __POSIX__ |
||
140 |
|||
141 |
|||
142 |
#ifdef _WIN32 |
||
143 |
DWORD WINAPI StartIoThreadProc(void* arg) { |
||
144 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
||
145 |
CHECK(start_io_thread_async_initialized); |
||
146 |
Agent* agent = static_cast<Agent*>(start_io_thread_async.data); |
||
147 |
if (agent != nullptr) |
||
148 |
agent->RequestIoThreadStart(); |
||
149 |
return 0; |
||
150 |
} |
||
151 |
|||
152 |
static int GetDebugSignalHandlerMappingName(DWORD pid, wchar_t* buf, |
||
153 |
size_t buf_len) { |
||
154 |
return _snwprintf(buf, buf_len, L"node-debug-handler-%u", pid); |
||
155 |
} |
||
156 |
|||
157 |
static int StartDebugSignalHandler() { |
||
158 |
wchar_t mapping_name[32]; |
||
159 |
HANDLE mapping_handle; |
||
160 |
DWORD pid; |
||
161 |
LPTHREAD_START_ROUTINE* handler; |
||
162 |
|||
163 |
pid = uv_os_getpid(); |
||
164 |
|||
165 |
if (GetDebugSignalHandlerMappingName(pid, |
||
166 |
mapping_name, |
||
167 |
arraysize(mapping_name)) < 0) { |
||
168 |
return -1; |
||
169 |
} |
||
170 |
|||
171 |
mapping_handle = CreateFileMappingW(INVALID_HANDLE_VALUE, |
||
172 |
nullptr, |
||
173 |
PAGE_READWRITE, |
||
174 |
0, |
||
175 |
sizeof *handler, |
||
176 |
mapping_name); |
||
177 |
if (mapping_handle == nullptr) { |
||
178 |
return -1; |
||
179 |
} |
||
180 |
|||
181 |
handler = reinterpret_cast<LPTHREAD_START_ROUTINE*>( |
||
182 |
MapViewOfFile(mapping_handle, |
||
183 |
FILE_MAP_ALL_ACCESS, |
||
184 |
0, |
||
185 |
0, |
||
186 |
sizeof *handler)); |
||
187 |
if (handler == nullptr) { |
||
188 |
CloseHandle(mapping_handle); |
||
189 |
return -1; |
||
190 |
} |
||
191 |
|||
192 |
*handler = StartIoThreadProc; |
||
193 |
|||
194 |
UnmapViewOfFile(static_cast<void*>(handler)); |
||
195 |
|||
196 |
return 0; |
||
197 |
} |
||
198 |
#endif // _WIN32 |
||
199 |
|||
200 |
|||
201 |
const int CONTEXT_GROUP_ID = 1; |
||
202 |
|||
203 |
725 |
std::string GetWorkerLabel(node::Environment* env) { |
|
204 |
1450 |
std::ostringstream result; |
|
205 |
725 |
result << "Worker[" << env->thread_id() << "]"; |
|
206 |
725 |
return result.str(); |
|
207 |
} |
||
208 |
|||
209 |
class ChannelImpl final : public v8_inspector::V8Inspector::Channel, |
||
210 |
public protocol::FrontendChannel { |
||
211 |
public: |
||
212 |
7057 |
explicit ChannelImpl(Environment* env, |
|
213 |
const std::unique_ptr<V8Inspector>& inspector, |
||
214 |
std::shared_ptr<WorkerManager> worker_manager, |
||
215 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
216 |
std::shared_ptr<MainThreadHandle> main_thread_, |
||
217 |
bool prevent_shutdown) |
||
218 |
14114 |
: delegate_(std::move(delegate)), prevent_shutdown_(prevent_shutdown), |
|
219 |
7057 |
retaining_context_(false) { |
|
220 |
7057 |
session_ = inspector->connect(CONTEXT_GROUP_ID, this, StringView()); |
|
221 |
7057 |
node_dispatcher_ = std::make_unique<protocol::UberDispatcher>(this); |
|
222 |
tracing_agent_ = |
||
223 |
7057 |
std::make_unique<protocol::TracingAgent>(env, main_thread_); |
|
224 |
7057 |
tracing_agent_->Wire(node_dispatcher_.get()); |
|
225 |
✓✓ | 7057 |
if (worker_manager) { |
226 |
6317 |
worker_agent_ = std::make_unique<protocol::WorkerAgent>(worker_manager); |
|
227 |
6317 |
worker_agent_->Wire(node_dispatcher_.get()); |
|
228 |
} |
||
229 |
7057 |
runtime_agent_ = std::make_unique<protocol::RuntimeAgent>(); |
|
230 |
7057 |
runtime_agent_->Wire(node_dispatcher_.get()); |
|
231 |
7057 |
} |
|
232 |
|||
233 |
26092 |
~ChannelImpl() override { |
|
234 |
13046 |
tracing_agent_->disable(); |
|
235 |
13046 |
tracing_agent_.reset(); // Dispose before the dispatchers |
|
236 |
✓✓ | 13046 |
if (worker_agent_) { |
237 |
11566 |
worker_agent_->disable(); |
|
238 |
11566 |
worker_agent_.reset(); // Dispose before the dispatchers |
|
239 |
} |
||
240 |
13046 |
runtime_agent_->disable(); |
|
241 |
13046 |
runtime_agent_.reset(); // Dispose before the dispatchers |
|
242 |
26092 |
} |
|
243 |
|||
244 |
19699 |
void dispatchProtocolMessage(const StringView& message) { |
|
245 |
39398 |
std::string raw_message = protocol::StringUtil::StringViewToUtf8(message); |
|
246 |
std::unique_ptr<protocol::DictionaryValue> value = |
||
247 |
19699 |
protocol::DictionaryValue::cast(protocol::StringUtil::parseMessage( |
|
248 |
39398 |
raw_message, false)); |
|
249 |
int call_id; |
||
250 |
39398 |
std::string method; |
|
251 |
19699 |
node_dispatcher_->parseCommand(value.get(), &call_id, &method); |
|
252 |
19699 |
if (v8_inspector::V8InspectorSession::canDispatchMethod( |
|
253 |
✓✓ | 39398 |
Utf8ToStringView(method)->string())) { |
254 |
19660 |
session_->dispatchProtocolMessage(message); |
|
255 |
} else { |
||
256 |
39 |
node_dispatcher_->dispatch(call_id, method, std::move(value), |
|
257 |
raw_message); |
||
258 |
} |
||
259 |
19699 |
} |
|
260 |
|||
261 |
38 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
262 |
76 |
std::unique_ptr<StringBuffer> buffer = Utf8ToStringView(reason); |
|
263 |
38 |
session_->schedulePauseOnNextStatement(buffer->string(), buffer->string()); |
|
264 |
38 |
} |
|
265 |
|||
266 |
6357 |
bool preventShutdown() { |
|
267 |
6357 |
return prevent_shutdown_; |
|
268 |
} |
||
269 |
|||
270 |
6134 |
bool notifyWaitingForDisconnect() { |
|
271 |
6134 |
retaining_context_ = runtime_agent_->notifyWaitingForDisconnect(); |
|
272 |
6134 |
return retaining_context_; |
|
273 |
} |
||
274 |
|||
275 |
933 |
bool retainingContext() { |
|
276 |
933 |
return retaining_context_; |
|
277 |
} |
||
278 |
|||
279 |
private: |
||
280 |
19660 |
void sendResponse( |
|
281 |
int callId, |
||
282 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
283 |
19660 |
sendMessageToFrontend(message->string()); |
|
284 |
19660 |
} |
|
285 |
|||
286 |
7789 |
void sendNotification( |
|
287 |
std::unique_ptr<v8_inspector::StringBuffer> message) override { |
||
288 |
7789 |
sendMessageToFrontend(message->string()); |
|
289 |
7789 |
} |
|
290 |
|||
291 |
1344 |
void flushProtocolNotifications() override { } |
|
292 |
|||
293 |
28105 |
void sendMessageToFrontend(const StringView& message) { |
|
294 |
28105 |
delegate_->SendMessageToFrontend(message); |
|
295 |
28105 |
} |
|
296 |
|||
297 |
656 |
void sendMessageToFrontend(const std::string& message) { |
|
298 |
656 |
sendMessageToFrontend(Utf8ToStringView(message)->string()); |
|
299 |
656 |
} |
|
300 |
|||
301 |
using Serializable = protocol::Serializable; |
||
302 |
|||
303 |
39 |
void sendProtocolResponse(int callId, |
|
304 |
std::unique_ptr<Serializable> message) override { |
||
305 |
39 |
sendMessageToFrontend(message->serializeToJSON()); |
|
306 |
39 |
} |
|
307 |
617 |
void sendProtocolNotification( |
|
308 |
std::unique_ptr<Serializable> message) override { |
||
309 |
617 |
sendMessageToFrontend(message->serializeToJSON()); |
|
310 |
617 |
} |
|
311 |
|||
312 |
void fallThrough(int callId, |
||
313 |
const std::string& method, |
||
314 |
const std::string& message) override { |
||
315 |
DCHECK(false); |
||
316 |
} |
||
317 |
|||
318 |
std::unique_ptr<protocol::RuntimeAgent> runtime_agent_; |
||
319 |
std::unique_ptr<protocol::TracingAgent> tracing_agent_; |
||
320 |
std::unique_ptr<protocol::WorkerAgent> worker_agent_; |
||
321 |
std::unique_ptr<InspectorSessionDelegate> delegate_; |
||
322 |
std::unique_ptr<v8_inspector::V8InspectorSession> session_; |
||
323 |
std::unique_ptr<protocol::UberDispatcher> node_dispatcher_; |
||
324 |
bool prevent_shutdown_; |
||
325 |
bool retaining_context_; |
||
326 |
}; |
||
327 |
|||
328 |
class SameThreadInspectorSession : public InspectorSession { |
||
329 |
public: |
||
330 |
7057 |
SameThreadInspectorSession( |
|
331 |
int session_id, std::shared_ptr<NodeInspectorClient> client) |
||
332 |
7057 |
: session_id_(session_id), client_(client) {} |
|
333 |
~SameThreadInspectorSession() override; |
||
334 |
void Dispatch(const v8_inspector::StringView& message) override; |
||
335 |
|||
336 |
private: |
||
337 |
int session_id_; |
||
338 |
std::weak_ptr<NodeInspectorClient> client_; |
||
339 |
}; |
||
340 |
|||
341 |
77 |
void NotifyClusterWorkersDebugEnabled(Environment* env) { |
|
342 |
77 |
Isolate* isolate = env->isolate(); |
|
343 |
77 |
HandleScope handle_scope(isolate); |
|
344 |
77 |
Local<Context> context = env->context(); |
|
345 |
|||
346 |
// Send message to enable debug in cluster workers |
||
347 |
77 |
Local<Object> message = Object::New(isolate); |
|
348 |
77 |
message->Set(context, FIXED_ONE_BYTE_STRING(isolate, "cmd"), |
|
349 |
308 |
FIXED_ONE_BYTE_STRING(isolate, "NODE_DEBUG_ENABLED")).Check(); |
|
350 |
77 |
ProcessEmit(env, "internalMessage", message); |
|
351 |
77 |
} |
|
352 |
|||
353 |
#ifdef _WIN32 |
||
354 |
bool IsFilePath(const std::string& path) { |
||
355 |
// '\\' |
||
356 |
if (path.length() > 2 && path[0] == '\\' && path[1] == '\\') |
||
357 |
return true; |
||
358 |
// '[A-Z]:[/\\]' |
||
359 |
if (path.length() < 3) |
||
360 |
return false; |
||
361 |
if ((path[0] >= 'A' && path[0] <= 'Z') || (path[0] >= 'a' && path[0] <= 'z')) |
||
362 |
return path[1] == ':' && (path[2] == '/' || path[2] == '\\'); |
||
363 |
return false; |
||
364 |
} |
||
365 |
#else |
||
366 |
664726 |
bool IsFilePath(const std::string& path) { |
|
367 |
✓✓✓✓ |
664726 |
return !path.empty() && path[0] == '/'; |
368 |
} |
||
369 |
#endif // __POSIX__ |
||
370 |
|||
371 |
void ThrowUninitializedInspectorError(Environment* env) { |
||
372 |
HandleScope scope(env->isolate()); |
||
373 |
|||
374 |
const char* msg = "This Environment was initialized without a V8::Inspector"; |
||
375 |
Local<Value> exception = |
||
376 |
v8::String::NewFromUtf8(env->isolate(), msg).ToLocalChecked(); |
||
377 |
|||
378 |
env->isolate()->ThrowException(exception); |
||
379 |
} |
||
380 |
|||
381 |
} // namespace |
||
382 |
|||
383 |
class NodeInspectorClient : public V8InspectorClient { |
||
384 |
public: |
||
385 |
6088 |
explicit NodeInspectorClient(node::Environment* env, bool is_main) |
|
386 |
6088 |
: env_(env), is_main_(is_main) { |
|
387 |
6088 |
client_ = V8Inspector::create(env->isolate(), this); |
|
388 |
// TODO(bnoordhuis) Make name configurable from src/node.cc. |
||
389 |
std::string name = |
||
390 |
✓✓ | 12176 |
is_main_ ? GetHumanReadableProcessName() : GetWorkerLabel(env); |
391 |
12176 |
ContextInfo info(name); |
|
392 |
6088 |
info.is_default = true; |
|
393 |
6088 |
contextCreated(env->context(), info); |
|
394 |
6088 |
} |
|
395 |
|||
396 |
40 |
void runMessageLoopOnPause(int context_group_id) override { |
|
397 |
40 |
waiting_for_resume_ = true; |
|
398 |
40 |
runMessageLoop(); |
|
399 |
40 |
} |
|
400 |
|||
401 |
77 |
void waitForSessionsDisconnect() { |
|
402 |
77 |
waiting_for_sessions_disconnect_ = true; |
|
403 |
77 |
runMessageLoop(); |
|
404 |
77 |
} |
|
405 |
|||
406 |
19 |
void waitForFrontend() { |
|
407 |
19 |
waiting_for_frontend_ = true; |
|
408 |
19 |
runMessageLoop(); |
|
409 |
19 |
} |
|
410 |
|||
411 |
9 |
void maxAsyncCallStackDepthChanged(int depth) override { |
|
412 |
✓✓ | 9 |
if (waiting_for_sessions_disconnect_) { |
413 |
// V8 isolate is mostly done and is only letting Inspector protocol |
||
414 |
// clients gather data. |
||
415 |
4 |
return; |
|
416 |
} |
||
417 |
✓✗ | 5 |
if (auto agent = env_->inspector_agent()) { |
418 |
✓✓ | 5 |
if (depth == 0) { |
419 |
1 |
agent->DisableAsyncHook(); |
|
420 |
} else { |
||
421 |
4 |
agent->EnableAsyncHook(); |
|
422 |
} |
||
423 |
} |
||
424 |
} |
||
425 |
|||
426 |
6698 |
void contextCreated(Local<Context> context, const ContextInfo& info) { |
|
427 |
13396 |
auto name_buffer = Utf8ToStringView(info.name); |
|
428 |
13396 |
auto origin_buffer = Utf8ToStringView(info.origin); |
|
429 |
6698 |
std::unique_ptr<StringBuffer> aux_data_buffer; |
|
430 |
|||
431 |
v8_inspector::V8ContextInfo v8info( |
||
432 |
6698 |
context, CONTEXT_GROUP_ID, name_buffer->string()); |
|
433 |
6698 |
v8info.origin = origin_buffer->string(); |
|
434 |
|||
435 |
✓✓ | 6698 |
if (info.is_default) { |
436 |
6088 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":true}"); |
|
437 |
} else { |
||
438 |
610 |
aux_data_buffer = Utf8ToStringView("{\"isDefault\":false}"); |
|
439 |
} |
||
440 |
6698 |
v8info.auxData = aux_data_buffer->string(); |
|
441 |
|||
442 |
6698 |
client_->contextCreated(v8info); |
|
443 |
6698 |
} |
|
444 |
|||
445 |
6073 |
void contextDestroyed(Local<Context> context) { |
|
446 |
6073 |
client_->contextDestroyed(context); |
|
447 |
6073 |
} |
|
448 |
|||
449 |
28 |
void quitMessageLoopOnPause() override { |
|
450 |
28 |
waiting_for_resume_ = false; |
|
451 |
28 |
} |
|
452 |
|||
453 |
20 |
void runIfWaitingForDebugger(int context_group_id) override { |
|
454 |
20 |
waiting_for_frontend_ = false; |
|
455 |
20 |
} |
|
456 |
|||
457 |
7057 |
int connectFrontend(std::unique_ptr<InspectorSessionDelegate> delegate, |
|
458 |
bool prevent_shutdown) { |
||
459 |
7057 |
int session_id = next_session_id_++; |
|
460 |
14114 |
channels_[session_id] = std::make_unique<ChannelImpl>(env_, |
|
461 |
7057 |
client_, |
|
462 |
14114 |
getWorkerManager(), |
|
463 |
7057 |
std::move(delegate), |
|
464 |
14114 |
getThreadHandle(), |
|
465 |
7057 |
prevent_shutdown); |
|
466 |
7057 |
return session_id; |
|
467 |
} |
||
468 |
|||
469 |
930 |
void disconnectFrontend(int session_id) { |
|
470 |
930 |
auto it = channels_.find(session_id); |
|
471 |
✗✓ | 930 |
if (it == channels_.end()) |
472 |
return; |
||
473 |
930 |
bool retaining_context = it->second->retainingContext(); |
|
474 |
930 |
channels_.erase(it); |
|
475 |
✓✓ | 930 |
if (retaining_context) { |
476 |
✓✓ | 6 |
for (const auto& id_channel : channels_) { |
477 |
✗✓ | 3 |
if (id_channel.second->retainingContext()) |
478 |
return; |
||
479 |
} |
||
480 |
3 |
contextDestroyed(env_->context()); |
|
481 |
} |
||
482 |
✓✓✓✓ |
930 |
if (waiting_for_sessions_disconnect_ && !is_main_) |
483 |
2 |
waiting_for_sessions_disconnect_ = false; |
|
484 |
} |
||
485 |
|||
486 |
19699 |
void dispatchMessageFromFrontend(int session_id, const StringView& message) { |
|
487 |
19699 |
channels_[session_id]->dispatchProtocolMessage(message); |
|
488 |
19699 |
} |
|
489 |
|||
490 |
307 |
Local<Context> ensureDefaultContextInGroup(int contextGroupId) override { |
|
491 |
307 |
return env_->context(); |
|
492 |
} |
||
493 |
|||
494 |
3 |
void installAdditionalCommandLineAPI(Local<Context> context, |
|
495 |
Local<Object> target) override { |
||
496 |
3 |
Local<Function> installer = env_->inspector_console_extension_installer(); |
|
497 |
✓✗ | 3 |
if (!installer.IsEmpty()) { |
498 |
3 |
Local<Value> argv[] = {target}; |
|
499 |
// If there is an exception, proceed in JS land |
||
500 |
3 |
USE(installer->Call(context, target, arraysize(argv), argv)); |
|
501 |
} |
||
502 |
3 |
} |
|
503 |
|||
504 |
2 |
void ReportUncaughtException(Local<Value> error, Local<Message> message) { |
|
505 |
2 |
Isolate* isolate = env_->isolate(); |
|
506 |
2 |
Local<Context> context = env_->context(); |
|
507 |
|||
508 |
4 |
int script_id = message->GetScriptOrigin().ScriptId(); |
|
509 |
|||
510 |
2 |
Local<v8::StackTrace> stack_trace = message->GetStackTrace(); |
|
511 |
|||
512 |
✓✓✓✗ ✓✗ |
4 |
if (!stack_trace.IsEmpty() && stack_trace->GetFrameCount() > 0 && |
513 |
✓✓ | 4 |
script_id == stack_trace->GetFrame(isolate, 0)->GetScriptId()) { |
514 |
1 |
script_id = 0; |
|
515 |
} |
||
516 |
|||
517 |
2 |
const uint8_t DETAILS[] = "Uncaught"; |
|
518 |
|||
519 |
4 |
client_->exceptionThrown( |
|
520 |
context, |
||
521 |
StringView(DETAILS, sizeof(DETAILS) - 1), |
||
522 |
error, |
||
523 |
6 |
ToProtocolString(isolate, message->Get())->string(), |
|
524 |
4 |
ToProtocolString(isolate, message->GetScriptResourceName())->string(), |
|
525 |
✓✗ | 4 |
message->GetLineNumber(context).FromMaybe(0), |
526 |
✓✗ | 4 |
message->GetStartColumn(context).FromMaybe(0), |
527 |
4 |
client_->createStackTrace(stack_trace), |
|
528 |
2 |
script_id); |
|
529 |
2 |
} |
|
530 |
|||
531 |
2 |
void startRepeatingTimer(double interval_s, |
|
532 |
TimerCallback callback, |
||
533 |
void* data) override { |
||
534 |
auto result = |
||
535 |
2 |
timers_.emplace(std::piecewise_construct, std::make_tuple(data), |
|
536 |
8 |
std::make_tuple(env_, [=]() { callback(data); })); |
|
537 |
✗✓ | 2 |
CHECK(result.second); |
538 |
2 |
uint64_t interval = static_cast<uint64_t>(1000 * interval_s); |
|
539 |
2 |
result.first->second.Update(interval, interval); |
|
540 |
2 |
} |
|
541 |
|||
542 |
2 |
void cancelTimer(void* data) override { |
|
543 |
2 |
timers_.erase(data); |
|
544 |
2 |
} |
|
545 |
|||
546 |
// Async stack traces instrumentation. |
||
547 |
3 |
void AsyncTaskScheduled(const StringView& task_name, void* task, |
|
548 |
bool recurring) { |
||
549 |
3 |
client_->asyncTaskScheduled(task_name, task, recurring); |
|
550 |
3 |
} |
|
551 |
|||
552 |
4 |
void AsyncTaskCanceled(void* task) { |
|
553 |
4 |
client_->asyncTaskCanceled(task); |
|
554 |
4 |
} |
|
555 |
|||
556 |
7 |
void AsyncTaskStarted(void* task) { |
|
557 |
7 |
client_->asyncTaskStarted(task); |
|
558 |
7 |
} |
|
559 |
|||
560 |
11 |
void AsyncTaskFinished(void* task) { |
|
561 |
11 |
client_->asyncTaskFinished(task); |
|
562 |
11 |
} |
|
563 |
|||
564 |
void AllAsyncTasksCanceled() { |
||
565 |
client_->allAsyncTasksCanceled(); |
||
566 |
} |
||
567 |
|||
568 |
19 |
void schedulePauseOnNextStatement(const std::string& reason) { |
|
569 |
✓✓ | 57 |
for (const auto& id_channel : channels_) { |
570 |
38 |
id_channel.second->schedulePauseOnNextStatement(reason); |
|
571 |
} |
||
572 |
19 |
} |
|
573 |
|||
574 |
6229 |
bool hasConnectedSessions() { |
|
575 |
✓✓ | 12492 |
for (const auto& id_channel : channels_) { |
576 |
// Other sessions are "invisible" more most purposes |
||
577 |
✓✓ | 6357 |
if (id_channel.second->preventShutdown()) |
578 |
94 |
return true; |
|
579 |
} |
||
580 |
6135 |
return false; |
|
581 |
} |
||
582 |
|||
583 |
6073 |
bool notifyWaitingForDisconnect() { |
|
584 |
6073 |
bool retaining_context = false; |
|
585 |
✓✓ | 12207 |
for (const auto& id_channel : channels_) { |
586 |
✓✓ | 6134 |
if (id_channel.second->notifyWaitingForDisconnect()) |
587 |
3 |
retaining_context = true; |
|
588 |
} |
||
589 |
6073 |
return retaining_context; |
|
590 |
} |
||
591 |
|||
592 |
13220 |
std::shared_ptr<MainThreadHandle> getThreadHandle() { |
|
593 |
✓✓ | 13220 |
if (!interface_) { |
594 |
6085 |
interface_ = std::make_shared<MainThreadInterface>( |
|
595 |
12170 |
env_->inspector_agent()); |
|
596 |
} |
||
597 |
13220 |
return interface_->GetHandle(); |
|
598 |
} |
||
599 |
|||
600 |
9673 |
std::shared_ptr<WorkerManager> getWorkerManager() { |
|
601 |
✓✓ | 9673 |
if (!is_main_) { |
602 |
740 |
return nullptr; |
|
603 |
} |
||
604 |
✓✓ | 8933 |
if (worker_manager_ == nullptr) { |
605 |
worker_manager_ = |
||
606 |
5358 |
std::make_shared<WorkerManager>(getThreadHandle()); |
|
607 |
} |
||
608 |
8933 |
return worker_manager_; |
|
609 |
} |
||
610 |
|||
611 |
49740 |
bool IsActive() { |
|
612 |
49740 |
return !channels_.empty(); |
|
613 |
} |
||
614 |
|||
615 |
private: |
||
616 |
253 |
bool shouldRunMessageLoop() { |
|
617 |
✓✓ | 253 |
if (waiting_for_frontend_) |
618 |
48 |
return true; |
|
619 |
✓✓✓✓ |
205 |
if (waiting_for_sessions_disconnect_ || waiting_for_resume_) { |
620 |
156 |
return hasConnectedSessions(); |
|
621 |
} |
||
622 |
49 |
return false; |
|
623 |
} |
||
624 |
|||
625 |
136 |
void runMessageLoop() { |
|
626 |
✗✓ | 136 |
if (running_nested_loop_) |
627 |
return; |
||
628 |
|||
629 |
136 |
running_nested_loop_ = true; |
|
630 |
|||
631 |
✓✓ | 253 |
while (shouldRunMessageLoop()) { |
632 |
✓✗ | 117 |
if (interface_) interface_->WaitForFrontendEvent(); |
633 |
117 |
env_->RunAndClearInterrupts(); |
|
634 |
} |
||
635 |
136 |
running_nested_loop_ = false; |
|
636 |
} |
||
637 |
|||
638 |
44794 |
double currentTimeMS() override { |
|
639 |
44794 |
return env_->isolate_data()->platform()->CurrentClockTimeMillis(); |
|
640 |
} |
||
641 |
|||
642 |
664726 |
std::unique_ptr<StringBuffer> resourceNameToUrl( |
|
643 |
const StringView& resource_name_view) override { |
||
644 |
std::string resource_name = |
||
645 |
1329452 |
protocol::StringUtil::StringViewToUtf8(resource_name_view); |
|
646 |
✓✓ | 664726 |
if (!IsFilePath(resource_name)) |
647 |
629007 |
return nullptr; |
|
648 |
35719 |
node::url::URL url = node::url::URL::FromFilePath(resource_name); |
|
649 |
35719 |
return Utf8ToStringView(url.href()); |
|
650 |
} |
||
651 |
|||
652 |
node::Environment* env_; |
||
653 |
bool is_main_; |
||
654 |
bool running_nested_loop_ = false; |
||
655 |
std::unique_ptr<V8Inspector> client_; |
||
656 |
// Note: ~ChannelImpl may access timers_ so timers_ has to come first. |
||
657 |
std::unordered_map<void*, TimerWrapHandle> timers_; |
||
658 |
std::unordered_map<int, std::unique_ptr<ChannelImpl>> channels_; |
||
659 |
int next_session_id_ = 1; |
||
660 |
bool waiting_for_resume_ = false; |
||
661 |
bool waiting_for_frontend_ = false; |
||
662 |
bool waiting_for_sessions_disconnect_ = false; |
||
663 |
// Allows accessing Inspector from non-main threads |
||
664 |
std::shared_ptr<MainThreadInterface> interface_; |
||
665 |
std::shared_ptr<WorkerManager> worker_manager_; |
||
666 |
}; |
||
667 |
|||
668 |
6094 |
Agent::Agent(Environment* env) |
|
669 |
: parent_env_(env), |
||
670 |
6094 |
debug_options_(env->options()->debug_options()), |
|
671 |
12188 |
host_port_(env->inspector_host_port()) {} |
|
672 |
|||
673 |
5562 |
Agent::~Agent() {} |
|
674 |
|||
675 |
6088 |
bool Agent::Start(const std::string& path, |
|
676 |
const DebugOptions& options, |
||
677 |
std::shared_ptr<ExclusiveAccess<HostPort>> host_port, |
||
678 |
bool is_main) { |
||
679 |
6088 |
path_ = path; |
|
680 |
6088 |
debug_options_ = options; |
|
681 |
✗✓ | 6088 |
CHECK_NOT_NULL(host_port); |
682 |
6088 |
host_port_ = host_port; |
|
683 |
|||
684 |
6088 |
client_ = std::make_shared<NodeInspectorClient>(parent_env_, is_main); |
|
685 |
✓✓ | 6088 |
if (parent_env_->owns_inspector()) { |
686 |
5361 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
687 |
✗✓ | 5361 |
CHECK_EQ(start_io_thread_async_initialized.exchange(true), false); |
688 |
✗✓ | 5361 |
CHECK_EQ(0, uv_async_init(parent_env_->event_loop(), |
689 |
&start_io_thread_async, |
||
690 |
StartIoThreadAsyncCallback)); |
||
691 |
5361 |
uv_unref(reinterpret_cast<uv_handle_t*>(&start_io_thread_async)); |
|
692 |
5361 |
start_io_thread_async.data = this; |
|
693 |
// Ignore failure, SIGUSR1 won't work, but that should not block node start. |
||
694 |
5361 |
StartDebugSignalHandler(); |
|
695 |
|||
696 |
5361 |
parent_env_->AddCleanupHook([](void* data) { |
|
697 |
4829 |
Environment* env = static_cast<Environment*>(data); |
|
698 |
|||
699 |
{ |
||
700 |
4829 |
Mutex::ScopedLock lock(start_io_thread_async_mutex); |
|
701 |
4829 |
start_io_thread_async.data = nullptr; |
|
702 |
} |
||
703 |
|||
704 |
// This is global, will never get freed |
||
705 |
4829 |
env->CloseHandle(&start_io_thread_async, [](uv_async_t*) { |
|
706 |
✗✓ | 4829 |
CHECK(start_io_thread_async_initialized.exchange(false)); |
707 |
4829 |
}); |
|
708 |
5361 |
}, parent_env_); |
|
709 |
} |
||
710 |
|||
711 |
6088 |
AtExit(parent_env_, [](void* env) { |
|
712 |
6076 |
Agent* agent = static_cast<Environment*>(env)->inspector_agent(); |
|
713 |
✓✓ | 6076 |
if (agent->IsActive()) { |
714 |
6071 |
agent->WaitForDisconnect(); |
|
715 |
} |
||
716 |
6088 |
}, parent_env_); |
|
717 |
|||
718 |
6088 |
bool wait_for_connect = options.wait_for_connect(); |
|
719 |
✓✓ | 6088 |
if (parent_handle_) { |
720 |
725 |
wait_for_connect = parent_handle_->WaitForConnect(); |
|
721 |
725 |
parent_handle_->WorkerStarted(client_->getThreadHandle(), wait_for_connect); |
|
722 |
✓✓✓✓ ✓✓ |
5363 |
} else if (!options.inspector_enabled || !StartIoThread()) { |
723 |
5287 |
return false; |
|
724 |
} |
||
725 |
|||
726 |
// Patch the debug options to implement waitForDebuggerOnStart for |
||
727 |
// the NodeWorker.enable method. |
||
728 |
✓✓ | 801 |
if (wait_for_connect) { |
729 |
✗✓ | 19 |
CHECK(!parent_env_->has_serialized_options()); |
730 |
19 |
debug_options_.EnableBreakFirstLine(); |
|
731 |
19 |
parent_env_->options()->get_debug_options()->EnableBreakFirstLine(); |
|
732 |
19 |
client_->waitForFrontend(); |
|
733 |
} |
||
734 |
801 |
return true; |
|
735 |
} |
||
736 |
|||
737 |
79 |
bool Agent::StartIoThread() { |
|
738 |
✓✓ | 79 |
if (io_ != nullptr) |
739 |
1 |
return true; |
|
740 |
|||
741 |
✗✓✗✗ ✗✓ |
78 |
if (!parent_env_->should_create_inspector() && !client_) { |
742 |
ThrowUninitializedInspectorError(parent_env_); |
||
743 |
return false; |
||
744 |
} |
||
745 |
|||
746 |
✗✓ | 78 |
CHECK_NOT_NULL(client_); |
747 |
|||
748 |
156 |
io_ = InspectorIo::Start(client_->getThreadHandle(), |
|
749 |
78 |
path_, |
|
750 |
78 |
host_port_, |
|
751 |
156 |
debug_options_.inspect_publish_uid); |
|
752 |
✓✓ | 78 |
if (io_ == nullptr) { |
753 |
1 |
return false; |
|
754 |
} |
||
755 |
77 |
NotifyClusterWorkersDebugEnabled(parent_env_); |
|
756 |
77 |
return true; |
|
757 |
} |
||
758 |
|||
759 |
4 |
void Agent::Stop() { |
|
760 |
4 |
io_.reset(); |
|
761 |
4 |
} |
|
762 |
|||
763 |
7057 |
std::unique_ptr<InspectorSession> Agent::Connect( |
|
764 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
765 |
bool prevent_shutdown) { |
||
766 |
✗✓✗✗ ✗✓ |
7057 |
if (!parent_env_->should_create_inspector() && !client_) { |
767 |
ThrowUninitializedInspectorError(parent_env_); |
||
768 |
return std::unique_ptr<InspectorSession>{}; |
||
769 |
} |
||
770 |
|||
771 |
✗✓ | 7057 |
CHECK_NOT_NULL(client_); |
772 |
|||
773 |
7057 |
int session_id = client_->connectFrontend(std::move(delegate), |
|
774 |
prevent_shutdown); |
||
775 |
return std::unique_ptr<InspectorSession>( |
||
776 |
7057 |
new SameThreadInspectorSession(session_id, client_)); |
|
777 |
} |
||
778 |
|||
779 |
2 |
std::unique_ptr<InspectorSession> Agent::ConnectToMainThread( |
|
780 |
std::unique_ptr<InspectorSessionDelegate> delegate, |
||
781 |
bool prevent_shutdown) { |
||
782 |
✗✓✗✗ ✗✓ |
2 |
if (!parent_env_->should_create_inspector() && !client_) { |
783 |
ThrowUninitializedInspectorError(parent_env_); |
||
784 |
return std::unique_ptr<InspectorSession>{}; |
||
785 |
} |
||
786 |
|||
787 |
✗✓ | 2 |
CHECK_NOT_NULL(parent_handle_); |
788 |
✗✓ | 2 |
CHECK_NOT_NULL(client_); |
789 |
auto thread_safe_delegate = |
||
790 |
4 |
client_->getThreadHandle()->MakeDelegateThreadSafe(std::move(delegate)); |
|
791 |
2 |
return parent_handle_->Connect(std::move(thread_safe_delegate), |
|
792 |
2 |
prevent_shutdown); |
|
793 |
} |
||
794 |
|||
795 |
6073 |
void Agent::WaitForDisconnect() { |
|
796 |
✗✓✗✗ ✗✓ |
6073 |
if (!parent_env_->should_create_inspector() && !client_) { |
797 |
ThrowUninitializedInspectorError(parent_env_); |
||
798 |
return; |
||
799 |
} |
||
800 |
|||
801 |
✗✓ | 6073 |
CHECK_NOT_NULL(client_); |
802 |
6073 |
bool is_worker = parent_handle_ != nullptr; |
|
803 |
6073 |
parent_handle_.reset(); |
|
804 |
✓✓✓✓ ✓✓ |
6073 |
if (client_->hasConnectedSessions() && !is_worker) { |
805 |
13 |
fprintf(stderr, "Waiting for the debugger to disconnect...\n"); |
|
806 |
13 |
fflush(stderr); |
|
807 |
} |
||
808 |
✓✓ | 6073 |
if (!client_->notifyWaitingForDisconnect()) { |
809 |
6070 |
client_->contextDestroyed(parent_env_->context()); |
|
810 |
✓✓ | 3 |
} else if (is_worker) { |
811 |
2 |
client_->waitForSessionsDisconnect(); |
|
812 |
} |
||
813 |
✓✓ | 6073 |
if (io_ != nullptr) { |
814 |
75 |
io_->StopAcceptingNewConnections(); |
|
815 |
75 |
client_->waitForSessionsDisconnect(); |
|
816 |
} |
||
817 |
} |
||
818 |
|||
819 |
250 |
void Agent::ReportUncaughtException(Local<Value> error, |
|
820 |
Local<Message> message) { |
||
821 |
✓✓ | 250 |
if (!IsListening()) |
822 |
248 |
return; |
|
823 |
2 |
client_->ReportUncaughtException(error, message); |
|
824 |
2 |
WaitForDisconnect(); |
|
825 |
} |
||
826 |
|||
827 |
19 |
void Agent::PauseOnNextJavascriptStatement(const std::string& reason) { |
|
828 |
19 |
client_->schedulePauseOnNextStatement(reason); |
|
829 |
19 |
} |
|
830 |
|||
831 |
6057 |
void Agent::RegisterAsyncHook(Isolate* isolate, |
|
832 |
Local<Function> enable_function, |
||
833 |
Local<Function> disable_function) { |
||
834 |
6057 |
parent_env_->set_inspector_enable_async_hooks(enable_function); |
|
835 |
6057 |
parent_env_->set_inspector_disable_async_hooks(disable_function); |
|
836 |
✓✓ | 6057 |
if (pending_enable_async_hook_) { |
837 |
✗✓ | 2 |
CHECK(!pending_disable_async_hook_); |
838 |
2 |
pending_enable_async_hook_ = false; |
|
839 |
2 |
EnableAsyncHook(); |
|
840 |
✗✓ | 6055 |
} else if (pending_disable_async_hook_) { |
841 |
CHECK(!pending_enable_async_hook_); |
||
842 |
pending_disable_async_hook_ = false; |
||
843 |
DisableAsyncHook(); |
||
844 |
} |
||
845 |
6057 |
} |
|
846 |
|||
847 |
6 |
void Agent::EnableAsyncHook() { |
|
848 |
12 |
HandleScope scope(parent_env_->isolate()); |
|
849 |
6 |
Local<Function> enable = parent_env_->inspector_enable_async_hooks(); |
|
850 |
✓✓ | 6 |
if (!enable.IsEmpty()) { |
851 |
4 |
ToggleAsyncHook(parent_env_->isolate(), enable); |
|
852 |
✗✓ | 2 |
} else if (pending_disable_async_hook_) { |
853 |
CHECK(!pending_enable_async_hook_); |
||
854 |
pending_disable_async_hook_ = false; |
||
855 |
} else { |
||
856 |
2 |
pending_enable_async_hook_ = true; |
|
857 |
} |
||
858 |
6 |
} |
|
859 |
|||
860 |
1 |
void Agent::DisableAsyncHook() { |
|
861 |
2 |
HandleScope scope(parent_env_->isolate()); |
|
862 |
1 |
Local<Function> disable = parent_env_->inspector_enable_async_hooks(); |
|
863 |
✓✗ | 1 |
if (!disable.IsEmpty()) { |
864 |
1 |
ToggleAsyncHook(parent_env_->isolate(), disable); |
|
865 |
} else if (pending_enable_async_hook_) { |
||
866 |
CHECK(!pending_disable_async_hook_); |
||
867 |
pending_enable_async_hook_ = false; |
||
868 |
} else { |
||
869 |
pending_disable_async_hook_ = true; |
||
870 |
} |
||
871 |
1 |
} |
|
872 |
|||
873 |
5 |
void Agent::ToggleAsyncHook(Isolate* isolate, Local<Function> fn) { |
|
874 |
// Guard against running this during cleanup -- no async events will be |
||
875 |
// emitted anyway at that point anymore, and calling into JS is not possible. |
||
876 |
// This should probably not be something we're attempting in the first place, |
||
877 |
// Refs: https://github.com/nodejs/node/pull/34362#discussion_r456006039 |
||
878 |
✗✓ | 5 |
if (!parent_env_->can_call_into_js()) return; |
879 |
✗✓ | 5 |
CHECK(parent_env_->has_run_bootstrapping_code()); |
880 |
10 |
HandleScope handle_scope(isolate); |
|
881 |
✗✓ | 5 |
CHECK(!fn.IsEmpty()); |
882 |
5 |
auto context = parent_env_->context(); |
|
883 |
10 |
v8::TryCatch try_catch(isolate); |
|
884 |
10 |
USE(fn->Call(context, Undefined(isolate), 0, nullptr)); |
|
885 |
✗✓✗✗ ✗✓ |
5 |
if (try_catch.HasCaught() && !try_catch.HasTerminated()) { |
886 |
PrintCaughtException(isolate, context, try_catch); |
||
887 |
FatalError("\nnode::inspector::Agent::ToggleAsyncHook", |
||
888 |
"Cannot toggle Inspector's AsyncHook, please report this."); |
||
889 |
} |
||
890 |
} |
||
891 |
|||
892 |
3 |
void Agent::AsyncTaskScheduled(const StringView& task_name, void* task, |
|
893 |
bool recurring) { |
||
894 |
3 |
client_->AsyncTaskScheduled(task_name, task, recurring); |
|
895 |
3 |
} |
|
896 |
|||
897 |
4 |
void Agent::AsyncTaskCanceled(void* task) { |
|
898 |
4 |
client_->AsyncTaskCanceled(task); |
|
899 |
4 |
} |
|
900 |
|||
901 |
7 |
void Agent::AsyncTaskStarted(void* task) { |
|
902 |
7 |
client_->AsyncTaskStarted(task); |
|
903 |
7 |
} |
|
904 |
|||
905 |
11 |
void Agent::AsyncTaskFinished(void* task) { |
|
906 |
11 |
client_->AsyncTaskFinished(task); |
|
907 |
11 |
} |
|
908 |
|||
909 |
void Agent::AllAsyncTasksCanceled() { |
||
910 |
client_->AllAsyncTasksCanceled(); |
||
911 |
} |
||
912 |
|||
913 |
1 |
void Agent::RequestIoThreadStart() { |
|
914 |
// We need to attempt to interrupt V8 flow (in case Node is running |
||
915 |
// continuous JS code) and to wake up libuv thread (in case Node is waiting |
||
916 |
// for IO events) |
||
917 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
918 |
1 |
uv_async_send(&start_io_thread_async); |
|
919 |
1 |
parent_env_->RequestInterrupt([this](Environment*) { |
|
920 |
1 |
StartIoThread(); |
|
921 |
1 |
}); |
|
922 |
|||
923 |
✗✓ | 1 |
CHECK(start_io_thread_async_initialized); |
924 |
1 |
uv_async_send(&start_io_thread_async); |
|
925 |
1 |
} |
|
926 |
|||
927 |
6704 |
void Agent::ContextCreated(Local<Context> context, const ContextInfo& info) { |
|
928 |
✓✓ | 6704 |
if (client_ == nullptr) // This happens for a main context |
929 |
6094 |
return; |
|
930 |
610 |
client_->contextCreated(context, info); |
|
931 |
} |
||
932 |
|||
933 |
49850 |
bool Agent::IsActive() { |
|
934 |
✗✓ | 49850 |
if (client_ == nullptr) |
935 |
return false; |
||
936 |
✓✓✓✓ |
49850 |
return io_ != nullptr || client_->IsActive(); |
937 |
} |
||
938 |
|||
939 |
725 |
void Agent::SetParentHandle( |
|
940 |
std::unique_ptr<ParentInspectorHandle> parent_handle) { |
||
941 |
725 |
parent_handle_ = std::move(parent_handle); |
|
942 |
725 |
} |
|
943 |
|||
944 |
962 |
std::unique_ptr<ParentInspectorHandle> Agent::GetParentHandle( |
|
945 |
uint64_t thread_id, const std::string& url) { |
||
946 |
✗✓✗✗ ✗✓ |
962 |
if (!parent_env_->should_create_inspector() && !client_) { |
947 |
ThrowUninitializedInspectorError(parent_env_); |
||
948 |
return std::unique_ptr<ParentInspectorHandle>{}; |
||
949 |
} |
||
950 |
|||
951 |
✗✓ | 962 |
CHECK_NOT_NULL(client_); |
952 |
✓✓ | 962 |
if (!parent_handle_) { |
953 |
952 |
return client_->getWorkerManager()->NewParentHandle(thread_id, url); |
|
954 |
} else { |
||
955 |
10 |
return parent_handle_->NewParentInspectorHandle(thread_id, url); |
|
956 |
} |
||
957 |
} |
||
958 |
|||
959 |
void Agent::WaitForConnect() { |
||
960 |
if (!parent_env_->should_create_inspector() && !client_) { |
||
961 |
ThrowUninitializedInspectorError(parent_env_); |
||
962 |
return; |
||
963 |
} |
||
964 |
|||
965 |
CHECK_NOT_NULL(client_); |
||
966 |
client_->waitForFrontend(); |
||
967 |
} |
||
968 |
|||
969 |
1664 |
std::shared_ptr<WorkerManager> Agent::GetWorkerManager() { |
|
970 |
✗✓✗✗ ✗✓ |
1664 |
if (!parent_env_->should_create_inspector() && !client_) { |
971 |
ThrowUninitializedInspectorError(parent_env_); |
||
972 |
return std::unique_ptr<WorkerManager>{}; |
||
973 |
} |
||
974 |
|||
975 |
✗✓ | 1664 |
CHECK_NOT_NULL(client_); |
976 |
1664 |
return client_->getWorkerManager(); |
|
977 |
} |
||
978 |
|||
979 |
6 |
std::string Agent::GetWsUrl() const { |
|
980 |
✓✓ | 6 |
if (io_ == nullptr) |
981 |
1 |
return ""; |
|
982 |
5 |
return io_->GetWsUrl(); |
|
983 |
} |
||
984 |
|||
985 |
26092 |
SameThreadInspectorSession::~SameThreadInspectorSession() { |
|
986 |
26092 |
auto client = client_.lock(); |
|
987 |
✓✓ | 13046 |
if (client) |
988 |
1860 |
client->disconnectFrontend(session_id_); |
|
989 |
26092 |
} |
|
990 |
|||
991 |
19699 |
void SameThreadInspectorSession::Dispatch( |
|
992 |
const v8_inspector::StringView& message) { |
||
993 |
39398 |
auto client = client_.lock(); |
|
994 |
✓✗ | 19699 |
if (client) |
995 |
19699 |
client->dispatchMessageFromFrontend(session_id_, message); |
|
996 |
19699 |
} |
|
997 |
|||
998 |
} // namespace inspector |
||
999 |
} // namespace node |
Generated by: GCOVR (Version 4.2) |