1 |
|
|
#include "node_platform.h" |
2 |
|
|
#include "node_internals.h" |
3 |
|
|
|
4 |
|
|
#include "env-inl.h" |
5 |
|
|
#include "debug_utils-inl.h" |
6 |
|
|
#include <algorithm> // find_if(), find(), move() |
7 |
|
|
#include <cmath> // llround() |
8 |
|
|
#include <memory> // unique_ptr(), shared_ptr(), make_shared() |
9 |
|
|
|
10 |
|
|
namespace node { |
11 |
|
|
|
12 |
|
|
using v8::Isolate; |
13 |
|
|
using v8::Object; |
14 |
|
|
using v8::Platform; |
15 |
|
|
using v8::Task; |
16 |
|
|
|
17 |
|
|
namespace { |
18 |
|
|
|
19 |
|
|
// Bundle of pointers handed (heap-allocated) to each platform worker thread.
// Ownership of the struct itself transfers to the thread, which deletes it;
// the pointed-to objects live on the creating thread's stack/members and must
// outlive worker startup (they are only used during the ready-handshake and
// for the long-lived task queue).
struct PlatformWorkerData {
  TaskQueue<Task>* task_queue;              // queue the worker drains; owned by WorkerThreadsTaskRunner
  Mutex* platform_workers_mutex;            // guards pending_platform_workers during startup
  ConditionVariable* platform_workers_ready; // signalled once per worker when it is ready
  int* pending_platform_workers;            // count of workers that have not signalled readiness yet
  int id;                                   // worker index within the pool
};
26 |
|
|
|
27 |
|
21490 |
// Entry point for each worker thread of the platform thread pool.
// Takes ownership of the PlatformWorkerData allocated by the creator,
// signals readiness, then drains the shared task queue until it is stopped.
static void PlatformWorkerThread(void* data) {
  // Adopt ownership of the startup data; freed when this function returns.
  std::unique_ptr<PlatformWorkerData>
      worker_data(static_cast<PlatformWorkerData*>(data));

  TaskQueue<Task>* pending_worker_tasks = worker_data->task_queue;
  TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
                        "PlatformWorkerThread");

  // Notify the main thread that the platform worker is ready.
  {
    Mutex::ScopedLock lock(*worker_data->platform_workers_mutex);
    (*worker_data->pending_platform_workers)--;
    worker_data->platform_workers_ready->Signal(lock);
  }

  // BlockingPop() returns nullptr once the queue is stopped, which
  // terminates this loop and lets the thread exit.
  while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
    task->Run();
    // Wake up anyone blocked in TaskQueue::BlockingDrain().
    pending_worker_tasks->NotifyOfCompletion();
  }
}
47 |
|
|
|
48 |
|
|
} // namespace |
49 |
|
|
|
50 |
|
|
// Runs a dedicated thread with its own libuv event loop whose only job is to
// hold uv_timer_t handles for delayed worker-pool tasks. When a timer fires,
// the wrapped Task is pushed onto the regular worker-task queue. All timer
// manipulation happens on the scheduler thread; other threads communicate
// with it exclusively through the tasks_ queue + flush_tasks_ async handle.
class WorkerThreadsTaskRunner::DelayedTaskScheduler {
 public:
  explicit DelayedTaskScheduler(TaskQueue<Task>* tasks)
    : pending_worker_tasks_(tasks) {}

  // Spawns the scheduler thread and blocks (on a semaphore) until its event
  // loop and async handle are initialized, so PostDelayedTask()/Stop() can
  // safely use flush_tasks_ afterwards. Returns the joinable thread handle.
  std::unique_ptr<uv_thread_t> Start() {
    auto start_thread = [](void* data) {
      static_cast<DelayedTaskScheduler*>(data)->Run();
    };
    std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
    uv_sem_init(&ready_, 0);
    CHECK_EQ(0, uv_thread_create(t.get(), start_thread, this));
    uv_sem_wait(&ready_);
    uv_sem_destroy(&ready_);
    return t;
  }

  // Called from any thread: enqueue a ScheduleTask that will create the
  // actual uv timer on the scheduler thread.
  void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
    tasks_.Push(std::make_unique<ScheduleTask>(this, std::move(task),
                                               delay_in_seconds));
    uv_async_send(&flush_tasks_);
  }

  // Called from any thread: ask the scheduler thread to tear down all
  // timers and close its handles, which ends its uv_run().
  void Stop() {
    tasks_.Push(std::make_unique<StopTask>(this));
    uv_async_send(&flush_tasks_);
  }

 private:
  // Thread body: set up the loop + async handle, signal readiness, then run
  // until StopTask closes all handles.
  void Run() {
    TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
                          "WorkerThreadsTaskRunner::DelayedTaskScheduler");
    // loop_.data is set so FlushTasks/RunTask can recover `this` via
    // ContainerOf on the loop pointer.
    loop_.data = this;
    CHECK_EQ(0, uv_loop_init(&loop_));
    flush_tasks_.data = this;
    CHECK_EQ(0, uv_async_init(&loop_, &flush_tasks_, FlushTasks));
    uv_sem_post(&ready_);

    uv_run(&loop_, UV_RUN_DEFAULT);
    CheckedUvLoopClose(&loop_);
  }

  // Async callback on the scheduler thread: drain and run all control tasks
  // (ScheduleTask/StopTask) posted from other threads.
  static void FlushTasks(uv_async_t* flush_tasks) {
    DelayedTaskScheduler* scheduler =
        ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
    while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
      task->Run();
  }

  // Control task: cancel every outstanding timer (discarding its payload
  // task) and close the async handle so the loop can exit.
  class StopTask : public Task {
   public:
    explicit StopTask(DelayedTaskScheduler* scheduler): scheduler_(scheduler) {}

    void Run() override {
      // Copy first: TakeTimerTask() erases from scheduler_->timers_, so we
      // must not iterate the set while mutating it.
      std::vector<uv_timer_t*> timers;
      for (uv_timer_t* timer : scheduler_->timers_)
        timers.push_back(timer);
      for (uv_timer_t* timer : timers)
        scheduler_->TakeTimerTask(timer);
      uv_close(reinterpret_cast<uv_handle_t*>(&scheduler_->flush_tasks_),
               [](uv_handle_t* handle) {});
    }

   private:
     DelayedTaskScheduler* scheduler_;
  };

  // Control task: create + start the uv timer for one delayed worker task.
  class ScheduleTask : public Task {
   public:
    ScheduleTask(DelayedTaskScheduler* scheduler,
                 std::unique_ptr<Task> task,
                 double delay_in_seconds)
      : scheduler_(scheduler),
        task_(std::move(task)),
        delay_in_seconds_(delay_in_seconds) {}

    void Run() override {
      uint64_t delay_millis = llround(delay_in_seconds_ * 1000);
      std::unique_ptr<uv_timer_t> timer(new uv_timer_t());
      CHECK_EQ(0, uv_timer_init(&scheduler_->loop_, timer.get()));
      // The raw Task pointer rides along in timer->data until the timer
      // fires (RunTask) or is cancelled (StopTask), both of which reclaim
      // it via TakeTimerTask().
      timer->data = task_.release();
      CHECK_EQ(0, uv_timer_start(timer.get(), RunTask, delay_millis, 0));
      scheduler_->timers_.insert(timer.release());
    }

   private:
    DelayedTaskScheduler* scheduler_;
    std::unique_ptr<Task> task_;
    double delay_in_seconds_;
  };

  // Timer callback: the delay elapsed, hand the task to the worker pool.
  static void RunTask(uv_timer_t* timer) {
    DelayedTaskScheduler* scheduler =
        ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
    scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
  }

  // Reclaims ownership of the Task stored in timer->data, stops and closes
  // the timer (the close callback frees the handle), and forgets it.
  std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
    std::unique_ptr<Task> task(static_cast<Task*>(timer->data));
    uv_timer_stop(timer);
    uv_close(reinterpret_cast<uv_handle_t*>(timer), [](uv_handle_t* handle) {
      delete reinterpret_cast<uv_timer_t*>(handle);
    });
    timers_.erase(timer);
    return task;
  }

  uv_sem_t ready_;                          // startup handshake with Start()
  TaskQueue<Task>* pending_worker_tasks_;   // destination queue for fired tasks

  TaskQueue<Task> tasks_;                   // cross-thread control-task queue
  uv_loop_t loop_;                          // owned event loop, scheduler thread only
  uv_async_t flush_tasks_;                  // wakes the loop to drain tasks_
  std::unordered_set<uv_timer_t*> timers_;  // live timers, scheduler thread only
};
165 |
|
|
|
166 |
|
5371 |
// Starts the delayed-task scheduler thread plus |thread_pool_size| platform
// worker threads, then blocks until every successfully created worker has
// signalled that it is ready to accept tasks.
WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
  Mutex platform_workers_mutex;
  ConditionVariable platform_workers_ready;

  Mutex::ScopedLock lock(platform_workers_mutex);
  int pending_platform_workers = thread_pool_size;

  delayed_task_scheduler_ = std::make_unique<DelayedTaskScheduler>(
      &pending_worker_tasks_);
  threads_.push_back(delayed_task_scheduler_->Start());

  for (int i = 0; i < thread_pool_size; i++) {
    PlatformWorkerData* worker_data = new PlatformWorkerData{
      &pending_worker_tasks_, &platform_workers_mutex,
      &platform_workers_ready, &pending_platform_workers, i
    };
    std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
    if (uv_thread_create(t.get(), PlatformWorkerThread,
                         worker_data) != 0) {
      // Thread creation failed: reclaim the data the thread would have
      // owned, and stop expecting ready-signals from this worker and the
      // ones that will no longer be created. Without this, |worker_data|
      // leaked and the wait loop below would block forever, since only
      // running workers decrement the pending counter.
      delete worker_data;
      pending_platform_workers -= thread_pool_size - i;
      break;
    }
    threads_.push_back(std::move(t));
  }

  // Wait for platform workers to initialize before continuing with the
  // bootstrap. (We still hold the lock, so workers can only decrement the
  // counter while we are parked inside Wait().)
  while (pending_platform_workers > 0) {
    platform_workers_ready.Wait(lock);
  }
}
196 |
|
|
|
197 |
|
105345 |
// Enqueues |task| for execution on any available platform worker thread.
// Thread-safe: TaskQueue::Push locks internally.
void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
  pending_worker_tasks_.Push(std::move(task));
}
200 |
|
|
|
201 |
|
526 |
// Schedules |task| to be pushed to the worker pool after |delay_in_seconds|.
// Delegates to the DelayedTaskScheduler thread, which owns all timers.
void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
                                              double delay_in_seconds) {
  delayed_task_scheduler_->PostDelayedTask(std::move(task), delay_in_seconds);
}
205 |
|
|
|
206 |
|
14576 |
// Blocks the calling thread until all currently outstanding worker tasks
// have completed. Does not wait for not-yet-fired delayed tasks.
void WorkerThreadsTaskRunner::BlockingDrain() {
  pending_worker_tasks_.BlockingDrain();
}
209 |
|
|
|
210 |
|
5364 |
void WorkerThreadsTaskRunner::Shutdown() { |
211 |
|
5364 |
pending_worker_tasks_.Stop(); |
212 |
|
5364 |
delayed_task_scheduler_->Stop(); |
213 |
✓✓ |
32190 |
for (size_t i = 0; i < threads_.size(); i++) { |
214 |
✗✓ |
26826 |
CHECK_EQ(0, uv_thread_join(threads_[i].get())); |
215 |
|
|
} |
216 |
|
5364 |
} |
217 |
|
|
|
218 |
|
63300 |
// Returns the total number of threads owned by this runner. Note this
// includes the delayed-task scheduler thread, not just pool workers.
int WorkerThreadsTaskRunner::NumberOfWorkerThreads() const {
  return threads_.size();
}
221 |
|
|
|
222 |
|
6150 |
// Binds this platform data to an isolate and its event loop, and creates the
// async handle used to wake the loop when foreground tasks are posted.
PerIsolatePlatformData::PerIsolatePlatformData(
  Isolate* isolate, uv_loop_t* loop)
  : isolate_(isolate), loop_(loop) {
  flush_tasks_ = new uv_async_t();
  CHECK_EQ(0, uv_async_init(loop, flush_tasks_, FlushTasks));
  flush_tasks_->data = static_cast<void*>(this);
  // Unref so that pending platform tasks alone do not keep the event loop
  // (and thus the process) alive.
  uv_unref(reinterpret_cast<uv_handle_t*>(flush_tasks_));
}
230 |
|
|
|
231 |
|
|
// This object is its own foreground task runner; hand out a shared_ptr
// aliasing *this (requires that it is already owned by a shared_ptr).
std::shared_ptr<v8::TaskRunner>
PerIsolatePlatformData::GetForegroundTaskRunner() {
  return shared_from_this();
}
235 |
|
|
|
236 |
|
9044 |
// libuv async callback: |handle->data| was pointed at the owning
// PerIsolatePlatformData in the constructor; run its pending tasks.
void PerIsolatePlatformData::FlushTasks(uv_async_t* handle) {
  PerIsolatePlatformData* self =
      static_cast<PerIsolatePlatformData*>(handle->data);
  self->FlushForegroundTasksInternal();
}
240 |
|
|
|
241 |
|
|
// Idle tasks are not supported (IdleTasksEnabled() reports false elsewhere,
// so V8 should never call this); aborts if reached.
void PerIsolatePlatformData::PostIdleTask(std::unique_ptr<v8::IdleTask> task) {
  UNREACHABLE();
}
244 |
|
|
|
245 |
|
10954 |
// Queues |task| to run on the isolate's event-loop thread and wakes the loop.
// NOTE(review): flush_tasks_ is read without a lock here; presumably posting
// races with Shutdown() are excluded by the caller — confirm before relying
// on cross-thread PostTask during teardown.
void PerIsolatePlatformData::PostTask(std::unique_ptr<Task> task) {
  if (flush_tasks_ == nullptr) {
    // V8 may post tasks during Isolate disposal. In that case, the only
    // sensible path forward is to discard the task.
    return;
  }
  foreground_tasks_.Push(std::move(task));
  uv_async_send(flush_tasks_);
}
254 |
|
|
|
255 |
|
5626 |
// Queues |task| to run on the isolate's event-loop thread after
// |delay_in_seconds|. The actual uv timer is created later, on the loop
// thread, by FlushForegroundTasksInternal().
void PerIsolatePlatformData::PostDelayedTask(
  std::unique_ptr<Task> task, double delay_in_seconds) {
  if (flush_tasks_ == nullptr) {
    // V8 may post tasks during Isolate disposal. In that case, the only
    // sensible path forward is to discard the task.
    return;
  }
  std::unique_ptr<DelayedTask> delayed(new DelayedTask());
  delayed->task = std::move(task);
  // Keep the platform data alive for as long as the delayed task exists.
  delayed->platform_data = shared_from_this();
  delayed->timeout = delay_in_seconds;
  foreground_delayed_tasks_.Push(std::move(delayed));
  uv_async_send(flush_tasks_);
}
269 |
|
|
|
270 |
|
8985 |
// Non-nestable tasks are treated the same as regular tasks here; the
// embedder does not run nested event loops that would require the distinction.
void PerIsolatePlatformData::PostNonNestableTask(std::unique_ptr<Task> task) {
  PostTask(std::move(task));
}
273 |
|
|
|
274 |
|
|
// Same as PostNonNestableTask: forwarded to the regular delayed-task path.
void PerIsolatePlatformData::PostNonNestableDelayedTask(
    std::unique_ptr<Task> task,
    double delay_in_seconds) {
  PostDelayedTask(std::move(task), delay_in_seconds);
}
279 |
|
|
|
280 |
|
1578 |
// Shutdown() must have run before destruction; it nulls flush_tasks_ after
// closing the handle, so a non-null value here means a missed Shutdown().
PerIsolatePlatformData::~PerIsolatePlatformData() {
  CHECK(!flush_tasks_);
}
283 |
|
|
|
284 |
|
731 |
// Registers |callback| to be invoked (with |data|) once all of this
// isolate's platform handles have finished closing; see DecreaseHandleCount().
void PerIsolatePlatformData::AddShutdownCallback(void (*callback)(void*),
                                                 void* data) {
  shutdown_callbacks_.emplace_back(ShutdownCallback { callback, data });
}
288 |
|
|
|
289 |
|
5602 |
// Begins teardown for this isolate's platform data: discards all queued
// tasks and closes the flush handle. Idempotent (returns early on second
// call because flush_tasks_ is nulled at the end).
void PerIsolatePlatformData::Shutdown() {
  if (flush_tasks_ == nullptr)
    return;

  // While there should be no V8 tasks in the queues at this point, it is
  // possible that Node.js-internal tasks from e.g. the inspector are still
  // lying around. We clear these queues and ignore the return value,
  // effectively deleting the tasks instead of running them.
  foreground_delayed_tasks_.PopAll();
  foreground_tasks_.PopAll();
  scheduled_delayed_tasks_.clear();

  // Both destroying the scheduled_delayed_tasks_ lists and closing
  // flush_tasks_ handle add tasks to the event loop. We keep a count of all
  // non-closed handles, and when that reaches zero, we inform any shutdown
  // callbacks that the platform is done as far as this Isolate is concerned.
  self_reference_ = shared_from_this();
  uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
           [](uv_handle_t* handle) {
    // Close callback runs later on the event loop; it frees the handle and
    // drops the self-reference taken above, possibly destroying *this.
    std::unique_ptr<uv_async_t> flush_tasks {
        reinterpret_cast<uv_async_t*>(handle) };
    PerIsolatePlatformData* platform_data =
        static_cast<PerIsolatePlatformData*>(flush_tasks->data);
    platform_data->DecreaseHandleCount();
    platform_data->self_reference_.reset();
  });
  flush_tasks_ = nullptr;
}
317 |
|
|
|
318 |
|
1572 |
// Called each time one of this isolate's uv handles finishes closing.
// When the last handle is gone, fires all registered shutdown callbacks.
void PerIsolatePlatformData::DecreaseHandleCount() {
  CHECK_GE(uv_handle_count_, 1);
  if (--uv_handle_count_ == 0) {
    for (const auto& callback : shutdown_callbacks_)
      callback.cb(callback.data);
  }
}
325 |
|
|
|
326 |
|
5371 |
// Constructs the Node.js v8::Platform implementation: adopts (or creates) a
// tracing controller, stores the optional page allocator, and spins up the
// shared worker-thread pool.
NodePlatform::NodePlatform(int thread_pool_size,
                           v8::TracingController* tracing_controller,
                           v8::PageAllocator* page_allocator) {
  if (tracing_controller != nullptr) {
    tracing_controller_ = tracing_controller;
  } else {
    // No controller supplied: create a default one. NOTE(review): this
    // instance does not appear to be freed in this file — presumably
    // intentional for process-lifetime platforms; confirm.
    tracing_controller_ = new v8::TracingController();
  }

  // V8 will default to its built in allocator if none is provided.
  page_allocator_ = page_allocator;

  // TODO(addaleax): It's a bit icky that we use global state here, but we can't
  // really do anything about it unless V8 starts exposing a way to access the
  // current v8::Platform instance.
  SetTracingController(tracing_controller_);
  DCHECK_EQ(GetTracingController(), tracing_controller_);
  worker_thread_task_runner_ =
      std::make_shared<WorkerThreadsTaskRunner>(thread_pool_size);
}
346 |
|
|
|
347 |
|
21456 |
// Ensures the worker pool and per-isolate state are torn down; Shutdown()
// is idempotent, so an explicit earlier call is fine.
NodePlatform::~NodePlatform() {
  Shutdown();
}
350 |
|
|
|
351 |
|
6149 |
// Registers an isolate whose foreground tasks run on |loop|; creates the
// PerIsolatePlatformData that owns the task queues. Aborts if the isolate
// was already registered.
void NodePlatform::RegisterIsolate(Isolate* isolate, uv_loop_t* loop) {
  Mutex::ScopedLock lock(per_isolate_mutex_);
  auto delegate = std::make_shared<PerIsolatePlatformData>(isolate, loop);
  IsolatePlatformDelegate* ptr = delegate.get();
  auto insertion = per_isolate_.emplace(
    isolate,
    std::make_pair(ptr, std::move(delegate)));
  CHECK(insertion.second);
}
360 |
|
|
|
361 |
|
1 |
// Registers an isolate with an embedder-provided delegate instead of a
// Node-owned PerIsolatePlatformData (the shared_ptr slot stays empty, which
// ForNodeIsolate() callers must handle). Aborts on double registration.
void NodePlatform::RegisterIsolate(Isolate* isolate,
                                   IsolatePlatformDelegate* delegate) {
  Mutex::ScopedLock lock(per_isolate_mutex_);
  auto insertion = per_isolate_.emplace(
    isolate,
    std::make_pair(delegate, std::shared_ptr<PerIsolatePlatformData>{}));
  CHECK(insertion.second);
}
369 |
|
|
|
370 |
|
5602 |
// Removes an isolate's registration. Must have been registered before.
// Only Node-owned platform data (second slot non-null) needs Shutdown();
// embedder delegates manage their own lifetime.
void NodePlatform::UnregisterIsolate(Isolate* isolate) {
  Mutex::ScopedLock lock(per_isolate_mutex_);
  auto existing_it = per_isolate_.find(isolate);
  CHECK_NE(existing_it, per_isolate_.end());
  auto& existing = existing_it->second;
  if (existing.second) {
    existing.second->Shutdown();
  }
  per_isolate_.erase(existing_it);
}
380 |
|
|
|
381 |
|
731 |
// Arranges for |cb(data)| to run once the isolate's platform handles have
// fully closed. If the isolate is unknown (already unregistered), the
// callback is invoked immediately instead of being dropped.
void NodePlatform::AddIsolateFinishedCallback(Isolate* isolate,
                                              void (*cb)(void*), void* data) {
  Mutex::ScopedLock lock(per_isolate_mutex_);
  auto it = per_isolate_.find(isolate);
  if (it == per_isolate_.end()) {
    cb(data);
    return;
  }
  // Only supported for Node-owned platform data, not embedder delegates.
  CHECK(it->second.second);
  it->second.second->AddShutdownCallback(cb, data);
}
392 |
|
|
|
393 |
|
10723 |
// Shuts the platform down exactly once: stops and joins the worker pool,
// then drops all per-isolate registrations. Safe to call multiple times.
void NodePlatform::Shutdown() {
  if (has_shut_down_) return;
  has_shut_down_ = true;
  worker_thread_task_runner_->Shutdown();

  {
    Mutex::ScopedLock lock(per_isolate_mutex_);
    per_isolate_.clear();
  }
}
403 |
|
|
|
404 |
|
63300 |
// v8::Platform API: number of background worker threads V8 may use.
int NodePlatform::NumberOfWorkerThreads() {
  return worker_thread_task_runner_->NumberOfWorkerThreads();
}
407 |
|
|
|
408 |
|
10576 |
// Runs one foreground task on the isolate's thread. If a Node Environment
// is available, the task runs inside an InternalCallbackScope so microtasks
// and other callback bookkeeping behave as for regular JS callbacks.
void PerIsolatePlatformData::RunForegroundTask(std::unique_ptr<Task> task) {
  DebugSealHandleScope scope(isolate_);
  Environment* env = Environment::GetCurrent(isolate_);
  if (env != nullptr) {
    v8::HandleScope scope(isolate_);
    InternalCallbackScope cb_scope(env, Object::New(isolate_), { 0, 0 },
                                   InternalCallbackScope::kNoFlags);
    task->Run();
  } else {
    // The task is moved out of InternalCallbackScope if env is not available.
    // This is a required else block, and should not be removed.
    // See comment: https://github.com/nodejs/node/pull/34688#pullrequestreview-463867489
    task->Run();
  }
}
423 |
|
|
|
424 |
|
23 |
// Removes |task| from the list of scheduled delayed tasks; the list entry's
// custom deleter then closes the associated uv timer. The task must be
// present in the list — anything else indicates a bookkeeping bug.
void PerIsolatePlatformData::DeleteFromScheduledTasks(DelayedTask* task) {
  const auto matches_task = [task](const DelayedTaskPointer& entry) -> bool {
    return entry.get() == task;
  };
  auto position = std::find_if(scheduled_delayed_tasks_.begin(),
                               scheduled_delayed_tasks_.end(),
                               matches_task);
  CHECK_NE(position, scheduled_delayed_tasks_.end());
  scheduled_delayed_tasks_.erase(position);
}
433 |
|
|
|
434 |
|
23 |
// uv timer callback for a delayed foreground task: recover the DelayedTask
// from the embedded timer handle, run its payload, then drop the
// bookkeeping entry (which also closes the timer handle).
void PerIsolatePlatformData::RunForegroundTask(uv_timer_t* handle) {
  DelayedTask* delayed = ContainerOf(&DelayedTask::timer, handle);
  delayed->platform_data->RunForegroundTask(std::move(delayed->task));
  delayed->platform_data->DeleteFromScheduledTasks(delayed);
}
439 |
|
|
|
440 |
|
10905 |
// Runs all currently pending platform tasks for |isolate| to completion:
// alternates between draining worker-pool tasks and flushing foreground
// tasks until a flush performs no work. No-op for embedder-delegate
// isolates (ForNodeIsolate returns null for those).
void NodePlatform::DrainTasks(Isolate* isolate) {
  std::shared_ptr<PerIsolatePlatformData> per_isolate = ForNodeIsolate(isolate);
  if (!per_isolate) return;

  do {
    // Worker tasks aren't associated with an Isolate.
    worker_thread_task_runner_->BlockingDrain();
  } while (per_isolate->FlushForegroundTasksInternal());
}
449 |
|
|
|
450 |
|
23624 |
// Drains both foreground queues on the loop thread. Delayed tasks get a uv
// timer created here (they were only queued by PostDelayedTask); immediate
// tasks are run directly. Returns true if any work was done, which
// DrainTasks() uses as its loop condition.
bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
  bool did_work = false;

  while (std::unique_ptr<DelayedTask> delayed =
      foreground_delayed_tasks_.Pop()) {
    did_work = true;
    uint64_t delay_millis = llround(delayed->timeout * 1000);

    delayed->timer.data = static_cast<void*>(delayed.get());
    uv_timer_init(loop_, &delayed->timer);
    // Timers may not guarantee queue ordering of events with the same delay if
    // the delay is non-zero. This should not be a problem in practice.
    uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
    // Unref so a pending delayed platform task alone does not keep the
    // event loop alive; track it in uv_handle_count_ for shutdown instead.
    uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
    uv_handle_count_++;

    // Ownership moves into scheduled_delayed_tasks_; the custom deleter
    // closes the timer and frees the DelayedTask only after the close
    // callback has run (handle memory must outlive uv_close).
    scheduled_delayed_tasks_.emplace_back(delayed.release(),
                                          [](DelayedTask* delayed) {
      uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
               [](uv_handle_t* handle) {
        std::unique_ptr<DelayedTask> task {
            static_cast<DelayedTask*>(handle->data) };
        task->platform_data->DecreaseHandleCount();
      });
    });
  }
  // Move all foreground tasks into a separate queue and flush that queue.
  // This way tasks that are posted while flushing the queue will be run on the
  // next call of FlushForegroundTasksInternal.
  std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
  while (!tasks.empty()) {
    std::unique_ptr<Task> task = std::move(tasks.front());
    tasks.pop();
    did_work = true;
    RunForegroundTask(std::move(task));
  }
  return did_work;
}
488 |
|
|
|
489 |
|
105345 |
// v8::Platform API: run |task| on the shared worker thread pool.
void NodePlatform::CallOnWorkerThread(std::unique_ptr<Task> task) {
  worker_thread_task_runner_->PostTask(std::move(task));
}
492 |
|
|
|
493 |
|
526 |
// v8::Platform API: run |task| on the worker pool after |delay_in_seconds|.
void NodePlatform::CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                             double delay_in_seconds) {
  worker_thread_task_runner_->PostDelayedTask(std::move(task),
                                              delay_in_seconds);
}
498 |
|
|
|
499 |
|
|
|
500 |
|
29478 |
// Looks up the platform delegate registered for |isolate|. The isolate must
// have been registered via one of the RegisterIsolate() overloads; looking
// up an unknown isolate aborts.
IsolatePlatformDelegate* NodePlatform::ForIsolate(Isolate* isolate) {
  Mutex::ScopedLock lock(per_isolate_mutex_);
  // Use find() rather than operator[]: the latter default-inserts a
  // {nullptr, nullptr} entry for unknown isolates, mutating the map on what
  // should be a read-only lookup (and copying the shared_ptr needlessly).
  auto it = per_isolate_.find(isolate);
  CHECK_NE(it, per_isolate_.end());
  CHECK_NOT_NULL(it->second.first);
  return it->second.first;
}
506 |
|
|
|
507 |
|
|
std::shared_ptr<PerIsolatePlatformData> |
508 |
|
10909 |
NodePlatform::ForNodeIsolate(Isolate* isolate) { |
509 |
|
21818 |
Mutex::ScopedLock lock(per_isolate_mutex_); |
510 |
|
21818 |
auto data = per_isolate_[isolate]; |
511 |
✗✓ |
10909 |
CHECK_NOT_NULL(data.first); |
512 |
|
10909 |
return data.second; |
513 |
|
|
} |
514 |
|
|
|
515 |
|
4 |
// Runs one round of pending foreground tasks for |isolate|. Returns true if
// any task ran; false if there was nothing to do or the isolate is managed
// by an embedder delegate.
bool NodePlatform::FlushForegroundTasks(Isolate* isolate) {
  std::shared_ptr<PerIsolatePlatformData> per_isolate = ForNodeIsolate(isolate);
  if (!per_isolate) return false;
  return per_isolate->FlushForegroundTasksInternal();
}
520 |
|
|
|
521 |
|
29079 |
// v8::Platform API: create a job backed by V8's default job implementation,
// scheduled onto this platform's worker pool.
std::unique_ptr<v8::JobHandle> NodePlatform::PostJob(v8::TaskPriority priority,
                                       std::unique_ptr<v8::JobTask> job_task) {
  return v8::platform::NewDefaultJobHandle(
      this, priority, std::move(job_task), NumberOfWorkerThreads());
}
526 |
|
|
|
527 |
|
|
// v8::Platform API: whether idle tasks are supported for |isolate|;
// delegated to the per-isolate platform delegate.
bool NodePlatform::IdleTasksEnabled(Isolate* isolate) {
  return ForIsolate(isolate)->IdleTasksEnabled();
}
530 |
|
|
|
531 |
|
|
// v8::Platform API: task runner that executes on |isolate|'s event-loop
// thread; delegated to the per-isolate platform delegate.
std::shared_ptr<v8::TaskRunner>
NodePlatform::GetForegroundTaskRunner(Isolate* isolate) {
  return ForIsolate(isolate)->GetForegroundTaskRunner();
}
535 |
|
|
|
536 |
|
228321 |
double NodePlatform::MonotonicallyIncreasingTime() { |
537 |
|
|
// Convert nanos to seconds. |
538 |
|
228321 |
return uv_hrtime() / 1e9; |
539 |
|
|
} |
540 |
|
|
|
541 |
|
15715379 |
// v8::Platform API: wall-clock time in milliseconds.
double NodePlatform::CurrentClockTimeMillis() {
  return SystemClockTimeMillis();
}
544 |
|
|
|
545 |
|
351630 |
// v8::Platform API: the tracing controller set up in the constructor.
// Never null once constructed.
v8::TracingController* NodePlatform::GetTracingController() {
  CHECK_NOT_NULL(tracing_controller_);
  return tracing_controller_;
}
549 |
|
|
|
550 |
|
5371 |
// v8::Platform API: returns a callback V8 can invoke to dump a native
// backtrace of the current thread to stderr (used on fatal errors).
Platform::StackTracePrinter NodePlatform::GetStackTracePrinter() {
  return []() {
    fprintf(stderr, "\n");
    DumpBacktrace(stderr);
    fflush(stderr);
  };
}
557 |
|
|
|
558 |
|
5372 |
// v8::Platform API: the embedder-provided page allocator, or nullptr, in
// which case V8 falls back to its built-in allocator.
v8::PageAllocator* NodePlatform::GetPageAllocator() {
  return page_allocator_;
}
561 |
|
|
|
562 |
|
|
// Creates an empty, running (non-stopped) queue with no outstanding tasks.
template <class T>
TaskQueue<T>::TaskQueue()
  : lock_(), tasks_available_(), tasks_drained_(),
    outstanding_tasks_(0), stopped_(false), task_queue_() { }
566 |
|
|
|
567 |
|
|
// Thread-safe enqueue. Also bumps outstanding_tasks_, which is only
// decremented by NotifyOfCompletion() after the task has run — so
// BlockingDrain() waits for execution, not just for an empty queue.
template <class T>
void TaskQueue<T>::Push(std::unique_ptr<T> task) {
  Mutex::ScopedLock scoped_lock(lock_);
  outstanding_tasks_++;
  task_queue_.push(std::move(task));
  tasks_available_.Signal(scoped_lock);
}
574 |
|
|
|
575 |
|
|
template <class T> |
576 |
|
81542 |
std::unique_ptr<T> TaskQueue<T>::Pop() { |
577 |
|
163084 |
Mutex::ScopedLock scoped_lock(lock_); |
578 |
✓✓ |
81542 |
if (task_queue_.empty()) { |
579 |
|
59028 |
return std::unique_ptr<T>(nullptr); |
580 |
|
|
} |
581 |
|
45028 |
std::unique_ptr<T> result = std::move(task_queue_.front()); |
582 |
|
22514 |
task_queue_.pop(); |
583 |
|
22514 |
return result; |
584 |
|
|
} |
585 |
|
|
|
586 |
|
|
// Blocking dequeue used by worker threads: waits until a task is available
// or Stop() has been called. Returns an empty pointer exactly when the
// queue has been stopped (the worker's signal to exit).
template <class T>
std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
  Mutex::ScopedLock scoped_lock(lock_);
  // Loop (rather than a single wait) to handle spurious wakeups and
  // multiple consumers racing for the same task.
  while (task_queue_.empty() && !stopped_) {
    tasks_available_.Wait(scoped_lock);
  }
  if (stopped_) {
    return std::unique_ptr<T>(nullptr);
  }
  std::unique_ptr<T> result = std::move(task_queue_.front());
  task_queue_.pop();
  return result;
}
599 |
|
|
|
600 |
|
|
// Called by a consumer after finishing a task obtained from this queue.
// When the last outstanding task completes, wakes everyone blocked in
// BlockingDrain().
template <class T>
void TaskQueue<T>::NotifyOfCompletion() {
  Mutex::ScopedLock scoped_lock(lock_);
  if (--outstanding_tasks_ == 0) {
    tasks_drained_.Broadcast(scoped_lock);
  }
}
607 |
|
|
|
608 |
|
|
// Blocks until every pushed task has been popped AND reported complete via
// NotifyOfCompletion(). Loop guards against spurious condvar wakeups.
template <class T>
void TaskQueue<T>::BlockingDrain() {
  Mutex::ScopedLock scoped_lock(lock_);
  while (outstanding_tasks_ > 0) {
    tasks_drained_.Wait(scoped_lock);
  }
}
615 |
|
|
|
616 |
|
|
// Marks the queue as stopped and wakes all consumers blocked in
// BlockingPop(), which will then return empty pointers. Irreversible.
template <class T>
void TaskQueue<T>::Stop() {
  Mutex::ScopedLock scoped_lock(lock_);
  stopped_ = true;
  tasks_available_.Broadcast(scoped_lock);
}
622 |
|
|
|
623 |
|
|
template <class T> |
624 |
|
34828 |
std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() { |
625 |
|
69656 |
Mutex::ScopedLock scoped_lock(lock_); |
626 |
|
34828 |
std::queue<std::unique_ptr<T>> result; |
627 |
|
34828 |
result.swap(task_queue_); |
628 |
|
34828 |
return result; |
629 |
|
|
} |
630 |
|
|
|
631 |
|
|
} // namespace node |