GCC Code Coverage Report

Directory: ../
File:      /home/iojs/build/workspace/node-test-commit-linux-coverage-daily/nodes/benchmark/out/../src/node_platform.cc
Date:      2020-06-24 22:13:30

                Exec  Total  Coverage
Lines:           351    366    95.9 %
Branches:         91    116    78.4 %

Line       Exec  Source
   1             #include "node_platform.h"
   2             #include "node_internals.h"
   3
   4             #include "env-inl.h"
   5             #include "debug_utils-inl.h"
   6             #include <algorithm>  // find_if(), find(), move()
   7             #include <cmath>  // llround()
   8             #include <memory>  // unique_ptr(), shared_ptr(), make_shared()
   9
  10             namespace node {
  11
  12             using v8::Isolate;
  13             using v8::Object;
  14             using v8::Platform;
  15             using v8::Task;
  16
  17             namespace {
  18
  19             struct PlatformWorkerData {
  20               TaskQueue<Task>* task_queue;
  21               Mutex* platform_workers_mutex;
  22               ConditionVariable* platform_workers_ready;
  23               int* pending_platform_workers;
  24               int id;
  25             };
  26
  27      17250  static void PlatformWorkerThread(void* data) {
  28               std::unique_ptr<PlatformWorkerData>
  29      34604        worker_data(static_cast<PlatformWorkerData*>(data));
  30
  31      17316    TaskQueue<Task>* pending_worker_tasks = worker_data->task_queue;
  32      34520    TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
  33                                       "PlatformWorkerThread");
  34
  35               // Notify the main thread that the platform worker is ready.
  36               {
  37      34715      Mutex::ScopedLock lock(*worker_data->platform_workers_mutex);
  38      17382      (*worker_data->pending_platform_workers)--;
  39      17382      worker_data->platform_workers_ready->Signal(lock);
  40               }
  41
  42     201508    while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
  43      91883      task->Run();
  44      91807      pending_worker_tasks->NotifyOfCompletion();
  45      92065    }
  46      17354  }
  47
  48             }  // namespace
  49
  50       4337  class WorkerThreadsTaskRunner::DelayedTaskScheduler {
  51              public:
  52       4344    explicit DelayedTaskScheduler(TaskQueue<Task>* tasks)
  53       4344      : pending_worker_tasks_(tasks) {}
  54
  55       4344    std::unique_ptr<uv_thread_t> Start() {
  56      13032      auto start_thread = [](void* data) {
  57       4344        static_cast<DelayedTaskScheduler*>(data)->Run();
  58      13025      };
  59       4344      std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
  60       4344      uv_sem_init(&ready_, 0);
  61       4344      CHECK_EQ(0, uv_thread_create(t.get(), start_thread, this));
  62       4344      uv_sem_wait(&ready_);
  63       4344      uv_sem_destroy(&ready_);
  64       4344      return t;
  65               }
  66
  67         73    void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
  68        146      tasks_.Push(std::make_unique<ScheduleTask>(this, std::move(task),
  69         73                                                 delay_in_seconds));
  70         73      uv_async_send(&flush_tasks_);
  71         73    }
  72
  73       4337    void Stop() {
  74       4337      tasks_.Push(std::make_unique<StopTask>(this));
  75       4337      uv_async_send(&flush_tasks_);
  76       4337    }
  77
  78              private:
  79       4344    void Run() {
  80       8688      TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
  81                                         "WorkerThreadsTaskRunner::DelayedTaskScheduler");
  82       4344      loop_.data = this;
  83       4344      CHECK_EQ(0, uv_loop_init(&loop_));
  84       4344      flush_tasks_.data = this;
  85       4344      CHECK_EQ(0, uv_async_init(&loop_, &flush_tasks_, FlushTasks));
  86       4344      uv_sem_post(&ready_);
  87
  88       4344      uv_run(&loop_, UV_RUN_DEFAULT);
  89       4337      CheckedUvLoopClose(&loop_);
  90       4337    }
  91
  92       4408    static void FlushTasks(uv_async_t* flush_tasks) {
  93                 DelayedTaskScheduler* scheduler =
  94       4408          ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
  95      13228      while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
  96       8820        task->Run();
  97       4408    }
  98
  99       8674    class StopTask : public Task {
 100                  public:
 101       4337      explicit StopTask(DelayedTaskScheduler* scheduler): scheduler_(scheduler) {}
 102
 103       4337      void Run() override {
 104       8674        std::vector<uv_timer_t*> timers;
 105       4405        for (uv_timer_t* timer : scheduler_->timers_)
 106         68          timers.push_back(timer);
 107       4405        for (uv_timer_t* timer : timers)
 108         68          scheduler_->TakeTimerTask(timer);
 109       8674        uv_close(reinterpret_cast<uv_handle_t*>(&scheduler_->flush_tasks_),
 110      17348                 [](uv_handle_t* handle) {});
 111       4337      }
 112
 113                  private:
 114                   DelayedTaskScheduler* scheduler_;
 115               };
 116
 117        146    class ScheduleTask : public Task {
 118                  public:
 119         73      ScheduleTask(DelayedTaskScheduler* scheduler,
 120                               std::unique_ptr<Task> task,
 121                               double delay_in_seconds)
 122         73        : scheduler_(scheduler),
 123         73          task_(std::move(task)),
 124        146          delay_in_seconds_(delay_in_seconds) {}
 125
 126         73      void Run() override {
 127         73        uint64_t delay_millis = llround(delay_in_seconds_ * 1000);
 128        146        std::unique_ptr<uv_timer_t> timer(new uv_timer_t());
 129         73        CHECK_EQ(0, uv_timer_init(&scheduler_->loop_, timer.get()));
 130         73        timer->data = task_.release();
 131         73        CHECK_EQ(0, uv_timer_start(timer.get(), RunTask, delay_millis, 0));
 132         73        scheduler_->timers_.insert(timer.release());
 133         73      }
 134
 135                  private:
 136                   DelayedTaskScheduler* scheduler_;
 137                   std::unique_ptr<Task> task_;
 138                   double delay_in_seconds_;
 139               };
 140
 141          5    static void RunTask(uv_timer_t* timer) {
 142                 DelayedTaskScheduler* scheduler =
 143          5          ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
 144          5      scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
 145          5    }
 146
 147         73    std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
 148         73      std::unique_ptr<Task> task(static_cast<Task*>(timer->data));
 149         73      uv_timer_stop(timer);
 150        365      uv_close(reinterpret_cast<uv_handle_t*>(timer), [](uv_handle_t* handle) {
 151         73        delete reinterpret_cast<uv_timer_t*>(handle);
 152        292      });
 153         73      timers_.erase(timer);
 154         73      return task;
 155               }
 156
 157               uv_sem_t ready_;
 158               TaskQueue<Task>* pending_worker_tasks_;
 159
 160               TaskQueue<Task> tasks_;
 161               uv_loop_t loop_;
 162               uv_async_t flush_tasks_;
 163               std::unordered_set<uv_timer_t*> timers_;
 164             };
 165
 166       4344  WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
 167       8688    Mutex platform_workers_mutex;
 168       8688    ConditionVariable platform_workers_ready;
 169
 170       8688    Mutex::ScopedLock lock(platform_workers_mutex);
 171       4344    int pending_platform_workers = thread_pool_size;
 172
 173       8688    delayed_task_scheduler_ = std::make_unique<DelayedTaskScheduler>(
 174      13032        &pending_worker_tasks_);
 175       4344    threads_.push_back(delayed_task_scheduler_->Start());
 176
 177      21726    for (int i = 0; i < thread_pool_size; i++) {
 178                 PlatformWorkerData* worker_data = new PlatformWorkerData{
 179      17382        &pending_worker_tasks_, &platform_workers_mutex,
 180                   &platform_workers_ready, &pending_platform_workers, i
 181      34764      };
 182      34764      std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
 183      17382      if (uv_thread_create(t.get(), PlatformWorkerThread,
 184                                       worker_data) != 0) {
 185                   break;
 186                 }
 187      17382      threads_.push_back(std::move(t));
 188               }
 189
 190               // Wait for platform workers to initialize before continuing with the
 191               // bootstrap.
 192      34898    while (pending_platform_workers > 0) {
 193      15277      platform_workers_ready.Wait(lock);
 194               }
 195       4344  }
 196
 197      92114  void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
 198      92114    pending_worker_tasks_.Push(std::move(task));
 199      92114  }
 200
 201         73  void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<Task> task,
 202                                                            double delay_in_seconds) {
 203         73    delayed_task_scheduler_->PostDelayedTask(std::move(task), delay_in_seconds);
 204         73  }
 205
 206      10425  void WorkerThreadsTaskRunner::BlockingDrain() {
 207      10425    pending_worker_tasks_.BlockingDrain();
 208      10425  }
 209
 210       4337  void WorkerThreadsTaskRunner::Shutdown() {
 211       4337    pending_worker_tasks_.Stop();
 212       4337    delayed_task_scheduler_->Stop();
 213      26028    for (size_t i = 0; i < threads_.size(); i++) {
 214      21691      CHECK_EQ(0, uv_thread_join(threads_[i].get()));
 215               }
 216       4337  }
 217
 218       4677  int WorkerThreadsTaskRunner::NumberOfWorkerThreads() const {
 219       4677    return threads_.size();
 220             }
 221
 222       4735  PerIsolatePlatformData::PerIsolatePlatformData(
 223       4735      Isolate* isolate, uv_loop_t* loop)
 224       4735    : isolate_(isolate), loop_(loop) {
 225       4735    flush_tasks_ = new uv_async_t();
 226       4735    CHECK_EQ(0, uv_async_init(loop, flush_tasks_, FlushTasks));
 227       4735    flush_tasks_->data = static_cast<void*>(this);
 228       4735    uv_unref(reinterpret_cast<uv_handle_t*>(flush_tasks_));
 229       4735  }
 230
 231             std::shared_ptr<v8::TaskRunner>
 232      24694  PerIsolatePlatformData::GetForegroundTaskRunner() {
 233      24694    return shared_from_this();
 234             }
 235
 236       6362  void PerIsolatePlatformData::FlushTasks(uv_async_t* handle) {
 237       6362    auto platform_data = static_cast<PerIsolatePlatformData*>(handle->data);
 238       6362    platform_data->FlushForegroundTasksInternal();
 239       6362  }
 240
 241             void PerIsolatePlatformData::PostIdleTask(std::unique_ptr<v8::IdleTask> task) {
 242               UNREACHABLE();
 243             }
 244
 245       5825  void PerIsolatePlatformData::PostTask(std::unique_ptr<Task> task) {
 246       5825    if (flush_tasks_ == nullptr) {
 247                 // V8 may post tasks during Isolate disposal. In that case, the only
 248                 // sensible path forward is to discard the task.
 249                 return;
 250               }
 251       5825    foreground_tasks_.Push(std::move(task));
 252       5825    uv_async_send(flush_tasks_);
 253             }
 254
 255       4700  void PerIsolatePlatformData::PostDelayedTask(
 256                 std::unique_ptr<Task> task, double delay_in_seconds) {
 257       4700    if (flush_tasks_ == nullptr) {
 258                 // V8 may post tasks during Isolate disposal. In that case, the only
 259                 // sensible path forward is to discard the task.
 260                 return;
 261               }
 262       9400    std::unique_ptr<DelayedTask> delayed(new DelayedTask());
 263       4700    delayed->task = std::move(task);
 264       4700    delayed->platform_data = shared_from_this();
 265       4700    delayed->timeout = delay_in_seconds;
 266       4700    foreground_delayed_tasks_.Push(std::move(delayed));
 267       4700    uv_async_send(flush_tasks_);
 268             }
 269
 270       1205  void PerIsolatePlatformData::PostNonNestableTask(std::unique_ptr<Task> task) {
 271       1205    PostTask(std::move(task));
 272       1205  }
 273
 274             void PerIsolatePlatformData::PostNonNestableDelayedTask(
 275                 std::unique_ptr<Task> task,
 276                 double delay_in_seconds) {
 277               PostDelayedTask(std::move(task), delay_in_seconds);
 278             }
 279
 280        802  PerIsolatePlatformData::~PerIsolatePlatformData() {
 281        401    CHECK(!flush_tasks_);
 282        401  }
 283
 284        353  void PerIsolatePlatformData::AddShutdownCallback(void (*callback)(void*),
 285                                                               void* data) {
 286        353    shutdown_callbacks_.emplace_back(ShutdownCallback { callback, data });
 287        353  }
 288
 289       4265  void PerIsolatePlatformData::Shutdown() {
 290       4265    if (flush_tasks_ == nullptr)
 291                 return;
 292
 293               // While there should be no V8 tasks in the queues at this point, it is
 294               // possible that Node.js-internal tasks from e.g. the inspector are still
 295               // lying around. We clear these queues and ignore the return value,
 296               // effectively deleting the tasks instead of running them.
 297       4265    foreground_delayed_tasks_.PopAll();
 298       4265    foreground_tasks_.PopAll();
 299       4265    scheduled_delayed_tasks_.clear();
 300
 301               // Both destroying the scheduled_delayed_tasks_ lists and closing
 302               // flush_tasks_ handle add tasks to the event loop. We keep a count of all
 303               // non-closed handles, and when that reaches zero, we inform any shutdown
 304               // callbacks that the platform is done as far as this Isolate is concerned.
 305       4265    self_reference_ = shared_from_this();
 306       8530    uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
 307       5067             [](uv_handle_t* handle) {
 308                 std::unique_ptr<uv_async_t> flush_tasks {
 309        802          reinterpret_cast<uv_async_t*>(handle) };
 310                 PerIsolatePlatformData* platform_data =
 311        401          static_cast<PerIsolatePlatformData*>(flush_tasks->data);
 312        401      platform_data->DecreaseHandleCount();
 313        401      platform_data->self_reference_.reset();
 314       9332    });
 315       4265    flush_tasks_ = nullptr;
 316             }
 317
 318        769  void PerIsolatePlatformData::DecreaseHandleCount() {
 319        769    CHECK_GE(uv_handle_count_, 1);
 320        769    if (--uv_handle_count_ == 0) {
 321        754      for (const auto& callback : shutdown_callbacks_)
 322        353        callback.cb(callback.data);
 323               }
 324        769  }
 325
 326       4344  NodePlatform::NodePlatform(int thread_pool_size,
 327       4344                             v8::TracingController* tracing_controller) {
 328       4344    if (tracing_controller != nullptr) {
 329       4339      tracing_controller_ = tracing_controller;
 330               } else {
 331          5      tracing_controller_ = new v8::TracingController();
 332               }
 333               // TODO(addaleax): It's a bit icky that we use global state here, but we can't
 334               // really do anything about it unless V8 starts exposing a way to access the
 335               // current v8::Platform instance.
 336       4344    SetTracingController(tracing_controller_);
 337               DCHECK_EQ(GetTracingController(), tracing_controller_);
 338               worker_thread_task_runner_ =
 339       4344        std::make_shared<WorkerThreadsTaskRunner>(thread_pool_size);
 340       4344  }
 341
 342      13011  NodePlatform::~NodePlatform() {
 343       4337    Shutdown();
 344       8674  }
 345
 346       4734  void NodePlatform::RegisterIsolate(Isolate* isolate, uv_loop_t* loop) {
 347       9468    Mutex::ScopedLock lock(per_isolate_mutex_);
 348       9468    auto delegate = std::make_shared<PerIsolatePlatformData>(isolate, loop);
 349       4734    IsolatePlatformDelegate* ptr = delegate.get();
 350               auto insertion = per_isolate_.emplace(
 351                 isolate,
 352       4734      std::make_pair(ptr, std::move(delegate)));
 353       4734    CHECK(insertion.second);
 354       4734  }
 355
 356          1  void NodePlatform::RegisterIsolate(Isolate* isolate,
 357                                                  IsolatePlatformDelegate* delegate) {
 358          2    Mutex::ScopedLock lock(per_isolate_mutex_);
 359               auto insertion = per_isolate_.emplace(
 360                 isolate,
 361          1      std::make_pair(delegate, std::shared_ptr<PerIsolatePlatformData>{}));
 362          1    CHECK(insertion.second);
 363          1  }
 364
 365       4265  void NodePlatform::UnregisterIsolate(Isolate* isolate) {
 366       8530    Mutex::ScopedLock lock(per_isolate_mutex_);
 367       4265    auto existing_it = per_isolate_.find(isolate);
 368       4265    CHECK_NE(existing_it, per_isolate_.end());
 369       4265    auto& existing = existing_it->second;
 370       4265    if (existing.second) {
 371       4264      existing.second->Shutdown();
 372               }
 373       4265    per_isolate_.erase(existing_it);
 374       4265  }
 375
 376        351  void NodePlatform::AddIsolateFinishedCallback(Isolate* isolate,
 377                                                            void (*cb)(void*), void* data) {
 378        704    Mutex::ScopedLock lock(per_isolate_mutex_);
 379        353    auto it = per_isolate_.find(isolate);
 380        353    if (it == per_isolate_.end()) {
 381                 cb(data);
 382                 return;
 383               }
 384        353    CHECK(it->second.second);
 385        353    it->second.second->AddShutdownCallback(cb, data);
 386             }
 387
 388       8671  void NodePlatform::Shutdown() {
 389       8671    if (has_shut_down_) return;
 390       4337    has_shut_down_ = true;
 391       4337    worker_thread_task_runner_->Shutdown();
 392
 393               {
 394       8674      Mutex::ScopedLock lock(per_isolate_mutex_);
 395       4337      per_isolate_.clear();
 396               }
 397             }
 398
 399       4677  int NodePlatform::NumberOfWorkerThreads() {
 400       4677    return worker_thread_task_runner_->NumberOfWorkerThreads();
 401             }
 402
 403       5687  void PerIsolatePlatformData::RunForegroundTask(std::unique_ptr<Task> task) {
 404       5687    DebugSealHandleScope scope(isolate_);
 405       5687    Environment* env = Environment::GetCurrent(isolate_);
 406       5687    if (env != nullptr) {
 407      11301      v8::HandleScope scope(isolate_);
 408       5651      InternalCallbackScope cb_scope(env, Object::New(isolate_), { 0, 0 },
 409      16953                                     InternalCallbackScope::kNoFlags);
 410       5651      task->Run();
 411               } else {
 412         36      task->Run();
 413               }
 414       5686  }
 415
 416         21  void PerIsolatePlatformData::DeleteFromScheduledTasks(DelayedTask* task) {
 417               auto it = std::find_if(scheduled_delayed_tasks_.begin(),
 418                                      scheduled_delayed_tasks_.end(),
 419         42                          [task](const DelayedTaskPointer& delayed) -> bool {
 420         42            return delayed.get() == task;
 421         21        });
 422         21    CHECK_NE(it, scheduled_delayed_tasks_.end());
 423         21    scheduled_delayed_tasks_.erase(it);
 424         21  }
 425
 426         21  void PerIsolatePlatformData::RunForegroundTask(uv_timer_t* handle) {
 427         21    DelayedTask* delayed = ContainerOf(&DelayedTask::timer, handle);
 428         21    delayed->platform_data->RunForegroundTask(std::move(delayed->task));
 429         21    delayed->platform_data->DeleteFromScheduledTasks(delayed);
 430         21  }
 431
 432       8433  void NodePlatform::DrainTasks(Isolate* isolate) {
 433      16865    std::shared_ptr<PerIsolatePlatformData> per_isolate = ForNodeIsolate(isolate);
 434       8434    if (!per_isolate) return;
 435
 436      10425    do {
 437                 // Worker tasks aren't associated with an Isolate.
 438      10425      worker_thread_task_runner_->BlockingDrain();
 439      10425    } while (per_isolate->FlushForegroundTasksInternal());
 440             }
 441
 442      16790  bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
 443      16790    bool did_work = false;
 444
 445               while (std::unique_ptr<DelayedTask> delayed =
 446      25562        foreground_delayed_tasks_.Pop()) {
 447       4386      did_work = true;
 448       4386      uint64_t delay_millis = llround(delayed->timeout * 1000);
 449
 450       4386      delayed->timer.data = static_cast<void*>(delayed.get());
 451       4386      uv_timer_init(loop_, &delayed->timer);
 452               // Timers may not guarantee queue ordering of events with the same delay if
 453               // the delay is non-zero. This should not be a problem in practice.
 454       4386      uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
 455       4386      uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
 456       4386      uv_handle_count_++;
 457
 458       8772      scheduled_delayed_tasks_.emplace_back(delayed.release(),
 459      12846                                            [](DelayedTask* delayed) {
 460       8460        uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
 461       4966                 [](uv_handle_t* handle) {
 462                   std::unique_ptr<DelayedTask> task {
 463        736            static_cast<DelayedTask*>(handle->data) };
 464        368          task->platform_data->DecreaseHandleCount();
 465       9196        });
 466      17232      });
 467       4386    }
 468               // Move all foreground tasks into a separate queue and flush that queue.
 469               // This way tasks that are posted while flushing the queue will be run on the
 470               // next call of FlushForegroundTasksInternal.
 471      33580    std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
 472      28121    while (!tasks.empty()) {
 473      11331      std::unique_ptr<Task> task = std::move(tasks.front());
 474       5666      tasks.pop();
 475       5666      did_work = true;
 476       5666      RunForegroundTask(std::move(task));
 477               }
 478      33579    return did_work;
 479             }
 480
 481      92114  void NodePlatform::CallOnWorkerThread(std::unique_ptr<Task> task) {
 482      92114    worker_thread_task_runner_->PostTask(std::move(task));
 483      92114  }
 484
 485         73  void NodePlatform::CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
 486                                                           double delay_in_seconds) {
 487        146    worker_thread_task_runner_->PostDelayedTask(std::move(task),
 488         73                                                delay_in_seconds);
 489         73  }
 490
 491
 492      24694  IsolatePlatformDelegate* NodePlatform::ForIsolate(Isolate* isolate) {
 493      49388    Mutex::ScopedLock lock(per_isolate_mutex_);
 494      49388    auto data = per_isolate_[isolate];
 495      24694    CHECK_NOT_NULL(data.first);
 496      49388    return data.first;
 497             }
 498
 499             std::shared_ptr<PerIsolatePlatformData>
 500       8437  NodePlatform::ForNodeIsolate(Isolate* isolate) {
 501      16875    Mutex::ScopedLock lock(per_isolate_mutex_);
 502      16876    auto data = per_isolate_[isolate];
 503       8438    CHECK_NOT_NULL(data.first);
 504      16876    return data.second;
 505             }
 506
 507          4  bool NodePlatform::FlushForegroundTasks(Isolate* isolate) {
 508          8    std::shared_ptr<PerIsolatePlatformData> per_isolate = ForNodeIsolate(isolate);
 509          4    if (!per_isolate) return false;
 510          4    return per_isolate->FlushForegroundTasksInternal();
 511             }
 512
 513             bool NodePlatform::IdleTasksEnabled(Isolate* isolate) {
 514               return ForIsolate(isolate)->IdleTasksEnabled();
 515             }
 516
 517             std::shared_ptr<v8::TaskRunner>
 518      24694  NodePlatform::GetForegroundTaskRunner(Isolate* isolate) {
 519      24694    return ForIsolate(isolate)->GetForegroundTaskRunner();
 520             }
 521
 522    1254937  double NodePlatform::MonotonicallyIncreasingTime() {
 523               // Convert nanos to seconds.
 524    1254937    return uv_hrtime() / 1e9;
 525             }
 526
 527   23015521  double NodePlatform::CurrentClockTimeMillis() {
 528   23015521    return SystemClockTimeMillis();
 529             }
 530
 531     270941  v8::TracingController* NodePlatform::GetTracingController() {
 532     270941    CHECK_NOT_NULL(tracing_controller_);
 533     270941    return tracing_controller_;
 534             }
 535
 536       4344  Platform::StackTracePrinter NodePlatform::GetStackTracePrinter() {
 537       4344    return []() {
 538                 fprintf(stderr, "\n");
 539                 DumpBacktrace(stderr);
 540                 fflush(stderr);
 541       8688    };
 542             }
 543
 544             template <class T>
 545      18158  TaskQueue<T>::TaskQueue()
 546                 : lock_(), tasks_available_(), tasks_drained_(),
 547      18158        outstanding_tasks_(0), stopped_(false), task_queue_() { }
 548
 549             template <class T>
 550     107054  void TaskQueue<T>::Push(std::unique_ptr<T> task) {
 551     214108    Mutex::ScopedLock scoped_lock(lock_);
 552     107054    outstanding_tasks_++;
 553     107054    task_queue_.push(std::move(task));
 554     107054    tasks_available_.Signal(scoped_lock);
 555     107054  }
 556
 557             template <class T>
 558      29994  std::unique_ptr<T> TaskQueue<T>::Pop() {
 559      59989    Mutex::ScopedLock scoped_lock(lock_);
 560      29995    if (task_queue_.empty()) {
 561      21199      return std::unique_ptr<T>(nullptr);
 562               }
 563      17592    std::unique_ptr<T> result = std::move(task_queue_.front());
 564       8796    task_queue_.pop();
 565       8796    return result;
 566             }
 567
 568             template <class T>
 569     109433  std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
 570     218904    Mutex::ScopedLock scoped_lock(lock_);
 571     314451    while (task_queue_.empty() && !stopped_) {
 572     102504      tasks_available_.Wait(scoped_lock);
 573               }
 574     109471    if (stopped_) {
 575      17354      return std::unique_ptr<T>(nullptr);
 576               }
 577     184234    std::unique_ptr<T> result = std::move(task_queue_.front());
 578      92117    task_queue_.pop();
 579      92117    return result;
 580             }
 581
 582             template <class T>
 583      91739  void TaskQueue<T>::NotifyOfCompletion() {
 584     183856    Mutex::ScopedLock scoped_lock(lock_);
 585      92117    if (--outstanding_tasks_ == 0) {
 586      53645      tasks_drained_.Broadcast(scoped_lock);
 587               }
 588      92083  }
 589
 590             template <class T>
 591      10425  void TaskQueue<T>::BlockingDrain() {
 592      20850    Mutex::ScopedLock scoped_lock(lock_);
 593      11251    while (outstanding_tasks_ > 0) {
 594        413      tasks_drained_.Wait(scoped_lock);
 595               }
 596      10425  }
 597
 598             template <class T>
 599       4337  void TaskQueue<T>::Stop() {
 600       8674    Mutex::ScopedLock scoped_lock(lock_);
 601       4337    stopped_ = true;
 602       4337    tasks_available_.Broadcast(scoped_lock);
 603       4337  }
 604
 605             template <class T>
 606      25321  std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() {
 607      50640    Mutex::ScopedLock scoped_lock(lock_);
 608      25321    std::queue<std::unique_ptr<T>> result;
 609      25320    result.swap(task_queue_);
 610      50640    return result;
 611             }
 612
 613             }  // namespace node