GCC Code Coverage Report
Directory: ../
File:      /home/iojs/build/workspace/node-test-commit-linux-coverage-daily/nodes/benchmark/out/../src/node_platform.cc
Date:      2019-02-01 22:03:38
Lines:     264 of 320 (82.5 %)
Branches:  68 of 100 (68.0 %)

Line      Exec  Source
   1            #include "node_platform.h"
   2            #include "node_internals.h"
   3
   4            #include "env-inl.h"
   5            #include "debug_utils.h"
   6            #include "util.h"
   7            #include <algorithm>
   8
   9            namespace node {
  10
  11            using v8::HandleScope;
  12            using v8::Isolate;
  13            using v8::Local;
  14            using v8::Object;
  15            using v8::Platform;
  16            using v8::Task;
  17            using node::tracing::TracingController;
  18
  19            namespace {
  20
  21            struct PlatformWorkerData {
  22              TaskQueue<Task>* task_queue;
  23              Mutex* platform_workers_mutex;
  24              ConditionVariable* platform_workers_ready;
  25              int* pending_platform_workers;
  26              int id;
  27            };
  28
  29       652  static void PlatformWorkerThread(void* data) {
  30              std::unique_ptr<PlatformWorkerData>
  31       652        worker_data(static_cast<PlatformWorkerData*>(data));
  32
  33       654    TaskQueue<Task>* pending_worker_tasks = worker_data->task_queue;
  34       653    TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
  35                                      "PlatformWorkerThread");
  36
  37              // Notify the main thread that the platform worker is ready.
  38              {
  39       652      Mutex::ScopedLock lock(*worker_data->platform_workers_mutex);
  40       656      (*worker_data->pending_platform_workers)--;
  41       656      worker_data->platform_workers_ready->Signal(lock);
  42              }
  43
  44     21465    while (std::unique_ptr<Task> task = pending_worker_tasks->BlockingPop()) {
  45     20728      task->Run();
  46     20767      pending_worker_tasks->NotifyOfCompletion();
  47     20809    }
  48       656  }
  49
  50            }  // namespace
  51
  52       164  class WorkerThreadsTaskRunner::DelayedTaskScheduler {
  53             public:
  54       164    explicit DelayedTaskScheduler(TaskQueue<Task>* tasks)
  55       164      : pending_worker_tasks_(tasks) {}
  56
  57       164    std::unique_ptr<uv_thread_t> Start() {
  58       492      auto start_thread = [](void* data) {
  59       164        static_cast<DelayedTaskScheduler*>(data)->Run();
  60       492      };
  61       164      std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
  62       164      uv_sem_init(&ready_, 0);
  63       164      CHECK_EQ(0, uv_thread_create(t.get(), start_thread, this));
  64       164      uv_sem_wait(&ready_);
  65       164      uv_sem_destroy(&ready_);
  66       164      return t;
  67              }
  68
  69              void PostDelayedTask(std::unique_ptr<Task> task, double delay_in_seconds) {
  70                tasks_.Push(std::unique_ptr<Task>(new ScheduleTask(this, std::move(task),
  71                                                                   delay_in_seconds)));
  72                uv_async_send(&flush_tasks_);
  73              }
  74
  75       164    void Stop() {
  76       164      tasks_.Push(std::unique_ptr<Task>(new StopTask(this)));
  77       164      uv_async_send(&flush_tasks_);
  78       164    }
  79
  80             private:
  81       164    void Run() {
  82       164      TRACE_EVENT_METADATA1("__metadata", "thread_name", "name",
  83                                        "WorkerThreadsTaskRunner::DelayedTaskScheduler");
  84       164      loop_.data = this;
  85       164      CHECK_EQ(0, uv_loop_init(&loop_));
  86       164      flush_tasks_.data = this;
  87       164      CHECK_EQ(0, uv_async_init(&loop_, &flush_tasks_, FlushTasks));
  88       164      uv_sem_post(&ready_);
  89
  90       164      uv_run(&loop_, UV_RUN_DEFAULT);
  91       164      CheckedUvLoopClose(&loop_);
  92       164    }
  93
  94       164    static void FlushTasks(uv_async_t* flush_tasks) {
  95                DelayedTaskScheduler* scheduler =
  96       164          ContainerOf(&DelayedTaskScheduler::loop_, flush_tasks->loop);
  97       328      while (std::unique_ptr<Task> task = scheduler->tasks_.Pop())
  98       164        task->Run();
  99       164    }
 100
 101       328    class StopTask : public Task {
 102               public:
 103       164      explicit StopTask(DelayedTaskScheduler* scheduler): scheduler_(scheduler) {}
 104
 105       164      void Run() override {
 106       164        std::vector<uv_timer_t*> timers;
 107       164        for (uv_timer_t* timer : scheduler_->timers_)
 108                      timers.push_back(timer);
 109       164        for (uv_timer_t* timer : timers)
 110                      scheduler_->TakeTimerTask(timer);
 111                    uv_close(reinterpret_cast<uv_handle_t*>(&scheduler_->flush_tasks_),
 112       656                   [](uv_handle_t* handle) {});
 113       164      }
 114
 115               private:
 116                 DelayedTaskScheduler* scheduler_;
 117              };
 118
 119              class ScheduleTask : public Task {
 120               public:
 121                ScheduleTask(DelayedTaskScheduler* scheduler,
 122                             std::unique_ptr<Task> task,
 123                             double delay_in_seconds)
 124                  : scheduler_(scheduler),
 125                    task_(std::move(task)),
 126                    delay_in_seconds_(delay_in_seconds) {}
 127
 128                void Run() override {
 129                  uint64_t delay_millis =
 130                      static_cast<uint64_t>(delay_in_seconds_ + 0.5) * 1000;
 131                  std::unique_ptr<uv_timer_t> timer(new uv_timer_t());
 132                  CHECK_EQ(0, uv_timer_init(&scheduler_->loop_, timer.get()));
 133                  timer->data = task_.release();
 134                  CHECK_EQ(0, uv_timer_start(timer.get(), RunTask, delay_millis, 0));
 135                  scheduler_->timers_.insert(timer.release());
 136                }
 137
 138               private:
 139                DelayedTaskScheduler* scheduler_;
 140                std::unique_ptr<Task> task_;
 141                double delay_in_seconds_;
 142              };
 143
 144              static void RunTask(uv_timer_t* timer) {
 145                DelayedTaskScheduler* scheduler =
 146                    ContainerOf(&DelayedTaskScheduler::loop_, timer->loop);
 147                scheduler->pending_worker_tasks_->Push(scheduler->TakeTimerTask(timer));
 148              }
 149
 150              std::unique_ptr<Task> TakeTimerTask(uv_timer_t* timer) {
 151                std::unique_ptr<Task> task(static_cast<Task*>(timer->data));
 152                uv_timer_stop(timer);
 153                uv_close(reinterpret_cast<uv_handle_t*>(timer), [](uv_handle_t* handle) {
 154                  delete reinterpret_cast<uv_timer_t*>(handle);
 155                });
 156                timers_.erase(timer);
 157                return task;
 158              }
 159
 160              uv_sem_t ready_;
 161              TaskQueue<v8::Task>* pending_worker_tasks_;
 162
 163              TaskQueue<v8::Task> tasks_;
 164              uv_loop_t loop_;
 165              uv_async_t flush_tasks_;
 166              std::unordered_set<uv_timer_t*> timers_;
 167            };
 168
 169       164  WorkerThreadsTaskRunner::WorkerThreadsTaskRunner(int thread_pool_size) {
 170       164    Mutex platform_workers_mutex;
 171       328    ConditionVariable platform_workers_ready;
 172
 173       328    Mutex::ScopedLock lock(platform_workers_mutex);
 174       164    int pending_platform_workers = thread_pool_size;
 175
 176              delayed_task_scheduler_.reset(
 177       164        new DelayedTaskScheduler(&pending_worker_tasks_));
 178       164    threads_.push_back(delayed_task_scheduler_->Start());
 179
 180       820    for (int i = 0; i < thread_pool_size; i++) {
 181                PlatformWorkerData* worker_data = new PlatformWorkerData{
 182                  &pending_worker_tasks_, &platform_workers_mutex,
 183                  &platform_workers_ready, &pending_platform_workers, i
 184       656      };
 185       656      std::unique_ptr<uv_thread_t> t { new uv_thread_t() };
 186       656      if (uv_thread_create(t.get(), PlatformWorkerThread,
 187       656                           worker_data) != 0) {
 188                  break;
 189                }
 190       656      threads_.push_back(std::move(t));
 191       656    }
 192
 193              // Wait for platform workers to initialize before continuing with the
 194              // bootstrap.
 195       882    while (pending_platform_workers > 0) {
 196       554      platform_workers_ready.Wait(lock);
 197       164    }
 198       164  }
 199
 200     20810  void WorkerThreadsTaskRunner::PostTask(std::unique_ptr<Task> task) {
 201     20810    pending_worker_tasks_.Push(std::move(task));
 202     20810  }
 203
 204            void WorkerThreadsTaskRunner::PostDelayedTask(std::unique_ptr<v8::Task> task,
 205                                                          double delay_in_seconds) {
 206              delayed_task_scheduler_->PostDelayedTask(std::move(task), delay_in_seconds);
 207            }
 208
 209       370  void WorkerThreadsTaskRunner::BlockingDrain() {
 210       370    pending_worker_tasks_.BlockingDrain();
 211       370  }
 212
 213       164  void WorkerThreadsTaskRunner::Shutdown() {
 214       164    pending_worker_tasks_.Stop();
 215       164    delayed_task_scheduler_->Stop();
 216       984    for (size_t i = 0; i < threads_.size(); i++) {
 217       820      CHECK_EQ(0, uv_thread_join(threads_[i].get()));
 218              }
 219       164  }
 220
 221       198  int WorkerThreadsTaskRunner::NumberOfWorkerThreads() const {
 222       198    return threads_.size();
 223            }
 224
 225       164  PerIsolatePlatformData::PerIsolatePlatformData(
 226                v8::Isolate* isolate, uv_loop_t* loop)
 227       164    : loop_(loop) {
 228       164    flush_tasks_ = new uv_async_t();
 229       164    CHECK_EQ(0, uv_async_init(loop, flush_tasks_, FlushTasks));
 230       164    flush_tasks_->data = static_cast<void*>(this);
 231       164    uv_unref(reinterpret_cast<uv_handle_t*>(flush_tasks_));
 232       164  }
 233
 234       371  void PerIsolatePlatformData::FlushTasks(uv_async_t* handle) {
 235       371    auto platform_data = static_cast<PerIsolatePlatformData*>(handle->data);
 236       371    platform_data->FlushForegroundTasksInternal();
 237       371  }
 238
 239            void PerIsolatePlatformData::PostIdleTask(std::unique_ptr<v8::IdleTask> task) {
 240              UNREACHABLE();
 241            }
 242
 243       452  void PerIsolatePlatformData::PostTask(std::unique_ptr<Task> task) {
 244       452    CHECK_NE(flush_tasks_, nullptr);
 245       452    foreground_tasks_.Push(std::move(task));
 246       452    uv_async_send(flush_tasks_);
 247       452  }
 248
 249        10  void PerIsolatePlatformData::PostDelayedTask(
 250                std::unique_ptr<Task> task, double delay_in_seconds) {
 251        10    CHECK_NE(flush_tasks_, nullptr);
 252        10    std::unique_ptr<DelayedTask> delayed(new DelayedTask());
 253        10    delayed->task = std::move(task);
 254        10    delayed->platform_data = shared_from_this();
 255        10    delayed->timeout = delay_in_seconds;
 256        10    foreground_delayed_tasks_.Push(std::move(delayed));
 257        10    uv_async_send(flush_tasks_);
 258        10  }
 259
 260       306  PerIsolatePlatformData::~PerIsolatePlatformData() {
 261       153    Shutdown();
 262       153  }
 263
 264       307  void PerIsolatePlatformData::Shutdown() {
 265       307    if (flush_tasks_ == nullptr)
 266       460      return;
 267
 268       154    CHECK_NULL(foreground_delayed_tasks_.Pop());
 269       154    CHECK_NULL(foreground_tasks_.Pop());
 270       154    CancelPendingDelayedTasks();
 271
 272              uv_close(reinterpret_cast<uv_handle_t*>(flush_tasks_),
 273       154             [](uv_handle_t* handle) {
 274                delete reinterpret_cast<uv_async_t*>(handle);
 275       308    });
 276       154    flush_tasks_ = nullptr;
 277            }
 278
 279       164  void PerIsolatePlatformData::ref() {
 280       164    ref_count_++;
 281       164  }
 282
 283       308  int PerIsolatePlatformData::unref() {
 284       308    return --ref_count_;
 285            }
 286
 287       164  NodePlatform::NodePlatform(int thread_pool_size,
 288       164                             TracingController* tracing_controller) {
 289       164    if (tracing_controller) {
 290       164      tracing_controller_ = tracing_controller;
 291              } else {
 292                tracing_controller_ = new TracingController();
 293              }
 294       328    worker_thread_task_runner_ =
 295       164        std::make_shared<WorkerThreadsTaskRunner>(thread_pool_size);
 296       164  }
 297
 298       328  void NodePlatform::RegisterIsolate(Isolate* isolate, uv_loop_t* loop) {
 299       328    Mutex::ScopedLock lock(per_isolate_mutex_);
 300       656    std::shared_ptr<PerIsolatePlatformData> existing = per_isolate_[isolate];
 301       328    if (existing) {
 302       164      CHECK_EQ(loop, existing->event_loop());
 303       164      existing->ref();
 304              } else {
 305       328      per_isolate_[isolate] =
 306       164          std::make_shared<PerIsolatePlatformData>(isolate, loop);
 307       328    }
 308       328  }
 309
 310       308  void NodePlatform::UnregisterIsolate(Isolate* isolate) {
 311       308    Mutex::ScopedLock lock(per_isolate_mutex_);
 312       616    std::shared_ptr<PerIsolatePlatformData> existing = per_isolate_[isolate];
 313       308    CHECK(existing);
 314       308    if (existing->unref() == 0) {
 315       154      existing->Shutdown();
 316       154      per_isolate_.erase(isolate);
 317       308    }
 318       308  }
 319
 320       164  void NodePlatform::Shutdown() {
 321       164    worker_thread_task_runner_->Shutdown();
 322
 323              {
 324       164      Mutex::ScopedLock lock(per_isolate_mutex_);
 325       164      per_isolate_.clear();
 326              }
 327       164  }
 328
 329       198  int NodePlatform::NumberOfWorkerThreads() {
 330       198    return worker_thread_task_runner_->NumberOfWorkerThreads();
 331            }
 332
 333       442  void PerIsolatePlatformData::RunForegroundTask(std::unique_ptr<Task> task) {
 334       442    Isolate* isolate = Isolate::GetCurrent();
 335       442    HandleScope scope(isolate);
 336       442    Environment* env = Environment::GetCurrent(isolate);
 337              InternalCallbackScope cb_scope(env, Local<Object>(), { 0, 0 },
 338      1326                                   InternalCallbackScope::kAllowEmptyResource);
 339       884    task->Run();
 340       442  }
 341
 342            void PerIsolatePlatformData::DeleteFromScheduledTasks(DelayedTask* task) {
 343              auto it = std::find_if(scheduled_delayed_tasks_.begin(),
 344                                     scheduled_delayed_tasks_.end(),
 345                                     [task](const DelayedTaskPointer& delayed) -> bool {
 346                      return delayed.get() == task;
 347                  });
 348              CHECK_NE(it, scheduled_delayed_tasks_.end());
 349              scheduled_delayed_tasks_.erase(it);
 350            }
 351
 352            void PerIsolatePlatformData::RunForegroundTask(uv_timer_t* handle) {
 353              DelayedTask* delayed = static_cast<DelayedTask*>(handle->data);
 354              RunForegroundTask(std::move(delayed->task));
 355              delayed->platform_data->DeleteFromScheduledTasks(delayed);
 356            }
 357
 358       308  void PerIsolatePlatformData::CancelPendingDelayedTasks() {
 359       308    scheduled_delayed_tasks_.clear();
 360       308  }
 361
 362       308  void NodePlatform::DrainTasks(Isolate* isolate) {
 363       308    std::shared_ptr<PerIsolatePlatformData> per_isolate = ForIsolate(isolate);
 364
 365       370    do {
 366                // Worker tasks aren't associated with an Isolate.
 367       370      worker_thread_task_runner_->BlockingDrain();
 368       678    } while (per_isolate->FlushForegroundTasksInternal());
 369       308  }
 370
 371       741  bool PerIsolatePlatformData::FlushForegroundTasksInternal() {
 372       741    bool did_work = false;
 373
 374              while (std::unique_ptr<DelayedTask> delayed =
 375       751        foreground_delayed_tasks_.Pop()) {
 376        10      did_work = true;
 377                uint64_t delay_millis =
 378        10          static_cast<uint64_t>(delayed->timeout + 0.5) * 1000;
 379        10      delayed->timer.data = static_cast<void*>(delayed.get());
 380        10      uv_timer_init(loop_, &delayed->timer);
 381                // Timers may not guarantee queue ordering of events with the same delay if
 382                // the delay is non-zero. This should not be a problem in practice.
 383        10      uv_timer_start(&delayed->timer, RunForegroundTask, delay_millis, 0);
 384        10      uv_unref(reinterpret_cast<uv_handle_t*>(&delayed->timer));
 385
 386        10      scheduled_delayed_tasks_.emplace_back(delayed.release(),
 387         1                                            [](DelayedTask* delayed) {
 388                  uv_close(reinterpret_cast<uv_handle_t*>(&delayed->timer),
 389         1                 [](uv_handle_t* handle) {
 390                    delete static_cast<DelayedTask*>(handle->data);
 391         2        });
 392        11      });
 393        10    }
 394              // Move all foreground tasks into a separate queue and flush that queue.
 395              // This way tasks that are posted while flushing the queue will be run on the
 396              // next call of FlushForegroundTasksInternal.
 397       741    std::queue<std::unique_ptr<Task>> tasks = foreground_tasks_.PopAll();
 398      1924    while (!tasks.empty()) {
 399       442      std::unique_ptr<Task> task = std::move(tasks.front());
 400       442      tasks.pop();
 401       442      did_work = true;
 402       442      RunForegroundTask(std::move(task));
 403       442    }
 404       751    return did_work;
 405            }
 406
 407     20810  void NodePlatform::CallOnWorkerThread(std::unique_ptr<v8::Task> task) {
 408     20810    worker_thread_task_runner_->PostTask(std::move(task));
 409     20810  }
 410
 411            void NodePlatform::CallDelayedOnWorkerThread(std::unique_ptr<v8::Task> task,
 412                                                         double delay_in_seconds) {
 413              worker_thread_task_runner_->PostDelayedTask(std::move(task),
 414                                                          delay_in_seconds);
 415            }
 416
 417
 418            std::shared_ptr<PerIsolatePlatformData>
 419      1242  NodePlatform::ForIsolate(Isolate* isolate) {
 420      1242    Mutex::ScopedLock lock(per_isolate_mutex_);
 421      1242    std::shared_ptr<PerIsolatePlatformData> data = per_isolate_[isolate];
 422      1242    CHECK(data);
 423      1242    return data;
 424            }
 425
 426            void NodePlatform::CallOnForegroundThread(Isolate* isolate, Task* task) {
 427              ForIsolate(isolate)->PostTask(std::unique_ptr<Task>(task));
 428            }
 429
 430            void NodePlatform::CallDelayedOnForegroundThread(Isolate* isolate,
 431                                                             Task* task,
 432                                                             double delay_in_seconds) {
 433              ForIsolate(isolate)->PostDelayedTask(
 434                std::unique_ptr<Task>(task), delay_in_seconds);
 435            }
 436
 437            bool NodePlatform::FlushForegroundTasks(v8::Isolate* isolate) {
 438              return ForIsolate(isolate)->FlushForegroundTasksInternal();
 439            }
 440
 441       154  void NodePlatform::CancelPendingDelayedTasks(v8::Isolate* isolate) {
 442       154    ForIsolate(isolate)->CancelPendingDelayedTasks();
 443       154  }
 444
 445      1434  bool NodePlatform::IdleTasksEnabled(Isolate* isolate) { return false; }
 446
 447            std::shared_ptr<v8::TaskRunner>
 448       780  NodePlatform::GetForegroundTaskRunner(Isolate* isolate) {
 449       780    return ForIsolate(isolate);
 450            }
 451
 452    182096  double NodePlatform::MonotonicallyIncreasingTime() {
 453              // Convert nanos to seconds.
 454    182096    return uv_hrtime() / 1e9;
 455            }
 456
 457   6876847  double NodePlatform::CurrentClockTimeMillis() {
 458   6876847    return SystemClockTimeMillis();
 459            }
 460
 461      9133  TracingController* NodePlatform::GetTracingController() {
 462      9133    return tracing_controller_;
 463            }
 464
 465            template <class T>
 466       656  TaskQueue<T>::TaskQueue()
 467                : lock_(), tasks_available_(), tasks_drained_(),
 468       656        outstanding_tasks_(0), stopped_(false), task_queue_() { }
 469
 470            template <class T>
 471     21436  void TaskQueue<T>::Push(std::unique_ptr<T> task) {
 472     21436    Mutex::ScopedLock scoped_lock(lock_);
 473     21436    outstanding_tasks_++;
 474     21436    task_queue_.push(std::move(task));
 475     21436    tasks_available_.Signal(scoped_lock);
 476     21436  }
 477
 478            template <class T>
 479      1387  std::unique_ptr<T> TaskQueue<T>::Pop() {
 480      1387    Mutex::ScopedLock scoped_lock(lock_);
 481      1387    if (task_queue_.empty()) {
 482      1213      return std::unique_ptr<T>(nullptr);
 483              }
 484       348    std::unique_ptr<T> result = std::move(task_queue_.front());
 485       174    task_queue_.pop();
 486      1561    return result;
 487            }
 488
 489            template <class T>
 490     21465  std::unique_ptr<T> TaskQueue<T>::BlockingPop() {
 491     21465    Mutex::ScopedLock scoped_lock(lock_);
 492     59896    while (task_queue_.empty() && !stopped_) {
 493     16964      tasks_available_.Wait(scoped_lock);
 494              }
 495     21466    if (stopped_) {
 496       656      return std::unique_ptr<T>(nullptr);
 497              }
 498     41620    std::unique_ptr<T> result = std::move(task_queue_.front());
 499     20810    task_queue_.pop();
 500     42276    return result;
 501            }
 502
 503            template <class T>
 504     20774  void TaskQueue<T>::NotifyOfCompletion() {
 505     20774    Mutex::ScopedLock scoped_lock(lock_);
 506     20810    if (--outstanding_tasks_ == 0) {
 507      7310      tasks_drained_.Broadcast(scoped_lock);
 508     20810    }
 509     20794  }
 510
 511            template <class T>
 512       370  void TaskQueue<T>::BlockingDrain() {
 513       370    Mutex::ScopedLock scoped_lock(lock_);
 514       746    while (outstanding_tasks_ > 0) {
 515         6      tasks_drained_.Wait(scoped_lock);
 516       370    }
 517       370  }
 518
 519            template <class T>
 520       164  void TaskQueue<T>::Stop() {
 521       164    Mutex::ScopedLock scoped_lock(lock_);
 522       164    stopped_ = true;
 523       164    tasks_available_.Broadcast(scoped_lock);
 524       164  }
 525
 526            template <class T>
 527       741  std::queue<std::unique_ptr<T>> TaskQueue<T>::PopAll() {
 528       741    Mutex::ScopedLock scoped_lock(lock_);
 529       741    std::queue<std::unique_ptr<T>> result;
 530       741    result.swap(task_queue_);
 531       741    return result;
 532            }
 533
 534            }  // namespace node
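
The hottest lines in this report are the pairing of TaskQueue<T>::Push(), BlockingPop(), NotifyOfCompletion() and BlockingDrain() with the PlatformWorkerThread() loop. The standalone sketch below re-implements that blocking-queue pattern with std::mutex and std::condition_variable instead of node's Mutex and ConditionVariable wrappers; every name in it is invented for illustration, and it is not part of node_platform.cc or of this coverage data.

// Standalone sketch of the blocking task-queue pattern shown above, using the
// C++ standard library. Illustrative only; not node_platform.cc code.
#include <condition_variable>
#include <iostream>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>
#include <vector>

template <class T>
class BlockingQueue {
 public:
  void Push(std::unique_ptr<T> item) {
    std::lock_guard<std::mutex> lock(mutex_);
    outstanding_++;
    queue_.push(std::move(item));
    available_.notify_one();
  }

  // Blocks until an item arrives; returns nullptr once Stop() has been
  // called, mirroring TaskQueue<T>::BlockingPop().
  std::unique_ptr<T> BlockingPop() {
    std::unique_lock<std::mutex> lock(mutex_);
    available_.wait(lock, [this] { return stopped_ || !queue_.empty(); });
    if (stopped_) return nullptr;
    std::unique_ptr<T> result = std::move(queue_.front());
    queue_.pop();
    return result;
  }

  // Consumers call this after finishing an item, like NotifyOfCompletion().
  void NotifyOfCompletion() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (--outstanding_ == 0) drained_.notify_all();
  }

  // Blocks until every pushed item has completed, like BlockingDrain().
  void BlockingDrain() {
    std::unique_lock<std::mutex> lock(mutex_);
    drained_.wait(lock, [this] { return outstanding_ == 0; });
  }

  // Wakes all blocked consumers and makes BlockingPop() return nullptr.
  void Stop() {
    std::lock_guard<std::mutex> lock(mutex_);
    stopped_ = true;
    available_.notify_all();
  }

 private:
  std::mutex mutex_;
  std::condition_variable available_;
  std::condition_variable drained_;
  std::queue<std::unique_ptr<T>> queue_;
  int outstanding_ = 0;
  bool stopped_ = false;
};

struct PrintTask {
  int id;
  void Run() { std::cout << "task " << id << " ran\n"; }
};

int main() {
  BlockingQueue<PrintTask> queue;

  // Worker threads loop on BlockingPop() until Stop(), the way
  // PlatformWorkerThread() loops on the worker task queue above.
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) {
    workers.emplace_back([&queue] {
      while (std::unique_ptr<PrintTask> task = queue.BlockingPop()) {
        task->Run();
        queue.NotifyOfCompletion();
      }
    });
  }

  for (int i = 0; i < 8; i++)
    queue.Push(std::unique_ptr<PrintTask>(new PrintTask{i}));

  queue.BlockingDrain();  // wait for all posted tasks, as DrainTasks() does
  queue.Stop();           // then release the workers, as Shutdown() does
  for (std::thread& worker : workers) worker.join();
}

Node's version holds v8::Task objects and is drained from NodePlatform::DrainTasks(), but the shape of the synchronization (one condition variable for availability, one for drain, a stop flag checked after waking) is the same.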
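DelayedTaskScheduler above also illustrates a second recurring pattern: a helper thread that owns its own libuv event loop, signals readiness through a semaphore, and is woken from other threads with uv_async_send(). The following minimal sketch, assuming only the public libuv API, shows that Start()/Run() handshake in isolation; the type and function names are invented for illustration and are not node's.

// Minimal sketch of the semaphore handshake and uv_async wake-up used by
// DelayedTaskScheduler. Illustrative only; not node_platform.cc code.
#include <cstdio>
#include <uv.h>

struct LoopThread {
  uv_loop_t loop;
  uv_async_t wakeup;
  uv_sem_t ready;
};

static void OnWakeup(uv_async_t* handle) {
  std::printf("woken on the loop thread\n");
  // Closing the only handle lets uv_run() return so the thread can exit.
  uv_close(reinterpret_cast<uv_handle_t*>(handle), nullptr);
}

static void Run(void* arg) {
  LoopThread* self = static_cast<LoopThread*>(arg);
  uv_loop_init(&self->loop);
  uv_async_init(&self->loop, &self->wakeup, OnWakeup);
  uv_sem_post(&self->ready);  // loop is set up: unblock the starting thread
  uv_run(&self->loop, UV_RUN_DEFAULT);
  uv_loop_close(&self->loop);
}

int main() {
  LoopThread lt;
  uv_thread_t thread;
  uv_sem_init(&lt.ready, 0);
  uv_thread_create(&thread, Run, &lt);
  uv_sem_wait(&lt.ready);     // mirrors DelayedTaskScheduler::Start()
  uv_sem_destroy(&lt.ready);

  uv_async_send(&lt.wakeup);  // wake the loop from another thread
  uv_thread_join(&thread);
  return 0;
}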