GCC Code Coverage Report
Directory: ../
File:     /home/iojs/build/workspace/node-test-commit-linux-coverage/nodes/benchmark/out/../src/tracing/node_trace_buffer.cc
Date:     2019-01-07 12:15:22
Lines:    88 of 106 executed  (83.0 % coverage)
Branches: 25 of 52 executed   (48.1 % coverage)

  Exec  Source
        #include "tracing/node_trace_buffer.h"

        namespace node {
        namespace tracing {

   134  InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                                 Agent* agent)
            : flushing_(false), max_chunks_(max_chunks),
   134        agent_(agent), id_(id) {
   134    chunks_.resize(max_chunks);
   134  }

  2278  TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  2278    Mutex::ScopedLock scoped_lock(mutex_);
          // Create new chunk if last chunk is full or there is no chunk.
  2278    if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    85      auto& chunk = chunks_[total_chunks_++];
    85      if (chunk) {
              chunk->Reset(current_chunk_seq_++);
            } else {
    85        chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
            }
          }
  2278    auto& chunk = chunks_[total_chunks_ - 1];
          size_t event_index;
  2278    TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  2278    *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  2278    return trace_object;
        }

   108  TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
   108    Mutex::ScopedLock scoped_lock(mutex_);
   108    if (handle == 0) {
            // A handle value of zero never has a trace event associated with it.
            return nullptr;
          }
          size_t chunk_index, event_index;
          uint32_t buffer_id, chunk_seq;
   108    ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
   108    if (buffer_id != id_ || chunk_index >= total_chunks_) {
            // Either the chunk belongs to the other buffer, or is outside the current
            // range of chunks loaded in memory (the latter being true suggests that
            // the chunk has already been flushed and is no longer in memory.)
            return nullptr;
          }
   108    auto& chunk = chunks_[chunk_index];
   108    if (chunk->seq() != chunk_seq) {
            // Chunk is no longer in memory.
            return nullptr;
          }
   108    return chunk->GetEventAt(event_index);
        }

   274  void InternalTraceBuffer::Flush(bool blocking) {
          {
   274      Mutex::ScopedLock scoped_lock(mutex_);
   274      if (total_chunks_ > 0) {
    62        flushing_ = true;
   147        for (size_t i = 0; i < total_chunks_; ++i) {
    85          auto& chunk = chunks_[i];
  2363          for (size_t j = 0; j < chunk->size(); ++j) {
  2278            TraceObject* trace_event = chunk->GetEventAt(j);
                  // Another thread may have added a trace that is yet to be
                  // initialized. Skip such traces.
                  // https://github.com/nodejs/node/issues/21038.
  2278            if (trace_event->name()) {
  2278              agent_->AppendTraceEvent(trace_event);
                  }
                }
              }
    62        total_chunks_ = 0;
    62        flushing_ = false;
   274      }
          }
   274    agent_->Flush(blocking);
   274  }

  2278  uint64_t InternalTraceBuffer::MakeHandle(
            size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  4556    return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
  4556            chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
        }

   108  void InternalTraceBuffer::ExtractHandle(
            uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
            uint32_t* chunk_seq, size_t* event_index) const {
   108    *buffer_id = static_cast<uint32_t>(handle & 0x1);
   108    handle >>= 1;
   108    *chunk_seq = static_cast<uint32_t>(handle / Capacity());
   108    size_t indices = handle % Capacity();
   108    *chunk_index = indices / TraceBufferChunk::kChunkSize;
   108    *event_index = indices % TraceBufferChunk::kChunkSize;
   108  }
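
A worked round-trip of the handle packing above, as a minimal sketch; the concrete numbers (Capacity() == 65536, TraceBufferChunk::kChunkSize == 64, buffer id_ == 1) are illustrative assumptions, not values taken from this report:

        // Assumed for illustration: Capacity() == 65536, kChunkSize == 64, id_ == 1.
        // MakeHandle(chunk_index = 2, chunk_seq = 3, event_index = 5):
        //   ((3 * 65536 + 2 * 64 + 5) << 1) + 1  ==  (196741 << 1) + 1  ==  393483
        // ExtractHandle(393483) reverses the packing:
        //   buffer_id   = 393483 & 0x1    == 1
        //   handle    >>= 1               -> 196741
        //   chunk_seq   = 196741 / 65536  == 3
        //   indices     = 196741 % 65536  == 133
        //   chunk_index = 133 / 64        == 2
        //   event_index = 133 % 64        == 5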

    67  NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
            Agent* agent, uv_loop_t* tracing_loop)
            : tracing_loop_(tracing_loop),
              buffer1_(max_chunks, 0, agent),
    67        buffer2_(max_chunks, 1, agent) {
    67    current_buf_.store(&buffer1_);

    67    flush_signal_.data = this;
          int err = uv_async_init(tracing_loop_, &flush_signal_,
    67                            NonBlockingFlushSignalCb);
    67    CHECK_EQ(err, 0);

    67    exit_signal_.data = this;
    67    err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
    67    CHECK_EQ(err, 0);
    67  }

   201  NodeTraceBuffer::~NodeTraceBuffer() {
    67    uv_async_send(&exit_signal_);
    67    Mutex::ScopedLock scoped_lock(exit_mutex_);
   186    while (!exited_) {
    52      exit_cond_.Wait(scoped_lock);
    67    }
   134  }

  2278  TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
          // If the buffer is full, attempt to perform a flush.
  2278    if (!TryLoadAvailableBuffer()) {
            // Assign a value of zero as the trace event handle.
            // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
            // and will cause GetEventByHandle to return NULL if passed as an argument.
            *handle = 0;
            return nullptr;
          }
  2278    return current_buf_.load()->AddTraceEvent(handle);
        }

   108  TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
   108    return current_buf_.load()->GetEventByHandle(handle);
        }
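
A minimal usage sketch of the two entry points above; the name buffer is an assumed NodeTraceBuffer*, and the surrounding tracing controller is not shown:

        uint64_t handle;
        TraceObject* event = buffer->AddTraceEvent(&handle);
        if (event != nullptr) {
          // ... the caller populates the returned TraceObject ...
          // While its chunk is still in memory, the handle resolves to the same slot.
          TraceObject* same = buffer->GetEventByHandle(handle);
        } else {
          // Both internal buffers were full: handle == 0, and
          // GetEventByHandle(0) returns nullptr.
        }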

   137  bool NodeTraceBuffer::Flush() {
   137    buffer1_.Flush(true);
   137    buffer2_.Flush(true);
   137    return true;
        }

        // Attempts to set current_buf_ such that it references a buffer that can
        // write at least one trace event. If both buffers are unavailable this
        // method returns false; otherwise it returns true.
  2278  bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  2278    InternalTraceBuffer* prev_buf = current_buf_.load();
  2278    if (prev_buf->IsFull()) {
            uv_async_send(&flush_signal_);  // trigger flush on a separate thread
            InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
              &buffer2_ : &buffer1_;
            if (!other_buf->IsFull()) {
              current_buf_.store(other_buf);
            } else {
              return false;
            }
          }
  2278    return true;
        }
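
        // Note: the uv_async_send(&flush_signal_) above wakes
        // NonBlockingFlushSignalCb (below) on tracing_loop_, which performs a
        // non-blocking Flush() of whichever internal buffer has filled up and
        // is not already flushing.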

        // static
        void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
          NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
          if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
            buffer->buffer1_.Flush(false);
          }
          if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
            buffer->buffer2_.Flush(false);
          }
        }

        // static
    67  void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
    67    NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
    67    uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_), nullptr);
          uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
   201             [](uv_handle_t* signal) {
              NodeTraceBuffer* buffer =
    67            reinterpret_cast<NodeTraceBuffer*>(signal->data);
    67        Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
    67        buffer->exited_ = true;
    67        buffer->exit_cond_.Signal(scoped_lock);
   268    });
    67  }

        }  // namespace tracing
        }  // namespace node