GCC Code Coverage Report
Directory: ../
File: /home/iojs/build/workspace/node-test-commit-linux-coverage-daily/nodes/benchmark/out/../src/tracing/node_trace_buffer.cc
Date: 2019-02-23 22:23:05
Lines: 90 of 108 (83.3 %)
Branches: 25 of 52 (48.1 %)

#include "tracing/node_trace_buffer.h"
#include "util-inl.h"

namespace node {
namespace tracing {

InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                         Agent* agent)
    : flushing_(false), max_chunks_(max_chunks),
      agent_(agent), id_(id) {
  chunks_.resize(max_chunks);
}

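// Reserves a slot in the newest chunk, allocating or recycling a chunk when
// the last one is full, and writes out a handle that encodes where the event
// was stored so it can be looked up again via GetEventByHandle().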
TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  // Create new chunk if last chunk is full or there is no chunk.
  if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    auto& chunk = chunks_[total_chunks_++];
    if (chunk) {
      chunk->Reset(current_chunk_seq_++);
    } else {
      chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
    }
  }
  auto& chunk = chunks_[total_chunks_ - 1];
  size_t event_index;
  TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  return trace_object;
}

TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  if (handle == 0) {
    // A handle value of zero never has a trace event associated with it.
    return nullptr;
  }
  size_t chunk_index, event_index;
  uint32_t buffer_id, chunk_seq;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  if (buffer_id != id_ || chunk_index >= total_chunks_) {
    // Either the chunk belongs to the other buffer, or is outside the current
    // range of chunks loaded in memory (the latter being true suggests that
    // the chunk has already been flushed and is no longer in memory.)
    return nullptr;
  }
  auto& chunk = chunks_[chunk_index];
  if (chunk->seq() != chunk_seq) {
    // Chunk is no longer in memory.
    return nullptr;
  }
  return chunk->GetEventAt(event_index);
}

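// Drains every chunk into the agent and resets the buffer so the chunks can
// be reused for subsequent events, then asks the agent to flush; the agent
// flush blocks only when |blocking| is true.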
void InternalTraceBuffer::Flush(bool blocking) {
  {
    Mutex::ScopedLock scoped_lock(mutex_);
    if (total_chunks_ > 0) {
      flushing_ = true;
      for (size_t i = 0; i < total_chunks_; ++i) {
        auto& chunk = chunks_[i];
        for (size_t j = 0; j < chunk->size(); ++j) {
          TraceObject* trace_event = chunk->GetEventAt(j);
          // Another thread may have added a trace that is yet to be
          // initialized. Skip such traces.
          // https://github.com/nodejs/node/issues/21038.
          if (trace_event->name()) {
            agent_->AppendTraceEvent(trace_event);
          }
        }
      }
      total_chunks_ = 0;
      flushing_ = false;
    }
  }
  agent_->Flush(blocking);
}

uint64_t InternalTraceBuffer::MakeHandle(
    size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
          chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
}

void InternalTraceBuffer::ExtractHandle(
    uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
    uint32_t* chunk_seq, size_t* event_index) const {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  size_t indices = handle % Capacity();
  *chunk_index = indices / TraceBufferChunk::kChunkSize;
  *event_index = indices % TraceBufferChunk::kChunkSize;
}

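// Worked example of the handle layout (illustrative values only; the real
// constants live in node_trace_buffer.h, where the division/modulo above
// implies Capacity() == max_chunks_ * TraceBufferChunk::kChunkSize). Assuming
// kChunkSize == 64 and max_chunks_ == 1024, Capacity() == 65536, so
//   MakeHandle(/* chunk_index */ 2, /* chunk_seq */ 5, /* event_index */ 7)
//     == ((5 * 65536 + 2 * 64 + 7) << 1) + id_ == 655630 + id_.
// ExtractHandle() inverts the arithmetic: the low bit recovers the buffer id,
// dividing the rest by Capacity() recovers chunk_seq, and the remainder
// splits into chunk_index and event_index.
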
NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
    Agent* agent, uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop),
      buffer1_(max_chunks, 0, agent),
      buffer2_(max_chunks, 1, agent) {
  current_buf_.store(&buffer1_);

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          NonBlockingFlushSignalCb);
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}

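// Destruction wakes the tracing thread via exit_signal_, then blocks on
// exit_cond_ until ExitSignalCb() has closed both async handles and set
// exited_.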
NodeTraceBuffer::~NodeTraceBuffer() {
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(exit_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}

TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
  // If the buffer is full, attempt to perform a flush.
  if (!TryLoadAvailableBuffer()) {
    // Assign a value of zero as the trace event handle.
    // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
    // and will cause GetEventByHandle to return NULL if passed as an argument.
    *handle = 0;
    return nullptr;
  }
  return current_buf_.load()->AddTraceEvent(handle);
}

TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
  return current_buf_.load()->GetEventByHandle(handle);
}

bool NodeTraceBuffer::Flush() {
  buffer1_.Flush(true);
  buffer2_.Flush(true);
  return true;
}

// Attempts to set current_buf_ such that it references a buffer that can
// write at least one trace event. If both buffers are unavailable this
// method returns false; otherwise it returns true.
bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  InternalTraceBuffer* prev_buf = current_buf_.load();
  if (prev_buf->IsFull()) {
    uv_async_send(&flush_signal_);  // trigger flush on a separate thread
    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
      &buffer2_ : &buffer1_;
    if (!other_buf->IsFull()) {
      current_buf_.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}

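// Runs on the tracing thread whenever flush_signal_ fires: whichever internal
// buffer has filled up (and is not already being flushed) is drained without
// blocking the thread that produced the trace events.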
// static
void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = static_cast<NodeTraceBuffer*>(signal->data);
  if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
    buffer->buffer1_.Flush(false);
  }
  if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
    buffer->buffer2_.Flush(false);
  }
}

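// Runs on the tracing thread in response to exit_signal_. The two uv_close()
// calls are chained: flush_signal_ is closed first, its close callback then
// closes exit_signal_, and that final callback signals the waiting destructor
// that it is safe to return.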
// static
void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer =
      ContainerOf(&NodeTraceBuffer::exit_signal_, signal);

  // Close both flush_signal_ and exit_signal_.
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_),
           [](uv_handle_t* signal) {
    NodeTraceBuffer* buffer =
        ContainerOf(&NodeTraceBuffer::flush_signal_,
                    reinterpret_cast<uv_async_t*>(signal));

    uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
             [](uv_handle_t* signal) {
      NodeTraceBuffer* buffer =
          ContainerOf(&NodeTraceBuffer::exit_signal_,
                      reinterpret_cast<uv_async_t*>(signal));
      Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
      buffer->exited_ = true;
      buffer->exit_cond_.Signal(scoped_lock);
    });
  });
}

}  // namespace tracing
}  // namespace node