GCC Code Coverage Report
Directory: ../
File: /home/iojs/build/workspace/node-test-commit-linux-coverage-daily/nodes/benchmark/out/../src/tracing/node_trace_buffer.cc
Date: 2019-02-01 22:03:38
Lines: 0 / 98 (0.0 %)
Branches: 0 / 52 (0.0 %)

Source:
#include "tracing/node_trace_buffer.h"

namespace node {
namespace tracing {

InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                         Agent* agent)
    : flushing_(false), max_chunks_(max_chunks),
      agent_(agent), id_(id) {
  chunks_.resize(max_chunks);
}

TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  // Create new chunk if last chunk is full or there is no chunk.
  if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    auto& chunk = chunks_[total_chunks_++];
    if (chunk) {
      chunk->Reset(current_chunk_seq_++);
    } else {
      chunk.reset(new TraceBufferChunk(current_chunk_seq_++));
    }
  }
  auto& chunk = chunks_[total_chunks_ - 1];
  size_t event_index;
  TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  return trace_object;
}

TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  if (handle == 0) {
    // A handle value of zero never has a trace event associated with it.
    return nullptr;
  }
  size_t chunk_index, event_index;
  uint32_t buffer_id, chunk_seq;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  if (buffer_id != id_ || chunk_index >= total_chunks_) {
    // Either the chunk belongs to the other buffer, or is outside the current
    // range of chunks loaded in memory (the latter being true suggests that
    // the chunk has already been flushed and is no longer in memory.)
    return nullptr;
  }
  auto& chunk = chunks_[chunk_index];
  if (chunk->seq() != chunk_seq) {
    // Chunk is no longer in memory.
    return nullptr;
  }
  return chunk->GetEventAt(event_index);
}

void InternalTraceBuffer::Flush(bool blocking) {
  {
    Mutex::ScopedLock scoped_lock(mutex_);
    if (total_chunks_ > 0) {
      flushing_ = true;
      for (size_t i = 0; i < total_chunks_; ++i) {
        auto& chunk = chunks_[i];
        for (size_t j = 0; j < chunk->size(); ++j) {
          TraceObject* trace_event = chunk->GetEventAt(j);
          // Another thread may have added a trace that is yet to be
          // initialized. Skip such traces.
          // https://github.com/nodejs/node/issues/21038.
          if (trace_event->name()) {
            agent_->AppendTraceEvent(trace_event);
          }
        }
      }
      total_chunks_ = 0;
      flushing_ = false;
    }
  }
  agent_->Flush(blocking);
}

uint64_t InternalTraceBuffer::MakeHandle(
    size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
          chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
}

void InternalTraceBuffer::ExtractHandle(
    uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
    uint32_t* chunk_seq, size_t* event_index) const {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  size_t indices = handle % Capacity();
  *chunk_index = indices / TraceBufferChunk::kChunkSize;
  *event_index = indices % TraceBufferChunk::kChunkSize;
}

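MakeHandle() and ExtractHandle() pack a buffer id (in the low bit), a chunk sequence number, a chunk index and an event index into a single uint64_t, with Capacity() acting as the radix for the sequence number. The following is a minimal, self-contained round-trip sketch, not part of the file above; it assumes Capacity() equals max_chunks_ * TraceBufferChunk::kChunkSize, and the chunk size (64) and chunk count (1024) are illustrative values only.

// Round-trip sketch for the handle encoding; kCapacity stands in for
// Capacity(), assumed to be max_chunks_ * TraceBufferChunk::kChunkSize.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kChunkSize = 64;     // illustrative
  const uint64_t kMaxChunks = 1024;   // illustrative
  const uint64_t kCapacity = kMaxChunks * kChunkSize;
  const uint32_t id = 1;  // buffer1_ is constructed with id 0, buffer2_ with id 1

  // Encode, mirroring MakeHandle(): the buffer id lands in the low bit.
  uint64_t chunk_seq = 3, chunk_index = 5, event_index = 7;
  uint64_t handle =
      ((chunk_seq * kCapacity + chunk_index * kChunkSize + event_index) << 1) + id;

  // Decode, mirroring ExtractHandle().
  uint32_t buffer_id = static_cast<uint32_t>(handle & 0x1);
  uint64_t rest = handle >> 1;
  uint64_t seq = rest / kCapacity;
  uint64_t indices = rest % kCapacity;
  assert(buffer_id == id);
  assert(seq == chunk_seq);
  assert(indices / kChunkSize == chunk_index);
  assert(indices % kChunkSize == event_index);
  std::printf("handle %llu round-trips\n",
              static_cast<unsigned long long>(handle));
}
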
NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
    Agent* agent, uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop),
      buffer1_(max_chunks, 0, agent),
      buffer2_(max_chunks, 1, agent) {
  current_buf_.store(&buffer1_);

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          NonBlockingFlushSignalCb);
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}

NodeTraceBuffer::~NodeTraceBuffer() {
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(exit_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}

TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
  // If the buffer is full, attempt to perform a flush.
  if (!TryLoadAvailableBuffer()) {
    // Assign a value of zero as the trace event handle.
    // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
    // and will cause GetEventByHandle to return NULL if passed as an argument.
    *handle = 0;
    return nullptr;
  }
  return current_buf_.load()->AddTraceEvent(handle);
}

TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
  return current_buf_.load()->GetEventByHandle(handle);
}

bool NodeTraceBuffer::Flush() {
  buffer1_.Flush(true);
  buffer2_.Flush(true);
  return true;
}

// Attempts to set current_buf_ such that it references a buffer that can
// write at least one trace event. If both buffers are unavailable this
// method returns false; otherwise it returns true.
bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  InternalTraceBuffer* prev_buf = current_buf_.load();
  if (prev_buf->IsFull()) {
    uv_async_send(&flush_signal_);  // trigger flush on a separate thread
    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
      &buffer2_ : &buffer1_;
    if (!other_buf->IsFull()) {
      current_buf_.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}

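TryLoadAvailableBuffer() implements a double-buffering scheme: when the current InternalTraceBuffer fills up, a non-blocking flush is signalled to the tracing thread and writers switch to the other buffer; if both buffers are full, the event is dropped. The following is a minimal sketch of that flip, not Node's code: FixedBuffer, kSlots and RequestFlush() are hypothetical stand-ins, and the flush runs inline instead of being signalled via uv_async_send().

#include <atomic>
#include <cstdio>

struct FixedBuffer {
  static const int kSlots = 4;   // illustrative capacity
  int used = 0;
  bool IsFull() const { return used >= kSlots; }
  void Flush() { used = 0; }     // drop contents for the sketch
  bool Add() { ++used; return true; }
};

struct DoubleBuffer {
  FixedBuffer a, b;
  std::atomic<FixedBuffer*> current{&a};

  void RequestFlush(FixedBuffer* buf) {
    // Stand-in for uv_async_send(&flush_signal_); emptied inline here.
    buf->Flush();
  }

  bool Add() {
    FixedBuffer* cur = current.load();
    if (cur->IsFull()) {
      RequestFlush(cur);
      FixedBuffer* other = (cur == &a) ? &b : &a;
      if (other->IsFull()) return false;  // both buffers full: drop the event
      current.store(other);
      cur = other;
    }
    return cur->Add();
  }
};

int main() {
  DoubleBuffer db;
  for (int i = 0; i < 10; ++i)
    std::printf("add %d -> %s\n", i, db.Add() ? "stored" : "dropped");
}
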
// static
void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
  if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
    buffer->buffer1_.Flush(false);
  }
  if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
    buffer->buffer2_.Flush(false);
  }
}

// static
void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = reinterpret_cast<NodeTraceBuffer*>(signal->data);
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_), nullptr);
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
           [](uv_handle_t* signal) {
      NodeTraceBuffer* buffer =
          reinterpret_cast<NodeTraceBuffer*>(signal->data);
      Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
      buffer->exited_ = true;
      buffer->exit_cond_.Signal(scoped_lock);
  });
}

}  // namespace tracing
}  // namespace node
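
The destructor and ExitSignalCb() together form a shutdown handshake: the destructor (which may run on any thread) wakes the tracing loop with uv_async_send(), the callback closes both async handles on the loop thread, and the destructor blocks on a condition variable until the close callback confirms completion. Below is a minimal, self-contained sketch of that handshake using plain libuv and the standard library; ShutdownDemo and its members are illustrative names, not part of Node.

// Cross-thread libuv handle close with a condition-variable handshake.
#include <uv.h>
#include <condition_variable>
#include <mutex>
#include <thread>

struct ShutdownDemo {
  uv_loop_t loop;
  uv_async_t exit_signal;
  std::thread loop_thread;
  std::mutex exit_mutex;
  std::condition_variable exit_cond;
  bool exited = false;

  ShutdownDemo() {
    uv_loop_init(&loop);
    exit_signal.data = this;
    uv_async_init(&loop, &exit_signal, ExitSignalCb);
    // Run the loop on its own thread, mirroring tracing_loop_.
    loop_thread = std::thread([this]() { uv_run(&loop, UV_RUN_DEFAULT); });
  }

  ~ShutdownDemo() {
    // Ask the loop thread to close the handle, then wait for confirmation.
    uv_async_send(&exit_signal);
    {
      std::unique_lock<std::mutex> lock(exit_mutex);
      exit_cond.wait(lock, [this]() { return exited; });
    }
    loop_thread.join();
    uv_loop_close(&loop);
  }

  static void ExitSignalCb(uv_async_t* signal) {
    ShutdownDemo* self = static_cast<ShutdownDemo*>(signal->data);
    // Close the handle on the loop thread; the close callback signals the
    // waiting destructor once libuv has finished with the handle.
    uv_close(reinterpret_cast<uv_handle_t*>(&self->exit_signal),
             [](uv_handle_t* handle) {
               ShutdownDemo* self = static_cast<ShutdownDemo*>(handle->data);
               std::lock_guard<std::mutex> lock(self->exit_mutex);
               self->exited = true;
               self->exit_cond.notify_one();
             });
  }
};

int main() {
  ShutdownDemo demo;  // the handshake runs when demo goes out of scope
}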