GCC Code Coverage Report
Directory: ./
File:      tracing/node_trace_buffer.cc
Date:      2022-08-17 04:19:55
                 Exec   Total   Coverage
Lines:             95     112     84.8 %
Branches:          24      50     48.0 %

Line :   Exec : Source
   1 :        : #include "tracing/node_trace_buffer.h"
   2 :        :
   3 :        : #include <memory>
   4 :        : #include "util-inl.h"
   5 :        :
   6 :        : namespace node {
   7 :        : namespace tracing {
   8 :        :
   9 :    116 : InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
  10 :    116 :                                          Agent* agent)
  11 :        :     : flushing_(false), max_chunks_(max_chunks),
  12 :    116 :       agent_(agent), id_(id) {
  13 :    116 :   chunks_.resize(max_chunks);
  14 :    116 : }
  15 :        :
  16 :    889 : TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  17 :    889 :   Mutex::ScopedLock scoped_lock(mutex_);
  18 :        :   // Create new chunk if last chunk is full or there is no chunk.
  19 :    889 :   if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
  20 :     57 :     auto& chunk = chunks_[total_chunks_++];
  21 :     57 :     if (chunk) {
  22 :        :       chunk->Reset(current_chunk_seq_++);
  23 :        :     } else {
  24 :     57 :       chunk = std::make_unique<TraceBufferChunk>(current_chunk_seq_++);
  25 :        :     }
  26 :        :   }
  27 :    889 :   auto& chunk = chunks_[total_chunks_ - 1];
  28 :        :   size_t event_index;
  29 :    889 :   TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  30 :    889 :   *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  31 :    889 :   return trace_object;
  32 :        : }
  33 :        :
  34 :    115 : TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  35 :    230 :   Mutex::ScopedLock scoped_lock(mutex_);
  36 :    115 :   if (handle == 0) {
  37 :        :     // A handle value of zero never has a trace event associated with it.
  38 :        :     return nullptr;
  39 :        :   }
  40 :        :   size_t chunk_index, event_index;
  41 :        :   uint32_t buffer_id, chunk_seq;
  42 :    115 :   ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  43 :    115 :   if (buffer_id != id_ || chunk_index >= total_chunks_) {
  44 :        :     // Either the chunk belongs to the other buffer, or is outside the current
  45 :        :     // range of chunks loaded in memory (the latter being true suggests that
  46 :        :     // the chunk has already been flushed and is no longer in memory.)
  47 :        :     return nullptr;
  48 :        :   }
  49 :    115 :   auto& chunk = chunks_[chunk_index];
  50 :    115 :   if (chunk->seq() != chunk_seq) {
  51 :        :     // Chunk is no longer in memory.
  52 :        :     return nullptr;
  53 :        :   }
  54 :    115 :   return chunk->GetEventAt(event_index);
  55 :        : }
  56 :        :
  57 :    280 : void InternalTraceBuffer::Flush(bool blocking) {
  58 :        :   {
  59 :    560 :     Mutex::ScopedLock scoped_lock(mutex_);
  60 :    280 :     if (total_chunks_ > 0) {
  61 :     52 :       flushing_ = true;
  62 :    109 :       for (size_t i = 0; i < total_chunks_; ++i) {
  63 :     57 :         auto& chunk = chunks_[i];
  64 :    946 :         for (size_t j = 0; j < chunk->size(); ++j) {
  65 :    889 :           TraceObject* trace_event = chunk->GetEventAt(j);
  66 :        :           // Another thread may have added a trace that is yet to be
  67 :        :           // initialized. Skip such traces.
  68 :        :           // https://github.com/nodejs/node/issues/21038.
  69 :    889 :           if (trace_event->name()) {
  70 :    889 :             agent_->AppendTraceEvent(trace_event);
  71 :        :           }
  72 :        :         }
  73 :        :       }
  74 :     52 :       total_chunks_ = 0;
  75 :     52 :       flushing_ = false;
  76 :        :     }
  77 :        :   }
  78 :    280 :   agent_->Flush(blocking);
  79 :    280 : }
  80 :        :
  81 :    889 : uint64_t InternalTraceBuffer::MakeHandle(
  82 :        :     size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  83 :    889 :   return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
  84 :    889 :           chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
  85 :        : }
  86 :        :
  87 :    115 : void InternalTraceBuffer::ExtractHandle(
  88 :        :     uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
  89 :        :     uint32_t* chunk_seq, size_t* event_index) const {
  90 :    115 :   *buffer_id = static_cast<uint32_t>(handle & 0x1);
  91 :    115 :   handle >>= 1;
  92 :    115 :   *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  93 :    115 :   size_t indices = handle % Capacity();
  94 :    115 :   *chunk_index = indices / TraceBufferChunk::kChunkSize;
  95 :    115 :   *event_index = indices % TraceBufferChunk::kChunkSize;
  96 :    115 : }
  97 :        :
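
The pair of helpers above (MakeHandle at lines 81-85, ExtractHandle at lines 87-96) pack a buffer id (bit 0), a chunk sequence number, a chunk index and an event index into a single uint64_t. A minimal standalone sketch of the same packing arithmetic, with Capacity() and TraceBufferChunk::kChunkSize replaced by constants assumed purely for illustration:

#include <cassert>
#include <cstddef>
#include <cstdint>

// Assumed values, for illustration only; the real ones come from the
// max_chunks constructor argument and the TraceBufferChunk class.
constexpr size_t kChunkSize = 64;
constexpr size_t kMaxChunks = 1024;
constexpr size_t kCapacity = kMaxChunks * kChunkSize;

uint64_t MakeHandle(size_t chunk_index, uint32_t chunk_seq,
                    size_t event_index, uint32_t id) {
  // Same arithmetic as InternalTraceBuffer::MakeHandle(): the buffer id
  // occupies bit 0, everything else is a flat slot index above it.
  return ((static_cast<uint64_t>(chunk_seq) * kCapacity +
           chunk_index * kChunkSize + event_index) << 1) + id;
}

void ExtractHandle(uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
                   uint32_t* chunk_seq, size_t* event_index) {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / kCapacity);
  size_t indices = handle % kCapacity;
  *chunk_index = indices / kChunkSize;
  *event_index = indices % kChunkSize;
}

int main() {
  // Round trip: every field written by MakeHandle is recovered intact,
  // provided event_index < kChunkSize and chunk_index < kMaxChunks.
  uint64_t handle = MakeHandle(7, 42, 13, 1);
  uint32_t buffer_id, chunk_seq;
  size_t chunk_index, event_index;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  assert(buffer_id == 1 && chunk_index == 7 &&
         chunk_seq == 42 && event_index == 13);
  return 0;
}
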
  98 :     58 : NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
  99 :     58 :     Agent* agent, uv_loop_t* tracing_loop)
 100 :        :     : tracing_loop_(tracing_loop),
 101 :        :       buffer1_(max_chunks, 0, agent),
 102 :     58 :       buffer2_(max_chunks, 1, agent) {
 103 :     58 :   current_buf_.store(&buffer1_);
 104 :        :
 105 :     58 :   flush_signal_.data = this;
 106 :     58 :   int err = uv_async_init(tracing_loop_, &flush_signal_,
 107 :     58 :                           NonBlockingFlushSignalCb);
 108 :     58 :   CHECK_EQ(err, 0);
 109 :        :
 110 :     58 :   exit_signal_.data = this;
 111 :     58 :   err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
 112 :     58 :   CHECK_EQ(err, 0);
 113 :     58 : }
 114 :        :
 115 :    232 : NodeTraceBuffer::~NodeTraceBuffer() {
 116 :    116 :   uv_async_send(&exit_signal_);
 117 :    232 :   Mutex::ScopedLock scoped_lock(exit_mutex_);
 118 :    232 :   while (!exited_) {
 119 :    116 :     exit_cond_.Wait(scoped_lock);
 120 :        :   }
 121 :    232 : }
 122 :        :
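
The destructor above (lines 115-121) performs a cross-thread shutdown handshake: it signals the tracing loop with uv_async_send(&exit_signal_), then blocks on exit_cond_ until ExitSignalCb (lines 175-196, further down) has closed both async handles and set exited_. A simplified analogue of that handshake, using std::thread in place of the libuv loop (an illustrative sketch, not Node's API):

#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::mutex exit_mutex;
  std::condition_variable exit_cond;
  bool exit_requested = false;  // plays the role of uv_async_send(&exit_signal_)
  bool exited = false;          // plays the role of exited_

  std::thread tracing_thread([&] {
    // Tracing-loop side: wait for the exit request, do the cleanup that
    // ExitSignalCb performs (closing the async handles), then report back.
    std::unique_lock<std::mutex> lock(exit_mutex);
    exit_cond.wait(lock, [&] { return exit_requested; });
    exited = true;
    exit_cond.notify_all();
  });

  // Destructor side: request shutdown, then block until the other thread
  // has finished, similar to how ~NodeTraceBuffer() waits on exit_cond_.
  {
    std::unique_lock<std::mutex> lock(exit_mutex);
    exit_requested = true;
    exit_cond.notify_all();
    exit_cond.wait(lock, [&] { return exited; });
  }
  tracing_thread.join();
  return 0;
}

The predicate-based wait mirrors the while (!exited_) loop around exit_cond_.Wait() and guards against spurious wakeups.
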
 123 :    889 : TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
 124 :        :   // If the buffer is full, attempt to perform a flush.
 125 :    889 :   if (!TryLoadAvailableBuffer()) {
 126 :        :     // Assign a value of zero as the trace event handle.
 127 :        :     // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
 128 :        :     // and will cause GetEventByHandle to return NULL if passed as an argument.
 129 :        :     *handle = 0;
 130 :        :     return nullptr;
 131 :        :   }
 132 :    889 :   return current_buf_.load()->AddTraceEvent(handle);
 133 :        : }
 134 :        :
 135 :    115 : TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
 136 :    115 :   return current_buf_.load()->GetEventByHandle(handle);
 137 :        : }
 138 :        :
 139 :    140 : bool NodeTraceBuffer::Flush() {
 140 :    140 :   buffer1_.Flush(true);
 141 :    140 :   buffer2_.Flush(true);
 142 :    140 :   return true;
 143 :        : }
 144 :        :
 145 :        : // Attempts to set current_buf_ such that it references a buffer that can
 146 :        : // write at least one trace event. If both buffers are unavailable this
 147 :        : // method returns false; otherwise it returns true.
 148 :    889 : bool NodeTraceBuffer::TryLoadAvailableBuffer() {
 149 :    889 :   InternalTraceBuffer* prev_buf = current_buf_.load();
 150 :    889 :   if (prev_buf->IsFull()) {
 151 :        :     uv_async_send(&flush_signal_);  // trigger flush on a separate thread
 152 :        :     InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
 153 :        :       &buffer2_ : &buffer1_;
 154 :        :     if (!other_buf->IsFull()) {
 155 :        :       current_buf_.store(other_buf);
 156 :        :     } else {
 157 :        :       return false;
 158 :        :     }
 159 :        :   }
 160 :    889 :   return true;
 161 :        : }
 162 :        :
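
The comment at lines 145-147 and the body of TryLoadAvailableBuffer describe the double-buffering scheme: writers keep appending to the current buffer until it fills up, then swap to the other buffer while the full one is flushed on the tracing thread; only when both buffers are full is the event dropped. A minimal sketch of that selection logic, with a hypothetical Buffer type standing in for InternalTraceBuffer:

#include <atomic>

// Hypothetical stand-in for InternalTraceBuffer, just enough to show the swap.
struct Buffer {
  int used = 0;
  int capacity = 4;
  bool IsFull() const { return used >= capacity; }
};

struct DoubleBuffer {
  Buffer a, b;
  std::atomic<Buffer*> current{&a};

  // Mirrors NodeTraceBuffer::TryLoadAvailableBuffer(): returns false only
  // when both halves are full.
  bool TryLoadAvailable() {
    Buffer* prev = current.load();
    if (!prev->IsFull()) return true;
    // The real code also fires uv_async_send(&flush_signal_) here so the
    // tracing thread flushes the full buffer in the background.
    Buffer* other = (prev == &a) ? &b : &a;
    if (other->IsFull()) return false;  // both halves full: the event is dropped
    current.store(other);
    return true;
  }
};

Because the current pointer is a single atomic, readers such as GetEventByHandle (line 135) can load it without taking the flush lock.
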
 163 :        : // static
 164 :        : void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
 165 :        :   NodeTraceBuffer* buffer = static_cast<NodeTraceBuffer*>(signal->data);
 166 :        :   if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
 167 :        :     buffer->buffer1_.Flush(false);
 168 :        :   }
 169 :        :   if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
 170 :        :     buffer->buffer2_.Flush(false);
 171 :        :   }
 172 :        : }
 173 :        :
 174 :        : // static
 175 :     58 : void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
 176 :        :   NodeTraceBuffer* buffer =
 177 :     58 :       ContainerOf(&NodeTraceBuffer::exit_signal_, signal);
 178 :        :
 179 :        :   // Close both flush_signal_ and exit_signal_.
 180 :     58 :   uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_),
 181 :     58 :            [](uv_handle_t* signal) {
 182 :        :     NodeTraceBuffer* buffer =
 183 :     58 :         ContainerOf(&NodeTraceBuffer::flush_signal_,
 184 :     58 :                     reinterpret_cast<uv_async_t*>(signal));
 185 :        :
 186 :     58 :     uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
 187 :     58 :              [](uv_handle_t* signal) {
 188 :        :       NodeTraceBuffer* buffer =
 189 :     58 :           ContainerOf(&NodeTraceBuffer::exit_signal_,
 190 :     58 :                       reinterpret_cast<uv_async_t*>(signal));
 191 :    116 :         Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
 192 :     58 :         buffer->exited_ = true;
 193 :     58 :         buffer->exit_cond_.Signal(scoped_lock);
 194 :     58 :     });
 195 :     58 :   });
 196 :     58 : }
 197 :        :
 198 :        : }  // namespace tracing
 199 :        : }  // namespace node