#include "tracing/node_trace_buffer.h"

#include <memory>
#include "util-inl.h"

namespace node {
namespace tracing {

InternalTraceBuffer::InternalTraceBuffer(size_t max_chunks, uint32_t id,
                                         Agent* agent)
    : flushing_(false), max_chunks_(max_chunks),
      agent_(agent), id_(id) {
  chunks_.resize(max_chunks);
}

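// Reserves a slot for a new trace event in the current chunk, allocating or
// recycling a chunk if the current one is full, and returns a pointer to the
// slot. *handle receives an encoded locator for the event (see MakeHandle)
// that GetEventByHandle can later resolve.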
TraceObject* InternalTraceBuffer::AddTraceEvent(uint64_t* handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  // Create new chunk if last chunk is full or there is no chunk.
  if (total_chunks_ == 0 || chunks_[total_chunks_ - 1]->IsFull()) {
    auto& chunk = chunks_[total_chunks_++];
    if (chunk) {
      chunk->Reset(current_chunk_seq_++);
    } else {
      chunk = std::make_unique<TraceBufferChunk>(current_chunk_seq_++);
    }
  }
  auto& chunk = chunks_[total_chunks_ - 1];
  size_t event_index;
  TraceObject* trace_object = chunk->AddTraceEvent(&event_index);
  *handle = MakeHandle(total_chunks_ - 1, chunk->seq(), event_index);
  return trace_object;
}

TraceObject* InternalTraceBuffer::GetEventByHandle(uint64_t handle) {
  Mutex::ScopedLock scoped_lock(mutex_);
  if (handle == 0) {
    // A handle value of zero never has a trace event associated with it.
    return nullptr;
  }
  size_t chunk_index, event_index;
  uint32_t buffer_id, chunk_seq;
  ExtractHandle(handle, &buffer_id, &chunk_index, &chunk_seq, &event_index);
  if (buffer_id != id_ || chunk_index >= total_chunks_) {
    // Either the chunk belongs to the other buffer, or is outside the current
    // range of chunks loaded in memory (the latter being true suggests that
    // the chunk has already been flushed and is no longer in memory.)
    return nullptr;
  }
  auto& chunk = chunks_[chunk_index];
  if (chunk->seq() != chunk_seq) {
    // Chunk is no longer in memory.
    return nullptr;
  }
  return chunk->GetEventAt(event_index);
}

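// Hands every initialized event in this buffer over to the agent, marks the
// buffer as empty again, and finally forwards the flush request (blocking or
// non-blocking) to the agent itself.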
void InternalTraceBuffer::Flush(bool blocking) {
  {
    Mutex::ScopedLock scoped_lock(mutex_);
    if (total_chunks_ > 0) {
      flushing_ = true;
      for (size_t i = 0; i < total_chunks_; ++i) {
        auto& chunk = chunks_[i];
        for (size_t j = 0; j < chunk->size(); ++j) {
          TraceObject* trace_event = chunk->GetEventAt(j);
          // Another thread may have added a trace that is yet to be
          // initialized. Skip such traces.
          // https://github.com/nodejs/node/issues/21038.
          if (trace_event->name()) {
            agent_->AppendTraceEvent(trace_event);
          }
        }
      }
      total_chunks_ = 0;
      flushing_ = false;
    }
  }
  agent_->Flush(blocking);
}

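// Handle encoding: the least significant bit stores the buffer id (0 or 1);
// the remaining bits store
//   chunk_seq * Capacity() + chunk_index * TraceBufferChunk::kChunkSize +
//   event_index.
// ExtractHandle() reverses this encoding.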
uint64_t InternalTraceBuffer::MakeHandle(
    size_t chunk_index, uint32_t chunk_seq, size_t event_index) const {
  return ((static_cast<uint64_t>(chunk_seq) * Capacity() +
          chunk_index * TraceBufferChunk::kChunkSize + event_index) << 1) + id_;
}

void InternalTraceBuffer::ExtractHandle(
    uint64_t handle, uint32_t* buffer_id, size_t* chunk_index,
    uint32_t* chunk_seq, size_t* event_index) const {
  *buffer_id = static_cast<uint32_t>(handle & 0x1);
  handle >>= 1;
  *chunk_seq = static_cast<uint32_t>(handle / Capacity());
  size_t indices = handle % Capacity();
  *chunk_index = indices / TraceBufferChunk::kChunkSize;
  *event_index = indices % TraceBufferChunk::kChunkSize;
}

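// NodeTraceBuffer double-buffers trace events: buffer1_ and buffer2_ are
// internal buffers with ids 0 and 1, and current_buf_ points at the one that
// currently accepts new events. flush_signal_ and exit_signal_ are async
// handles registered on the tracing loop, used to request a non-blocking
// flush and to coordinate shutdown.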
NodeTraceBuffer::NodeTraceBuffer(size_t max_chunks,
    Agent* agent, uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop),
      buffer1_(max_chunks, 0, agent),
      buffer2_(max_chunks, 1, agent) {
  current_buf_.store(&buffer1_);

  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          NonBlockingFlushSignalCb);
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}

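// Asks the tracing thread (via exit_signal_) to close the async handles and
// blocks until ExitSignalCb has signalled completion by setting exited_.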
NodeTraceBuffer::~NodeTraceBuffer() {
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(exit_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}

TraceObject* NodeTraceBuffer::AddTraceEvent(uint64_t* handle) {
  // If the buffer is full, attempt to perform a flush.
  if (!TryLoadAvailableBuffer()) {
    // Assign a value of zero as the trace event handle.
    // This is equivalent to calling InternalTraceBuffer::MakeHandle(0, 0, 0),
    // and will cause GetEventByHandle to return NULL if passed as an argument.
    *handle = 0;
    return nullptr;
  }
  return current_buf_.load()->AddTraceEvent(handle);
}

TraceObject* NodeTraceBuffer::GetEventByHandle(uint64_t handle) {
  return current_buf_.load()->GetEventByHandle(handle);
}

bool NodeTraceBuffer::Flush() {
  buffer1_.Flush(true);
  buffer2_.Flush(true);
  return true;
}

// Attempts to set current_buf_ such that it references a buffer that can
// write at least one trace event. If both buffers are unavailable this
// method returns false; otherwise it returns true.
bool NodeTraceBuffer::TryLoadAvailableBuffer() {
  InternalTraceBuffer* prev_buf = current_buf_.load();
  if (prev_buf->IsFull()) {
    uv_async_send(&flush_signal_);  // trigger flush on a separate thread
    InternalTraceBuffer* other_buf = prev_buf == &buffer1_ ?
      &buffer2_ : &buffer1_;
    if (!other_buf->IsFull()) {
      current_buf_.store(other_buf);
    } else {
      return false;
    }
  }
  return true;
}

// static |
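// Invoked on the tracing loop in response to flush_signal_. Flushes whichever
// internal buffer is full, unless that buffer is already being flushed.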
void NodeTraceBuffer::NonBlockingFlushSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer = static_cast<NodeTraceBuffer*>(signal->data);
  if (buffer->buffer1_.IsFull() && !buffer->buffer1_.IsFlushing()) {
    buffer->buffer1_.Flush(false);
  }
  if (buffer->buffer2_.IsFull() && !buffer->buffer2_.IsFlushing()) {
    buffer->buffer2_.Flush(false);
  }
}

// static |
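// Invoked on the tracing loop in response to exit_signal_ (sent by the
// destructor). Closes flush_signal_ and then exit_signal_; once both close
// callbacks have run, sets exited_ and wakes up the waiting destructor.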
void NodeTraceBuffer::ExitSignalCb(uv_async_t* signal) {
  NodeTraceBuffer* buffer =
      ContainerOf(&NodeTraceBuffer::exit_signal_, signal);

  // Close both flush_signal_ and exit_signal_.
  uv_close(reinterpret_cast<uv_handle_t*>(&buffer->flush_signal_),
           [](uv_handle_t* signal) {
    NodeTraceBuffer* buffer =
        ContainerOf(&NodeTraceBuffer::flush_signal_,
                    reinterpret_cast<uv_async_t*>(signal));

    uv_close(reinterpret_cast<uv_handle_t*>(&buffer->exit_signal_),
             [](uv_handle_t* signal) {
      NodeTraceBuffer* buffer =
          ContainerOf(&NodeTraceBuffer::exit_signal_,
                      reinterpret_cast<uv_async_t*>(signal));
      Mutex::ScopedLock scoped_lock(buffer->exit_mutex_);
      buffer->exited_ = true;
      buffer->exit_cond_.Signal(scoped_lock);
    });
  });
}

}  // namespace tracing
}  // namespace node