#include "tracing/node_trace_writer.h"

#include "util-inl.h"

#include <fcntl.h>
#include <cstring>

namespace node {
namespace tracing {
NodeTraceWriter::NodeTraceWriter(const std::string& log_file_pattern)
    : log_file_pattern_(log_file_pattern) {}

void NodeTraceWriter::InitializeOnThread(uv_loop_t* loop) {
  CHECK_NULL(tracing_loop_);
  tracing_loop_ = loop;
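
  // flush_signal_ is a libuv async handle: uv_async_send() may be called
  // from any thread, but the callback always runs on the tracing loop's
  // thread, so every flush is serialized onto that thread.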
  flush_signal_.data = this;
  int err = uv_async_init(tracing_loop_, &flush_signal_,
                          [](uv_async_t* signal) {
    NodeTraceWriter* trace_writer =
        ContainerOf(&NodeTraceWriter::flush_signal_, signal);
    trace_writer->FlushPrivate();
  });
  CHECK_EQ(err, 0);

  exit_signal_.data = this;
  err = uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb);
  CHECK_EQ(err, 0);
}

void NodeTraceWriter::WriteSuffix() {
  // If our final log file has traces, then end the file appropriately.
  // This means that if no trace events are recorded, then no trace file is
  // produced.
  bool should_flush = false;
  {
    Mutex::ScopedLock scoped_lock(stream_mutex_);
    if (total_traces_ > 0) {
      total_traces_ = kTracesPerFile;  // Act as if we reached the file limit.
      should_flush = true;
    }
  }
  if (should_flush) {
    Flush(true);
  }
}

NodeTraceWriter::~NodeTraceWriter() {
  WriteSuffix();
  uv_fs_t req;
  if (fd_ != -1) {
    CHECK_EQ(0, uv_fs_close(nullptr, &req, fd_, nullptr));
    uv_fs_req_cleanup(&req);
  }
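  // Wake the tracing thread so that ExitSignalCb() closes both async handles
  // there, then block until it reports back; the handles must not be
  // destroyed while the loop still references them.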
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock scoped_lock(request_mutex_);
  while (!exited_) {
    exit_cond_.Wait(scoped_lock);
  }
}
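
// Replace every occurrence of `search` in `*target` with `insert`. The scan
// resumes after the inserted text, so an insertion is never rescanned even
// if it happens to contain `search`.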
void replace_substring(std::string* target,
                       const std::string& search,
                       const std::string& insert) {
  size_t pos = target->find(search);
  for (; pos != std::string::npos; pos = target->find(search, pos)) {
    target->replace(pos, search.size(), insert);
    pos += insert.size();
  }
}

void NodeTraceWriter::OpenNewFileForStreaming() {
  ++file_num_;
  uv_fs_t req;

  // Evaluate a JS-style template string; it accepts the values ${pid} and
  // ${rotation}.
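  // For example, with the default CLI pattern "node_trace.${rotation}.log"
  // the first file becomes "node_trace.1.log".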
  std::string filepath(log_file_pattern_);
  replace_substring(&filepath, "${pid}", std::to_string(uv_os_getpid()));
  replace_substring(&filepath, "${rotation}", std::to_string(file_num_));

  if (fd_ != -1) {
    CHECK_EQ(uv_fs_close(nullptr, &req, fd_, nullptr), 0);
    uv_fs_req_cleanup(&req);
  }

  fd_ = uv_fs_open(nullptr, &req, filepath.c_str(),
                   O_CREAT | O_WRONLY | O_TRUNC, 0644, nullptr);
  uv_fs_req_cleanup(&req);
  if (fd_ < 0) {
    fprintf(stderr, "Could not open trace file %s: %s\n",
            filepath.c_str(),
            uv_strerror(fd_));
    fd_ = -1;
  }
}

void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
  Mutex::ScopedLock scoped_lock(stream_mutex_);
  // If this is the first trace event, open a new file for streaming.
  if (total_traces_ == 0) {
    OpenNewFileForStreaming();
    // Constructing a new JSONTraceWriter object appends "{\"traceEvents\":["
    // to stream_.
    // In other words, the constructor initializes the serialization stream
    // to a state where we can start writing trace events to it.
    // Repeatedly constructing and destroying json_trace_writer_ allows
    // us to use V8's JSON writer instead of implementing our own.
    json_trace_writer_.reset(TraceWriter::CreateJSONTraceWriter(stream_));
  }
  ++total_traces_;
  json_trace_writer_->AppendTraceEvent(trace_event);
}

void NodeTraceWriter::FlushPrivate() {
  std::string str;
  int highest_request_id;
  {
    Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
    if (total_traces_ >= kTracesPerFile) {
      total_traces_ = 0;
      // Destroying the member JSONTraceWriter object appends "]}" to
      // stream_ - in other words, ending a JSON file.
      json_trace_writer_.reset();
    }
    // str() makes a copy of the contents of the stream.
    str = stream_.str();
    stream_.str("");
    stream_.clear();
  }
  {
    Mutex::ScopedLock request_scoped_lock(request_mutex_);
    highest_request_id = num_write_requests_;
  }
  WriteToFile(std::move(str), highest_request_id);
}

void NodeTraceWriter::Flush(bool blocking) {
  Mutex::ScopedLock scoped_lock(request_mutex_);
  {
    // We need to lock the mutexes here in a nested fashion: stream_mutex_
    // protects json_trace_writer_, and without request_mutex_ there might be
    // a window in which the stream state changes between this check and the
    // flush request below.
    Mutex::ScopedLock stream_mutex_lock(stream_mutex_);
    if (!json_trace_writer_)
      return;
  }
  int request_id = ++num_write_requests_;
  int err = uv_async_send(&flush_signal_);
  CHECK_EQ(err, 0);
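  // Note that uv_async_send() coalesces: a single FlushPrivate() run may
  // serve several Flush() calls, which is why completion is tracked by
  // request id rather than one callback per request.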
  if (blocking) {
    // Wait until the data associated with this request id has been written
    // to disk. This guarantees that data from all earlier requests has also
    // been written.
    while (request_id > highest_request_id_completed_) {
      request_cond_.Wait(scoped_lock);
    }
  }
}

void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
  if (fd_ == -1) return;

  uv_buf_t buf = uv_buf_init(nullptr, 0);
  {
    Mutex::ScopedLock lock(request_mutex_);
    write_req_queue_.emplace(WriteRequest {
      std::move(str), highest_request_id
    });
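    // The queued string owns the bytes and stays alive until AfterWrite()
    // pops it, so the uv_buf_t may safely point into it. The const_cast is
    // needed only because uv_buf_init() takes a char*; the write does not
    // modify the buffer.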
    if (write_req_queue_.size() == 1) {
      buf = uv_buf_init(
          const_cast<char*>(write_req_queue_.front().str.c_str()),
          write_req_queue_.front().str.length());
    }
  }
  // Only one write request for the same file descriptor should be active at
  // a time.
  if (buf.base != nullptr && fd_ != -1) {
    StartWrite(buf);
  }
}
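
// Issue the asynchronous write on the tracing loop. The -1 offset makes
// uv_fs_write() use the file's current position, so each chunk is appended
// after the previous one.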
void NodeTraceWriter::StartWrite(uv_buf_t buf) {
  int err = uv_fs_write(
      tracing_loop_, &write_req_, fd_, &buf, 1, -1,
      [](uv_fs_t* req) {
        NodeTraceWriter* writer =
            ContainerOf(&NodeTraceWriter::write_req_, req);
        writer->AfterWrite();
      });
  CHECK_EQ(err, 0);
}
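
// Completion callback for StartWrite(). Runs on the tracing thread: it
// retires the finished request, wakes any threads blocked in Flush(true),
// and starts the next queued write, keeping at most one write in flight.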
void NodeTraceWriter::AfterWrite() {
  CHECK_GE(write_req_.result, 0);
  uv_fs_req_cleanup(&write_req_);

  uv_buf_t buf = uv_buf_init(nullptr, 0);
  {
    Mutex::ScopedLock scoped_lock(request_mutex_);
    int highest_request_id = write_req_queue_.front().highest_request_id;
    write_req_queue_.pop();
    highest_request_id_completed_ = highest_request_id;
    request_cond_.Broadcast(scoped_lock);
    if (!write_req_queue_.empty()) {
      buf = uv_buf_init(
          const_cast<char*>(write_req_queue_.front().str.c_str()),
          write_req_queue_.front().str.length());
    }
  }
  if (buf.base != nullptr && fd_ != -1) {
    StartWrite(buf);
  }
}

// static
void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
  NodeTraceWriter* trace_writer =
      ContainerOf(&NodeTraceWriter::exit_signal_, signal);
  // Close both flush_signal_ and exit_signal_.
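  // uv_close() completes asynchronously, so the two handles are closed in a
  // chain; only after both close callbacks have run is exited_ set and the
  // destructor released.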
  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_),
           [](uv_handle_t* signal) {
             NodeTraceWriter* trace_writer =
                 ContainerOf(&NodeTraceWriter::flush_signal_,
                             reinterpret_cast<uv_async_t*>(signal));
             uv_close(
                 reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_),
                 [](uv_handle_t* signal) {
                   NodeTraceWriter* trace_writer =
                       ContainerOf(&NodeTraceWriter::exit_signal_,
                                   reinterpret_cast<uv_async_t*>(signal));
                   Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_);
                   trace_writer->exited_ = true;
                   trace_writer->exit_cond_.Signal(scoped_lock);
                 });
           });
}

}  // namespace tracing
}  // namespace node