GCC Code Coverage Report
Directory: ../ Exec Total Coverage
File: /home/iojs/build/workspace/node-test-commit-linux-coverage/nodes/benchmark/out/../src/tracing/node_trace_writer.cc Lines: 109 113 96.5 %
Date: 2017-10-21 Branches: 29 44 65.9 %

Line Branch Exec Source
1
#include "tracing/node_trace_writer.h"
2
3
#include <string.h>
4
#include <fcntl.h>
5
6
#include "util.h"
7
8
namespace node {
9
namespace tracing {
10
11
3
NodeTraceWriter::NodeTraceWriter(uv_loop_t* tracing_loop)
    : tracing_loop_(tracing_loop) {
  // Both async handles run their callbacks on the dedicated tracing loop.
  // The ->data back-pointer lets the static callbacks recover `this`.
  flush_signal_.data = this;
  CHECK_EQ(uv_async_init(tracing_loop_, &flush_signal_, FlushSignalCb), 0);

  exit_signal_.data = this;
  CHECK_EQ(uv_async_init(tracing_loop_, &exit_signal_, ExitSignalCb), 0);
}
21
22
3
void NodeTraceWriter::WriteSuffix() {
23
  // If our final log file has traces, then end the file appropriately.
24
  // This means that if no trace events are recorded, then no trace file is
25
  // produced.
26
3
  bool should_flush = false;
27
  {
28
3
    Mutex::ScopedLock scoped_lock(stream_mutex_);
29
3
    if (total_traces_ > 0) {
30
2
      total_traces_ = 0;  // so we don't write it again in FlushPrivate
31
      // Appends "]}" to stream_.
32
2
      delete json_trace_writer_;
33
2
      should_flush = true;
34
3
    }
35
  }
36
3
  if (should_flush) {
37
2
    Flush(true);
38
  }
39
3
}
40
41
9
NodeTraceWriter::~NodeTraceWriter() {
  WriteSuffix();
  if (fd_ != -1) {
    // Synchronous close (nullptr callback => blocking libuv call).
    uv_fs_t req;
    int err = uv_fs_close(tracing_loop_, &req, fd_, nullptr);
    CHECK_EQ(err, 0);
    uv_fs_req_cleanup(&req);
  }
  // Ask the tracing thread to tear down both async handles, then block
  // here until its close callback flips exited_ and signals us.
  uv_async_send(&exit_signal_);
  Mutex::ScopedLock lock(request_mutex_);
  while (!exited_)
    exit_cond_.Wait(lock);
}
56
57
2
void NodeTraceWriter::OpenNewFileForStreaming() {
58
2
  ++file_num_;
59
  uv_fs_t req;
60
2
  std::ostringstream log_file;
61
2
  log_file << "node_trace." << file_num_ << ".log";
62
  fd_ = uv_fs_open(tracing_loop_, &req, log_file.str().c_str(),
63
2
      O_CREAT | O_WRONLY | O_TRUNC, 0644, NULL);
64
2
  CHECK_NE(fd_, -1);
65
2
  uv_fs_req_cleanup(&req);
66
2
}
67
68
469
void NodeTraceWriter::AppendTraceEvent(TraceObject* trace_event) {
  Mutex::ScopedLock lock(stream_mutex_);
  if (total_traces_ == 0) {
    // First event of a new file: open the log, then construct a fresh
    // JSONTraceWriter, whose constructor writes "{\"traceEvents\":[" into
    // stream_. Repeatedly constructing/destroying json_trace_writer_ lets
    // us reuse V8's JSON serializer instead of implementing our own.
    OpenNewFileForStreaming();
    json_trace_writer_ = TraceWriter::CreateJSONTraceWriter(stream_);
  }
  ++total_traces_;
  json_trace_writer_->AppendTraceEvent(trace_event);
}
84
85
6
void NodeTraceWriter::FlushPrivate() {
86
6
  std::string str;
87
  int highest_request_id;
88
  {
89
6
    Mutex::ScopedLock stream_scoped_lock(stream_mutex_);
90
6
    if (total_traces_ >= kTracesPerFile) {
91
      total_traces_ = 0;
92
      // Destroying the member JSONTraceWriter object appends "]}" to
93
      // stream_ - in other words, ending a JSON file.
94
      delete json_trace_writer_;
95
    }
96
    // str() makes a copy of the contents of the stream.
97
6
    str = stream_.str();
98
6
    stream_.str("");
99
6
    stream_.clear();
100
  }
101
  {
102
6
    Mutex::ScopedLock request_scoped_lock(request_mutex_);
103
6
    highest_request_id = num_write_requests_;
104
  }
105
6
  WriteToFile(std::move(str), highest_request_id);
106
6
}
107
108
6
// Trampoline for the flush async handle: recover the writer instance and
// perform the actual flush on the tracing thread.
void NodeTraceWriter::FlushSignalCb(uv_async_t* signal) {
  auto* self = static_cast<NodeTraceWriter*>(signal->data);
  self->FlushPrivate();
}
112
113
// TODO(matthewloring): Remove (is it necessary to change the API?
114
// Since because of WriteSuffix it no longer matters whether it's true or false)
115
void NodeTraceWriter::Flush() {
116
  Flush(true);
117
}
118
119
8
void NodeTraceWriter::Flush(bool blocking) {
120
8
  Mutex::ScopedLock scoped_lock(request_mutex_);
121
8
  if (!json_trace_writer_) {
122
10
    return;
123
  }
124
6
  int request_id = ++num_write_requests_;
125
6
  int err = uv_async_send(&flush_signal_);
126
6
  CHECK_EQ(err, 0);
127
6
  if (blocking) {
128
    // Wait until data associated with this request id has been written to disk.
129
    // This guarantees that data from all earlier requests have also been
130
    // written.
131

18
    while (request_id > highest_request_id_completed_) {
132
6
      request_cond_.Wait(scoped_lock);
133
    }
134
6
  }
135
}
136
137
6
void NodeTraceWriter::WriteToFile(std::string&& str, int highest_request_id) {
138
6
  WriteRequest* write_req = new WriteRequest();
139
6
  write_req->str = std::move(str);
140
6
  write_req->writer = this;
141
6
  write_req->highest_request_id = highest_request_id;
142
6
  uv_buf_t uv_buf = uv_buf_init(const_cast<char*>(write_req->str.c_str()),
143
12
      write_req->str.length());
144
6
  request_mutex_.Lock();
145
  // Manage a queue of WriteRequest objects because the behavior of uv_write is
146
  // is undefined if the same WriteRequest object is used more than once
147
  // between WriteCb calls. In addition, this allows us to keep track of the id
148
  // of the latest write request that actually been completed.
149
6
  write_req_queue_.push(write_req);
150
6
  request_mutex_.Unlock();
151
  int err = uv_fs_write(tracing_loop_, reinterpret_cast<uv_fs_t*>(write_req),
152
6
      fd_, &uv_buf, 1, -1, WriteCb);
153
6
  CHECK_EQ(err, 0);
154
6
}
155
156
6
void NodeTraceWriter::WriteCb(uv_fs_t* req) {
  // NOTE: the cast relies on the uv_fs_t being the leading member of
  // WriteRequest, so the libuv request pointer doubles as the
  // WriteRequest pointer.
  WriteRequest* write_req = reinterpret_cast<WriteRequest*>(req);
  CHECK_GE(write_req->req.result, 0);

  NodeTraceWriter* writer = write_req->writer;
  int completed_id = write_req->highest_request_id;
  {
    Mutex::ScopedLock lock(writer->request_mutex_);
    // Writes complete in submission order, so this must be the queue head.
    CHECK_EQ(write_req, writer->write_req_queue_.front());
    writer->write_req_queue_.pop();
    writer->highest_request_id_completed_ = completed_id;
    // Wake every blocked Flush(true) caller so each re-checks its own id.
    writer->request_cond_.Broadcast(lock);
  }
  delete write_req;
}
171
172
// static
173
3
void NodeTraceWriter::ExitSignalCb(uv_async_t* signal) {
174
3
  NodeTraceWriter* trace_writer = static_cast<NodeTraceWriter*>(signal->data);
175
  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->flush_signal_),
176
3
           nullptr);
177
  uv_close(reinterpret_cast<uv_handle_t*>(&trace_writer->exit_signal_),
178
9
           [](uv_handle_t* signal) {
179
      NodeTraceWriter* trace_writer =
180
3
          static_cast<NodeTraceWriter*>(signal->data);
181
3
      Mutex::ScopedLock scoped_lock(trace_writer->request_mutex_);
182
3
      trace_writer->exited_ = true;
183
3
      trace_writer->exit_cond_.Signal(scoped_lock);
184
12
  });
185
3
}
186
187
}  // namespace tracing
188
}  // namespace node