/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "RecordReadThread.h"

#include <sys/resource.h>
#include <unistd.h>

#include <algorithm>
#include <unordered_map>

#include "environment.h"
#include "event_type.h"
#include "record.h"
#include "utils.h"

namespace simpleperf {

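// Thresholds on the free space in the record buffer: when free space drops below the low level,
// stack data in sample records is truncated; when it drops below the critical level, new sample
// records are dropped entirely. See PushRecordToRecordBuffer().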
static constexpr size_t kDefaultLowBufferLevel = 10 * 1024 * 1024u;
static constexpr size_t kDefaultCriticalBufferLevel = 5 * 1024 * 1024u;

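// RecordBuffer is a single-producer single-consumer ring buffer of bytes: the read thread writes
// records at write_head_, and the main thread consumes them at read_head_. read_head_ ==
// write_head_ means the buffer is empty, so a write may never completely fill the buffer; one
// byte is always left unused, which is why the writable region ends at read_head_ - 1.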
RecordBuffer::RecordBuffer(size_t buffer_size)
    : read_head_(0), write_head_(0), buffer_size_(buffer_size), buffer_(new char[buffer_size]) {
}

size_t RecordBuffer::GetFreeSize() const {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  if (write_head <= write_tail) {
    return write_tail - write_head;
  }
  return buffer_size_ - write_head + write_tail;
}

char* RecordBuffer::AllocWriteSpace(size_t record_size) {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
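  // The acquire load pairs with the release store in MoveToNextRecord(), guaranteeing that the
  // reader is done with any space being reclaimed before the writer reuses it.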
  size_t read_head = read_head_.load(std::memory_order_acquire);
  size_t write_tail = read_head > 0 ? read_head - 1 : buffer_size_ - 1;
  cur_write_record_size_ = record_size;
  if (write_head < write_tail) {
    if (write_head + record_size > write_tail) {
      return nullptr;
    }
  } else if (write_head + record_size > buffer_size_) {
    // Not enough space at the end of the buffer; wrap to the start of the buffer.
    if (write_tail < record_size) {
      return nullptr;
    }
    if (buffer_size_ - write_head >= sizeof(perf_event_header)) {
      // Set the size field in perf_event_header to 0, so GetCurrentRecord() knows to wrap to the
      // start of the buffer when it sees a zero size.
      memset(buffer_.get() + write_head, 0, sizeof(perf_event_header));
    }
    cur_write_record_size_ += buffer_size_ - write_head;
    write_head = 0;
  }
  return buffer_.get() + write_head;
}

void RecordBuffer::FinishWrite() {
  size_t write_head = write_head_.load(std::memory_order_relaxed);
  write_head = (write_head + cur_write_record_size_) % buffer_size_;
  write_head_.store(write_head, std::memory_order_release);
}

char* RecordBuffer::GetCurrentRecord() {
  size_t write_head = write_head_.load(std::memory_order_acquire);
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  if (read_head == write_head) {
    return nullptr;
  }
  perf_event_header header;
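  // The writer wraps to the start of the buffer when there is not enough space at the end. It
  // marks the wrap either by leaving fewer than sizeof(header) bytes, or by writing a header with
  // size == 0 (see AllocWriteSpace()); either condition means the current record is at offset 0.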
  if (read_head > write_head) {
    if (buffer_size_ - read_head < sizeof(header) ||
        (memcpy(&header, buffer_.get() + read_head, sizeof(header)) && header.size == 0)) {
      // Need to wrap to the start of the buffer.
      cur_read_record_size_ += buffer_size_ - read_head;
      read_head = 0;
      memcpy(&header, buffer_.get(), sizeof(header));
    }
  } else {
    memcpy(&header, buffer_.get() + read_head, sizeof(header));
  }
  cur_read_record_size_ += header.size;
  return buffer_.get() + read_head;
}

void RecordBuffer::MoveToNextRecord() {
  size_t read_head = read_head_.load(std::memory_order_relaxed);
  read_head = (read_head + cur_read_record_size_) % buffer_size_;
  read_head_.store(read_head, std::memory_order_release);
  cur_read_record_size_ = 0;
}

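// RecordParser precomputes field offsets from perf_event_attr, so the read thread can find the
// pid, time and stack fields of a record without fully parsing it. In PERF_RECORD_SAMPLE records,
// the fields appear in a fixed order selected by bits in sample_type, so the offsets of fields
// before the callchain are constant. In non-sample records (when sample_id_all is set), the
// shared sample fields are appended at the end of the record, so the time field is located by its
// reverse offset from the record end (see GetTimePos()).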
RecordParser::RecordParser(const perf_event_attr& attr)
    : sample_type_(attr.sample_type),
      sample_regs_count_(__builtin_popcountll(attr.sample_regs_user)) {
  size_t pos = sizeof(perf_event_header);
  uint64_t mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_IP;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  if (sample_type_ & PERF_SAMPLE_TID) {
    pid_pos_in_sample_records_ = pos;
    pos += sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_TIME) {
    time_pos_in_sample_records_ = pos;
    pos += sizeof(uint64_t);
  }
  mask = PERF_SAMPLE_ADDR | PERF_SAMPLE_ID | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_CPU |
      PERF_SAMPLE_PERIOD;
  pos += __builtin_popcountll(sample_type_ & mask) * sizeof(uint64_t);
  callchain_pos_in_sample_records_ = pos;
  if ((sample_type_ & PERF_SAMPLE_TIME) && attr.sample_id_all) {
    mask = PERF_SAMPLE_IDENTIFIER | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | PERF_SAMPLE_ID;
    time_rpos_in_non_sample_records_ = (__builtin_popcountll(sample_type_ & mask) + 1) *
        sizeof(uint64_t);
  }
}

size_t RecordParser::GetTimePos(const perf_event_header& header) const {
  if (header.type == PERF_RECORD_SAMPLE) {
    return time_pos_in_sample_records_;
  }
  if (time_rpos_in_non_sample_records_ != 0u &&
      time_rpos_in_non_sample_records_ < header.size - sizeof(perf_event_header)) {
    return header.size - time_rpos_in_non_sample_records_;
  }
  return 0;
}

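// Fields between the callchain and the stack data in a sample have variable lengths, so locating
// the stack size field requires walking them one by one, reading each length field through
// read_record_fn (which can copy data out of a wrapped kernel ring buffer).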
size_t RecordParser::GetStackSizePos(
    const std::function<void(size_t, size_t, void*)>& read_record_fn) const {
  size_t pos = callchain_pos_in_sample_records_;
  if (sample_type_ & PERF_SAMPLE_CALLCHAIN) {
    uint64_t ip_nr;
    read_record_fn(pos, sizeof(ip_nr), &ip_nr);
    pos += (ip_nr + 1) * sizeof(uint64_t);
  }
  if (sample_type_ & PERF_SAMPLE_RAW) {
    uint32_t size;
    read_record_fn(pos, sizeof(size), &size);
    pos += size + sizeof(uint32_t);
  }
  if (sample_type_ & PERF_SAMPLE_BRANCH_STACK) {
    uint64_t stack_nr;
    read_record_fn(pos, sizeof(stack_nr), &stack_nr);
    pos += sizeof(uint64_t) + stack_nr * sizeof(BranchStackItemType);
  }
  if (sample_type_ & PERF_SAMPLE_REGS_USER) {
    uint64_t abi;
    read_record_fn(pos, sizeof(abi), &abi);
    pos += (1 + (abi == 0 ? 0 : sample_regs_count_)) * sizeof(uint64_t);
  }
  return (sample_type_ & PERF_SAMPLE_STACK_USER) ? pos : 0;
}

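// KernelRecordReader reads records from the mapped buffer of one kernel perf event. The kernel
// requires the data part of a mapped buffer to span a power-of-two number of pages, so positions
// can wrap with `pos & buffer_mask_` instead of a modulo operation.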
KernelRecordReader::KernelRecordReader(EventFd* event_fd) : event_fd_(event_fd) {
  size_t buffer_size;
  buffer_ = event_fd_->GetMappedBuffer(buffer_size);
  buffer_mask_ = buffer_size - 1;
}

bool KernelRecordReader::GetDataFromKernelBuffer() {
  data_size_ = event_fd_->GetAvailableMmapDataSize(data_pos_);
  if (data_size_ == 0) {
    return false;
  }
  init_data_size_ = data_size_;
  record_header_.size = 0;
  return true;
}

void KernelRecordReader::ReadRecord(size_t pos, size_t size, void* dest) {
  pos = (pos + data_pos_) & buffer_mask_;
  size_t copy_size = std::min(size, buffer_mask_ + 1 - pos);
  memcpy(dest, buffer_ + pos, copy_size);
  if (copy_size < size) {
    memcpy(static_cast<char*>(dest) + copy_size, buffer_, size - copy_size);
  }
}

bool KernelRecordReader::MoveToNextRecord(const RecordParser& parser) {
  data_pos_ = (data_pos_ + record_header_.size) & buffer_mask_;
  data_size_ -= record_header_.size;
  if (data_size_ == 0) {
    event_fd_->DiscardMmapData(init_data_size_);
    init_data_size_ = 0;
    return false;
  }
  ReadRecord(0, sizeof(record_header_), &record_header_);
  size_t time_pos = parser.GetTimePos(record_header_);
  if (time_pos != 0) {
    ReadRecord(time_pos, sizeof(record_time_), &record_time_);
  }
  return true;
}

RecordReadThread::RecordReadThread(size_t record_buffer_size, const perf_event_attr& attr,
                                   size_t min_mmap_pages, size_t max_mmap_pages,
                                   size_t aux_buffer_size, bool allow_cutting_samples,
                                   bool exclude_perf)
    : record_buffer_(record_buffer_size),
      record_parser_(attr),
      attr_(attr),
      min_mmap_pages_(min_mmap_pages),
      max_mmap_pages_(max_mmap_pages),
      aux_buffer_size_(aux_buffer_size) {
  if (attr.sample_type & PERF_SAMPLE_STACK_USER) {
    stack_size_in_sample_record_ = attr.sample_stack_user;
  }
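  // Use 1/4 of the record buffer (capped at kDefaultLowBufferLevel) as the low level, and 1/6
  // (capped at kDefaultCriticalBufferLevel) as the critical level. When cutting stack data isn't
  // allowed, raise the critical behavior to the low level, so samples stay intact until they
  // have to be dropped.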
  record_buffer_low_level_ = std::min(record_buffer_size / 4, kDefaultLowBufferLevel);
  record_buffer_critical_level_ = std::min(record_buffer_size / 6, kDefaultCriticalBufferLevel);
  if (!allow_cutting_samples) {
    record_buffer_low_level_ = record_buffer_critical_level_;
  }
  if (exclude_perf) {
    exclude_pid_ = getpid();
  }
}

RecordReadThread::~RecordReadThread() {
  if (read_thread_) {
    StopReadThread();
  }
}

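// The main thread and the read thread communicate through two pipes: the cmd pipe wakes the read
// thread to handle a command published in cmd_/cmd_arg_, and the data pipe wakes the main
// thread's IOEventLoop when new records are available in the record buffer.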
bool RecordReadThread::RegisterDataCallback(IOEventLoop& loop,
                                            const std::function<bool()>& data_callback) {
  int cmd_fd[2];
  int data_fd[2];
  if (pipe2(cmd_fd, O_CLOEXEC) != 0 || pipe2(data_fd, O_CLOEXEC) != 0) {
    PLOG(ERROR) << "pipe2";
    return false;
  }
  read_cmd_fd_.reset(cmd_fd[0]);
  write_cmd_fd_.reset(cmd_fd[1]);
  cmd_ = NO_CMD;
  read_data_fd_.reset(data_fd[0]);
  write_data_fd_.reset(data_fd[1]);
  has_data_notification_ = false;
  if (!loop.AddReadEvent(read_data_fd_, data_callback)) {
    return false;
  }
  read_thread_.reset(new std::thread([&]() { RunReadThread(); }));
  return true;
}

bool RecordReadThread::AddEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_ADD_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::RemoveEventFds(const std::vector<EventFd*>& event_fds) {
  return SendCmdToReadThread(CMD_REMOVE_EVENT_FDS, const_cast<std::vector<EventFd*>*>(&event_fds));
}

bool RecordReadThread::SyncKernelBuffer() {
  return SendCmdToReadThread(CMD_SYNC_KERNEL_BUFFER, nullptr);
}

bool RecordReadThread::StopReadThread() {
  bool result = SendCmdToReadThread(CMD_STOP_THREAD, nullptr);
  if (result) {
    read_thread_->join();
    read_thread_ = nullptr;
  }
  return result;
}

bool RecordReadThread::SendCmdToReadThread(Cmd cmd, void* cmd_arg) {
  {
    std::lock_guard<std::mutex> lock(cmd_mutex_);
    cmd_ = cmd;
    cmd_arg_ = cmd_arg;
  }
  char unused = 0;
  if (TEMP_FAILURE_RETRY(write(write_cmd_fd_, &unused, 1)) != 1) {
    return false;
  }
  std::unique_lock<std::mutex> lock(cmd_mutex_);
  while (cmd_ != NO_CMD) {
    cmd_finish_cond_.wait(lock);
  }
  return cmd_result_;
}

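// Called on the main thread to pop the next record from the record buffer. For
// PERF_RECORD_AUXTRACE records, the raw aux data is stored in the buffer right after the record
// itself, so the current record size is extended by aux_size and location.addr points at the
// trailing data.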
std::unique_ptr<Record> RecordReadThread::GetRecord() {
  record_buffer_.MoveToNextRecord();
  char* p = record_buffer_.GetCurrentRecord();
  if (p != nullptr) {
    std::unique_ptr<Record> r = ReadRecordFromBuffer(attr_, p);
    if (r->type() == PERF_RECORD_AUXTRACE) {
      auto auxtrace = static_cast<AuxTraceRecord*>(r.get());
      record_buffer_.AddCurrentRecordSize(auxtrace->data->aux_size);
      auxtrace->location.addr = r->Binary() + r->size();
    }
    return r;
  }
  if (has_data_notification_) {
    char unused;
    TEMP_FAILURE_RETRY(read(read_data_fd_, &unused, 1));
    has_data_notification_ = false;
  }
  return nullptr;
}

void RecordReadThread::RunReadThread() {
  IncreaseThreadPriority();
  IOEventLoop loop;
  CHECK(loop.AddReadEvent(read_cmd_fd_, [&]() { return HandleCmd(loop); }));
  loop.RunLoop();
}

void RecordReadThread::IncreaseThreadPriority() {
  // TODO: use real time priority for root.
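  // RLIMIT_NICE expresses the nice ceiling as 20 - rlim_cur, so rlim_cur == 40 means this
  // process is allowed to use the highest priority (nice value -20).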
  rlimit rlim;
  int result = getrlimit(RLIMIT_NICE, &rlim);
  if (result == 0 && rlim.rlim_cur == 40) {
    result = setpriority(PRIO_PROCESS, gettid(), -20);
    if (result == 0) {
      LOG(VERBOSE) << "Priority of record read thread is increased";
    }
  }
}

RecordReadThread::Cmd RecordReadThread::GetCmd() {
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  return cmd_;
}

bool RecordReadThread::HandleCmd(IOEventLoop& loop) {
  char unused;
  TEMP_FAILURE_RETRY(read(read_cmd_fd_, &unused, 1));
  bool result = true;
  switch (GetCmd()) {
    case CMD_ADD_EVENT_FDS:
      result = HandleAddEventFds(loop, *static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_REMOVE_EVENT_FDS:
      result = HandleRemoveEventFds(*static_cast<std::vector<EventFd*>*>(cmd_arg_));
      break;
    case CMD_SYNC_KERNEL_BUFFER:
      result = ReadRecordsFromKernelBuffer();
      break;
    case CMD_STOP_THREAD:
      result = loop.ExitLoop();
      break;
    default:
      LOG(ERROR) << "Unknown cmd: " << GetCmd();
      result = false;
      break;
  }
  std::lock_guard<std::mutex> lock(cmd_mutex_);
  cmd_ = NO_CMD;
  cmd_result_ = result;
  cmd_finish_cond_.notify_one();
  return true;
}

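// Events for the same cpu share one mapped buffer. Try mapping buffers of max_mmap_pages_ first,
// halving the page count on failure until min_mmap_pages_ is reached; mapping errors are only
// reported at the minimum size, since a smaller retry may still succeed.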
bool RecordReadThread::HandleAddEventFds(IOEventLoop& loop,
                                         const std::vector<EventFd*>& event_fds) {
  std::unordered_map<int, EventFd*> cpu_map;
  for (size_t pages = max_mmap_pages_; pages >= min_mmap_pages_; pages >>= 1) {
    bool success = true;
    bool report_error = pages == min_mmap_pages_;
    for (EventFd* fd : event_fds) {
      auto it = cpu_map.find(fd->Cpu());
      if (it == cpu_map.end()) {
        if (!fd->CreateMappedBuffer(pages, report_error)) {
          success = false;
          break;
        }
        if (IsEtmEventType(fd->attr().type)) {
          if (!fd->CreateAuxBuffer(aux_buffer_size_, report_error)) {
            fd->DestroyMappedBuffer();
            success = false;
            break;
          }
        }
        cpu_map[fd->Cpu()] = fd;
      } else {
        if (!fd->ShareMappedBuffer(*(it->second), pages == min_mmap_pages_)) {
          success = false;
          break;
        }
      }
    }
    if (success) {
      LOG(VERBOSE) << "Each kernel buffer is " << pages << " pages.";
      break;
    }
    for (auto& pair : cpu_map) {
      pair.second->DestroyMappedBuffer();
      pair.second->DestroyAuxBuffer();
    }
    cpu_map.clear();
  }
  if (cpu_map.empty()) {
    return false;
  }
  for (auto& pair : cpu_map) {
    if (!pair.second->StartPolling(loop, [this]() { return ReadRecordsFromKernelBuffer(); })) {
      return false;
    }
    kernel_record_readers_.emplace_back(pair.second);
  }
  return true;
}

bool RecordReadThread::HandleRemoveEventFds(const std::vector<EventFd*>& event_fds) {
  for (auto& event_fd : event_fds) {
    if (event_fd->HasMappedBuffer()) {
      auto it = std::find_if(kernel_record_readers_.begin(), kernel_record_readers_.end(),
                             [&](const KernelRecordReader& reader) {
                               return reader.GetEventFd() == event_fd;
                             });
      if (it != kernel_record_readers_.end()) {
        kernel_record_readers_.erase(it);
        event_fd->StopPolling();
        event_fd->DestroyMappedBuffer();
        event_fd->DestroyAuxBuffer();
      }
    }
  }
  return true;
}

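// Heap comparator ordering readers by the time of their current record. std::make_heap builds a
// max-heap with respect to the comparator, so comparing with operator> keeps the record with the
// smallest time at the top of the heap.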
static bool CompareRecordTime(KernelRecordReader* r1, KernelRecordReader* r2) {
  return r1->RecordTime() > r2->RecordTime();
}

// When reading from mmap buffers, we prefer reading from all buffers at once rather than reading
// one buffer at a time, because reading all buffers at once lets us merge records from different
// buffers easily in memory. Otherwise, we would have to sort the records with greater effort.
bool RecordReadThread::ReadRecordsFromKernelBuffer() {
  do {
    std::vector<KernelRecordReader*> readers;
    for (auto& reader : kernel_record_readers_) {
      if (reader.GetDataFromKernelBuffer()) {
        readers.push_back(&reader);
      }
    }
    bool has_data = false;
    if (!readers.empty()) {
      has_data = true;
      if (readers.size() == 1u) {
        // Only one buffer has data, process it directly.
        while (readers[0]->MoveToNextRecord(record_parser_)) {
          PushRecordToRecordBuffer(readers[0]);
        }
      } else {
        // Use a binary heap to merge records from different buffers. As records from the same
        // buffer are already ordered by time, we only need to merge the first record from each
        // buffer. Each time a record is popped from the heap, the next record from its buffer is
        // pushed into the heap.
        for (auto& reader : readers) {
          reader->MoveToNextRecord(record_parser_);
        }
        std::make_heap(readers.begin(), readers.end(), CompareRecordTime);
        size_t size = readers.size();
        while (size > 0) {
          std::pop_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          PushRecordToRecordBuffer(readers[size - 1]);
          if (readers[size - 1]->MoveToNextRecord(record_parser_)) {
            std::push_heap(readers.begin(), readers.begin() + size, CompareRecordTime);
          } else {
            size--;
          }
        }
      }
    }
    ReadAuxDataFromKernelBuffer(&has_data);
    if (!has_data) {
      break;
    }
    if (!SendDataNotificationToMainThread()) {
      return false;
    }
    // If there are no pending commands, loop until the kernel buffers are drained.
  } while (GetCmd() == NO_CMD);
  return true;
}

void RecordReadThread::PushRecordToRecordBuffer(KernelRecordReader* kernel_record_reader) {
  const perf_event_header& header = kernel_record_reader->RecordHeader();
  if (header.type == PERF_RECORD_SAMPLE && exclude_pid_ != -1) {
    uint32_t pid;
    kernel_record_reader->ReadRecord(record_parser_.GetPidPosInSampleRecord(), sizeof(pid), &pid);
    if (pid == exclude_pid_) {
      return;
    }
  }
  if (header.type == PERF_RECORD_SAMPLE && stack_size_in_sample_record_ > 1024) {
    size_t free_size = record_buffer_.GetFreeSize();
    if (free_size < record_buffer_critical_level_) {
      // When the free size in the record buffer is below the critical level, drop sample records
      // to save space for more important records (like mmap or fork records).
      stat_.lost_samples++;
      return;
    }
    size_t stack_size_limit = stack_size_in_sample_record_;
    if (free_size < record_buffer_low_level_) {
      // When the free size in the record buffer is below the low level, cut the stack data in
      // sample records to 1K. This makes the unwinder unwind only part of each callchain, but
      // hopefully the callchain joiner can complete the callchains.
      stack_size_limit = 1024;
    }
    size_t stack_size_pos =
        record_parser_.GetStackSizePos([&](size_t pos, size_t size, void* dest) {
          return kernel_record_reader->ReadRecord(pos, size, dest);
        });
    uint64_t stack_size;
    kernel_record_reader->ReadRecord(stack_size_pos, sizeof(stack_size), &stack_size);
    if (stack_size > 0) {
      size_t dyn_stack_size_pos = stack_size_pos + sizeof(stack_size) + stack_size;
      uint64_t dyn_stack_size;
      kernel_record_reader->ReadRecord(dyn_stack_size_pos, sizeof(dyn_stack_size), &dyn_stack_size);
      if (dyn_stack_size == 0) {
        // If stack_user_data.dyn_size == 0, it may be because the kernel is missing the patch
        // that updates dyn_size, as on N9 (see b/22612370). So assume all the stack data is
        // valid if dyn_size == 0.
        // TODO: Add cts test.
        dyn_stack_size = stack_size;
      }
      // When simpleperf requests the kernel to dump 64K of stack per sample, it allocates 64K of
      // space in each sample to store the stack data. However, a thread may use less than 64K of
      // stack, so not all of the dumped stack data is valid. We only need to keep the valid part,
      // whose size is dyn_stack_size.
      uint64_t new_stack_size = std::min<uint64_t>(dyn_stack_size, stack_size_limit);
      if (stack_size > new_stack_size) {
        // Remove part of the stack data.
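        // PERF_SAMPLE_STACK_USER data is laid out as: u64 size, char data[size], u64 dyn_size.
        // Rebuild the record with the smaller stack, writing new_stack_size both as the size
        // field and as the trailing dyn_size field.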
        perf_event_header new_header = header;
        new_header.size -= stack_size - new_stack_size;
        char* p = record_buffer_.AllocWriteSpace(new_header.size);
        if (p != nullptr) {
          memcpy(p, &new_header, sizeof(new_header));
          size_t pos = sizeof(new_header);
          kernel_record_reader->ReadRecord(pos, stack_size_pos - pos, p + pos);
          memcpy(p + stack_size_pos, &new_stack_size, sizeof(uint64_t));
          pos = stack_size_pos + sizeof(uint64_t);
          kernel_record_reader->ReadRecord(pos, new_stack_size, p + pos);
          memcpy(p + pos + new_stack_size, &new_stack_size, sizeof(uint64_t));
          record_buffer_.FinishWrite();
          if (new_stack_size < dyn_stack_size) {
            stat_.cut_stack_samples++;
          }
        } else {
          stat_.lost_samples++;
        }
        return;
      }
    }
  }
  char* p = record_buffer_.AllocWriteSpace(header.size);
  if (p != nullptr) {
    kernel_record_reader->ReadRecord(0, header.size, p);
    record_buffer_.FinishWrite();
  } else {
    if (header.type == PERF_RECORD_SAMPLE) {
      stat_.lost_samples++;
    } else {
      stat_.lost_non_samples++;
    }
  }
}

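// Move aux data (e.g. ETM instruction tracing data) from the kernel's aux buffers into the record
// buffer, stored as an AuxTraceRecord followed by the raw data. The available data may wrap
// around the aux ring buffer and thus arrive in up to two chunks; the stored size is aligned to
// 8 bytes and any padding is zero filled.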
void RecordReadThread::ReadAuxDataFromKernelBuffer(bool* has_data) {
  for (auto& reader : kernel_record_readers_) {
    EventFd* event_fd = reader.GetEventFd();
    if (event_fd->HasAuxBuffer()) {
      char* buf[2];
      size_t size[2];
      uint64_t offset = event_fd->GetAvailableAuxData(&buf[0], &size[0], &buf[1], &size[1]);
      size_t aux_size = size[0] + size[1];
      if (aux_size == 0) {
        continue;
      }
      *has_data = true;
      AuxTraceRecord auxtrace(Align(aux_size, 8), offset, event_fd->Cpu(), 0, event_fd->Cpu());
      size_t alloc_size = auxtrace.size() + auxtrace.data->aux_size;
      if (record_buffer_.GetFreeSize() < alloc_size + record_buffer_critical_level_) {
        stat_.lost_aux_data_size += aux_size;
      } else {
        char* p = record_buffer_.AllocWriteSpace(alloc_size);
        CHECK(p != nullptr);
        MoveToBinaryFormat(auxtrace.Binary(), auxtrace.size(), p);
        MoveToBinaryFormat(buf[0], size[0], p);
        if (size[1] != 0) {
          MoveToBinaryFormat(buf[1], size[1], p);
        }
        size_t pad_size = auxtrace.data->aux_size - aux_size;
        if (pad_size != 0) {
          uint64_t pad = 0;
          memcpy(p, &pad, pad_size);
        }
        record_buffer_.FinishWrite();
        stat_.aux_data_size += aux_size;
        LOG(DEBUG) << "record aux data " << aux_size << " bytes";
      }
      event_fd->DiscardAuxData(aux_size);
    }
  }
}

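// Wake the main thread by writing one byte to the data pipe, but only when no earlier
// notification is still pending, so the pipe never holds more than one unread notification.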
bool RecordReadThread::SendDataNotificationToMainThread() {
  if (!has_data_notification_.load(std::memory_order_relaxed)) {
    has_data_notification_ = true;
    char unused = 0;
    if (TEMP_FAILURE_RETRY(write(write_data_fd_, &unused, 1)) != 1) {
      PLOG(ERROR) << "write";
      return false;
    }
  }
  return true;
}

}  // namespace simpleperf