/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "event_selection_set.h"

#include <algorithm>
#include <atomic>
#include <thread>

#include <android-base/logging.h>
#include <android-base/strings.h>
#include <android-base/stringprintf.h>

#include "environment.h"
#include "ETMRecorder.h"
#include "event_attr.h"
#include "event_type.h"
#include "IOEventLoop.h"
#include "perf_regs.h"
#include "tracing.h"
#include "utils.h"
#include "RecordReadThread.h"

using android::base::StringPrintf;
using namespace simpleperf;

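// Each of the Is*Supported() probes below builds a candidate perf_event_attr with the feature of
// interest enabled and checks whether the kernel accepts it, so callers can degrade gracefully on
// older kernels.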
bool IsBranchSamplingSupported() {
  const EventType* type = FindEventTypeByName("cpu-cycles");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
  attr.branch_sample_type = PERF_SAMPLE_BRANCH_ANY;
  return IsEventAttrSupported(attr, type->name);
}

bool IsDwarfCallChainSamplingSupported() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.sample_type |=
      PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
  attr.exclude_callchain_user = 1;
  attr.sample_regs_user = GetSupportedRegMask(GetBuildArch());
  attr.sample_stack_user = 8192;
  return IsEventAttrSupported(attr, type->name);
}

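// Checks whether the kernel can dump registers for tracepoint events by sampling a single
// sched:sched_switch event on a short-lived helper thread and verifying that the sample carries
// a non-zero instruction pointer.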
bool IsDumpingRegsForTracepointEventsSupported() {
  const EventType* event_type = FindEventTypeByName("sched:sched_switch", false);
  if (event_type == nullptr) {
    return false;
  }
  std::atomic<bool> done(false);
  std::atomic<pid_t> thread_id(0);
  std::thread thread([&]() {
    thread_id = gettid();
    while (!done) {
      usleep(1);
    }
    usleep(1);  // Make a sched out to generate one sample.
  });
  while (thread_id == 0) {
    usleep(1);
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*event_type);
  attr.freq = 0;
  attr.sample_period = 1;
  std::unique_ptr<EventFd> event_fd =
      EventFd::OpenEventFile(attr, thread_id, -1, nullptr, event_type->name);
  if (event_fd == nullptr || !event_fd->CreateMappedBuffer(4, true)) {
    done = true;
    thread.join();
    return false;
  }
  done = true;
  thread.join();

  std::vector<char> buffer = event_fd->GetAvailableMmapData();
  std::vector<std::unique_ptr<Record>> records =
      ReadRecordsFromBuffer(attr, buffer.data(), buffer.size());
  for (auto& r : records) {
    if (r->type() == PERF_RECORD_SAMPLE) {
      auto& record = *static_cast<SampleRecord*>(r.get());
      if (record.ip_data.ip != 0) {
        return true;
      }
    }
  }
  return false;
}

bool IsSettingClockIdSupported() {
  // Do the real check only once and keep the result in a static variable.
  static int is_supported = -1;
  if (is_supported == -1) {
    const EventType* type = FindEventTypeByName("cpu-clock");
    if (type == nullptr) {
      is_supported = 0;
    } else {
      // Check whether the kernel supports setting a clockid, which was added in kernel 4.0.
      // Checking a single clockid is enough, because all the clockids we need already existed
      // before kernel 4.0.
      perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
      attr.use_clockid = 1;
      attr.clockid = CLOCK_MONOTONIC;
      is_supported = IsEventAttrSupported(attr, type->name) ? 1 : 0;
    }
  }
  return is_supported;
}

bool IsMmap2Supported() {
  const EventType* type = FindEventTypeByName("cpu-clock");
  if (type == nullptr) {
    return false;
  }
  perf_event_attr attr = CreateDefaultPerfEventAttr(*type);
  attr.mmap2 = 1;
  return IsEventAttrSupported(attr, type->name);
}

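// Renders an address filter in the textual form expected by the kernel's perf address-filter
// interface, e.g. "filter 0x1000/0x200@/system/lib64/libc.so" (illustrative values); the string
// is applied later through EventFd::SetFilter() in ApplyAddrFilters().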
std::string AddrFilter::ToString() const {
  switch (type) {
    case FILE_RANGE:
      return StringPrintf("filter 0x%" PRIx64 "/0x%" PRIx64 "@%s", addr, size, file_path.c_str());
    case AddrFilter::FILE_START:
      return StringPrintf("start 0x%" PRIx64 "@%s", addr, file_path.c_str());
    case AddrFilter::FILE_STOP:
      return StringPrintf("stop 0x%" PRIx64 "@%s", addr, file_path.c_str());
    case AddrFilter::KERNEL_RANGE:
      return StringPrintf("filter 0x%" PRIx64 "/0x%" PRIx64, addr, size);
    case AddrFilter::KERNEL_START:
      return StringPrintf("start 0x%" PRIx64, addr);
    case AddrFilter::KERNEL_STOP:
      return StringPrintf("stop 0x%" PRIx64, addr);
  }
}

EventSelectionSet::EventSelectionSet(bool for_stat_cmd)
    : for_stat_cmd_(for_stat_cmd), loop_(new IOEventLoop) {}

EventSelectionSet::~EventSelectionSet() {}

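// Parses event_name into an EventSelection: fills in a default perf_event_attr for the event
// type, applies the parsed modifiers (exclude user/kernel/hv/host/guest, precise_ip), chooses a
// default sample period/frequency for the record command, and rejects events that are
// unsupported on the device or already present in another group.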
bool EventSelectionSet::BuildAndCheckEventSelection(const std::string& event_name, bool first_event,
                                                    EventSelection* selection) {
  std::unique_ptr<EventTypeAndModifier> event_type = ParseEventType(event_name);
  if (event_type == nullptr) {
    return false;
  }
  if (for_stat_cmd_) {
    if (event_type->event_type.name == "cpu-clock" ||
        event_type->event_type.name == "task-clock") {
      if (event_type->exclude_user || event_type->exclude_kernel) {
        LOG(ERROR) << "Modifier u and modifier k used in event type "
                   << event_type->event_type.name << " are not supported by the kernel.";
        return false;
      }
    }
  }
  selection->event_type_modifier = *event_type;
  selection->event_attr = CreateDefaultPerfEventAttr(event_type->event_type);
  selection->event_attr.exclude_user = event_type->exclude_user;
  selection->event_attr.exclude_kernel = event_type->exclude_kernel;
  selection->event_attr.exclude_hv = event_type->exclude_hv;
  selection->event_attr.exclude_host = event_type->exclude_host;
  selection->event_attr.exclude_guest = event_type->exclude_guest;
  selection->event_attr.precise_ip = event_type->precise_ip;
  if (IsEtmEventType(event_type->event_type.type)) {
    auto& etm_recorder = ETMRecorder::GetInstance();
    if (!etm_recorder.CheckEtmSupport()) {
      return false;
    }
    ETMRecorder::GetInstance().SetEtmPerfEventAttr(&selection->event_attr);
  }
  bool set_default_sample_freq = false;
  if (!for_stat_cmd_) {
    if (event_type->event_type.type == PERF_TYPE_TRACEPOINT) {
      selection->event_attr.freq = 0;
      selection->event_attr.sample_period = DEFAULT_SAMPLE_PERIOD_FOR_TRACEPOINT_EVENT;
    } else if (IsEtmEventType(event_type->event_type.type)) {
      // ETM recording has no sample frequency to adjust. Using a sample frequency only wastes
      // time enabling/disabling ETM devices, so don't adjust the frequency by default.
      selection->event_attr.freq = 0;
      selection->event_attr.sample_period = 1;
    } else {
      selection->event_attr.freq = 1;
      // Setting the default sample freq here may print the message "Adjust sample freq to max
      // allowed sample freq", which is misleading because the default sample freq may not be the
      // final sample freq we use. So use the minimum sample freq (1) here.
      selection->event_attr.sample_freq = 1;
      set_default_sample_freq = true;
    }
    // We only need to dump mmap and comm records for the first event type, because all event
    // types monitor the same processes.
    if (first_event) {
      selection->event_attr.mmap = 1;
      selection->event_attr.comm = 1;
      if (IsMmap2Supported()) {
        selection->event_attr.mmap2 = 1;
      }
    }
  }
  // PMU events are provided by the kernel, so they should be supported.
  if (!event_type->event_type.IsPmuEvent() &&
      !IsEventAttrSupported(selection->event_attr, selection->event_type_modifier.name)) {
    LOG(ERROR) << "Event type '" << event_type->name << "' is not supported on the device";
    return false;
  }
  if (set_default_sample_freq) {
    selection->event_attr.sample_freq = DEFAULT_SAMPLE_FREQ_FOR_NONTRACEPOINT_EVENT;
  }

  selection->event_fds.clear();

  for (const auto& group : groups_) {
    for (const auto& sel : group) {
      if (sel.event_type_modifier.name == selection->event_type_modifier.name) {
        LOG(ERROR) << "Event type '" << sel.event_type_modifier.name
                   << "' appears more than once";
        return false;
      }
    }
  }
  return true;
}

bool EventSelectionSet::AddEventType(const std::string& event_name, size_t* group_id) {
  return AddEventGroup(std::vector<std::string>(1, event_name), group_id);
}

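// Adds a group of events to be scheduled onto the PMU together. A minimal usage sketch
// (hypothetical caller, not part of this file):
//   EventSelectionSet event_selection_set(false);
//   size_t group_id;
//   event_selection_set.AddEventGroup({"cpu-cycles", "instructions"}, &group_id);
// Only the first event of the first group dumps mmap/comm records (see
// BuildAndCheckEventSelection()), since every event monitors the same processes.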
bool EventSelectionSet::AddEventGroup(const std::vector<std::string>& event_names,
                                      size_t* group_id) {
  EventSelectionGroup group;
  bool first_event = groups_.empty();
  bool first_in_group = true;
  for (const auto& event_name : event_names) {
    EventSelection selection;
    if (!BuildAndCheckEventSelection(event_name, first_event, &selection)) {
      return false;
    }
    if (IsEtmEventType(selection.event_attr.type)) {
      has_aux_trace_ = true;
    }
    if (first_in_group) {
      auto& event_type = selection.event_type_modifier.event_type;
      if (event_type.IsPmuEvent()) {
        selection.allowed_cpus = event_type.GetPmuCpumask();
      }
    }
    first_event = false;
    first_in_group = false;
    group.push_back(std::move(selection));
  }
  groups_.push_back(std::move(group));
  UnionSampleType();
  if (group_id != nullptr) {
    *group_id = groups_.size() - 1;
  }
  return true;
}

std::vector<const EventType*> EventSelectionSet::GetEvents() const {
  std::vector<const EventType*> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      result.push_back(&selection.event_type_modifier.event_type);
    }
  }
  return result;
}

std::vector<const EventType*> EventSelectionSet::GetTracepointEvents() const {
  std::vector<const EventType*> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (selection.event_type_modifier.event_type.type == PERF_TYPE_TRACEPOINT) {
        result.push_back(&selection.event_type_modifier.event_type);
      }
    }
  }
  return result;
}

bool EventSelectionSet::ExcludeKernel() const {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (!selection.event_type_modifier.exclude_kernel) {
        return false;
      }
    }
  }
  return true;
}

std::vector<EventAttrWithId> EventSelectionSet::GetEventAttrWithId() const {
  std::vector<EventAttrWithId> result;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      EventAttrWithId attr_id;
      attr_id.attr = &selection.event_attr;
      for (const auto& fd : selection.event_fds) {
        attr_id.ids.push_back(fd->Id());
      }
      result.push_back(attr_id);
    }
  }
  return result;
}

// Unioning the sample types of different event attrs makes reading sample records in perf.data
// easier.
void EventSelectionSet::UnionSampleType() {
  uint64_t sample_type = 0;
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      sample_type |= selection.event_attr.sample_type;
    }
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type = sample_type;
    }
  }
}

void EventSelectionSet::SetEnableOnExec(bool enable) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      // If sampling is enabled on exec, it is disabled at startup; otherwise it should be
      // enabled at startup. Don't use ioctl(PERF_EVENT_IOC_ENABLE) to enable it after
      // perf_event_open(), because some Android kernels can't handle ioctl() well when
      // cpu-hotplug happens. See http://b/25193162.
      if (enable) {
        selection.event_attr.enable_on_exec = 1;
        selection.event_attr.disabled = 1;
      } else {
        selection.event_attr.enable_on_exec = 0;
        selection.event_attr.disabled = 0;
      }
    }
  }
}

bool EventSelectionSet::GetEnableOnExec() {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (selection.event_attr.enable_on_exec == 0) {
        return false;
      }
    }
  }
  return true;
}

void EventSelectionSet::SampleIdAll() {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_id_all = 1;
    }
  }
}

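// Applies either a sampling frequency (samples per second; the kernel adjusts the period
// dynamically) or a fixed sample period (events per sample) to every event in the given group.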
void EventSelectionSet::SetSampleSpeed(size_t group_id, const SampleSpeed& speed) {
  CHECK_LT(group_id, groups_.size());
  for (auto& selection : groups_[group_id]) {
    if (speed.UseFreq()) {
      selection.event_attr.freq = 1;
      selection.event_attr.sample_freq = speed.sample_freq;
    } else {
      selection.event_attr.freq = 0;
      selection.event_attr.sample_period = speed.sample_period;
    }
  }
}

bool EventSelectionSet::SetBranchSampling(uint64_t branch_sample_type) {
  if (branch_sample_type != 0 &&
      (branch_sample_type &
       (PERF_SAMPLE_BRANCH_ANY | PERF_SAMPLE_BRANCH_ANY_CALL |
        PERF_SAMPLE_BRANCH_ANY_RETURN | PERF_SAMPLE_BRANCH_IND_CALL)) == 0) {
    LOG(ERROR) << "Invalid branch_sample_type: 0x" << std::hex << branch_sample_type;
    return false;
  }
  if (branch_sample_type != 0 && !IsBranchSamplingSupported()) {
    LOG(ERROR) << "branch stack sampling is not supported on this device.";
    return false;
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      perf_event_attr& attr = selection.event_attr;
      if (branch_sample_type != 0) {
        attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
      } else {
        attr.sample_type &= ~PERF_SAMPLE_BRANCH_STACK;
      }
      attr.branch_sample_type = branch_sample_type;
    }
  }
  return true;
}

void EventSelectionSet::EnableFpCallChainSampling() {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
    }
  }
}

bool EventSelectionSet::EnableDwarfCallChainSampling(uint32_t dump_stack_size) {
  if (!IsDwarfCallChainSamplingSupported()) {
    LOG(ERROR) << "dwarf callchain sampling is not supported on this device.";
    return false;
  }
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.sample_type |=
          PERF_SAMPLE_CALLCHAIN | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
      selection.event_attr.exclude_callchain_user = 1;
      selection.event_attr.sample_regs_user = GetSupportedRegMask(GetMachineArch());
      selection.event_attr.sample_stack_user = dump_stack_size;
    }
  }
  return true;
}

void EventSelectionSet::SetInherit(bool enable) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.inherit = (enable ? 1 : 0);
    }
  }
}

void EventSelectionSet::SetClockId(int clock_id) {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      selection.event_attr.use_clockid = 1;
      selection.event_attr.clockid = clock_id;
    }
  }
}

bool EventSelectionSet::NeedKernelSymbol() const {
  for (const auto& group : groups_) {
    for (const auto& selection : group) {
      if (!selection.event_type_modifier.exclude_kernel) {
        return true;
      }
    }
  }
  return false;
}

void EventSelectionSet::SetRecordNotExecutableMaps(bool record) {
  // We only need to dump non-executable mmap records for the first event type.
  groups_[0][0].event_attr.mmap_data = record ? 1 : 0;
}

bool EventSelectionSet::RecordNotExecutableMaps() const {
  return groups_[0][0].event_attr.mmap_data == 1;
}

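// Attaches a filter, written in the kernel's tracepoint filter syntax (e.g. prev_comm == "sleep"
// for sched:sched_switch), to the most recently added tracepoint event. String operands must be
// quoted on kernel >= 4.19; AdjustTracepointFilter() below handles the quoting difference.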
bool EventSelectionSet::SetTracepointFilter(const std::string& filter) {
  // 1. Find the tracepoint event to set the filter on.
  EventSelection* selection = nullptr;
  if (!groups_.empty()) {
    auto& group = groups_.back();
    if (group.size() == 1) {
      if (group[0].event_attr.type == PERF_TYPE_TRACEPOINT) {
        selection = &group[0];
      }
    }
  }
  if (selection == nullptr) {
    LOG(ERROR) << "No tracepoint event before filter: " << filter;
    return false;
  }

  // 2. Check the format of the filter.
  int kernel_major;
  int kernel_minor;
  bool use_quote = false;
  // Quotes are needed for string operands in kernel >= 4.19, probably after the patch
  // "tracing: Rewrite filter logic to be simpler and faster".
  if (GetKernelVersion(&kernel_major, &kernel_minor)) {
    if (kernel_major >= 5 || (kernel_major == 4 && kernel_minor >= 19)) {
      use_quote = true;
    }
  }

  FieldNameSet used_fields;
  auto adjusted_filter = AdjustTracepointFilter(filter, use_quote, &used_fields);
  if (!adjusted_filter) {
    return false;
  }

  // 3. Check if the used fields are available in the tracepoint event.
  auto& event_type = selection->event_type_modifier.event_type;
  if (auto opt_fields = GetFieldNamesForTracepointEvent(event_type); opt_fields) {
    FieldNameSet& fields = opt_fields.value();
    for (const auto& field : used_fields) {
      if (fields.find(field) == fields.end()) {
        LOG(ERROR) << "field name " << field << " used in \"" << filter << "\" doesn't exist in "
                   << event_type.name << ". Available fields are "
                   << android::base::Join(fields, ",");
        return false;
      }
    }
  }

  // 4. Connect the filter to the event.
  selection->tracepoint_filter = adjusted_filter.value();
  return true;
}

static bool CheckIfCpusOnline(const std::vector<int>& cpus) {
  std::vector<int> online_cpus = GetOnlineCpus();
  for (const auto& cpu : cpus) {
    if (std::find(online_cpus.begin(), online_cpus.end(), cpu) == online_cpus.end()) {
      LOG(ERROR) << "cpu " << cpu << " is not online.";
      return false;
    }
  }
  return true;
}

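// Opens one perf event fd per event in the group for the given (tid, cpu). The first fd opened
// becomes the group leader; later events pass it as group_fd so the kernel schedules the whole
// group onto the PMU together.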
bool EventSelectionSet::OpenEventFilesOnGroup(EventSelectionGroup& group, pid_t tid, int cpu,
                                              std::string* failed_event_type) {
  std::vector<std::unique_ptr<EventFd>> event_fds;
  // Given a tid and cpu, events in the same group should either all open successfully or all
  // fail to open.
  EventFd* group_fd = nullptr;
  for (auto& selection : group) {
    std::unique_ptr<EventFd> event_fd = EventFd::OpenEventFile(
        selection.event_attr, tid, cpu, group_fd, selection.event_type_modifier.name, false);
    if (!event_fd) {
      *failed_event_type = selection.event_type_modifier.name;
      return false;
    }
    LOG(VERBOSE) << "OpenEventFile for " << event_fd->Name();
    event_fds.push_back(std::move(event_fd));
    if (group_fd == nullptr) {
      group_fd = event_fds.back().get();
    }
  }
  for (size_t i = 0; i < group.size(); ++i) {
    group[i].event_fds.push_back(std::move(event_fds[i]));
  }
  return true;
}

static std::set<pid_t> PrepareThreads(const std::set<pid_t>& processes,
                                      const std::set<pid_t>& threads) {
  std::set<pid_t> result = threads;
  for (auto& pid : processes) {
    std::vector<pid_t> tids = GetThreadsInProcess(pid);
    result.insert(tids.begin(), tids.end());
  }
  return result;
}

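// Opens perf event files for every (event group, thread, cpu) combination. A cpu value of -1
// means "any cpu", i.e. the kernel follows each monitored thread across cpus.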
bool EventSelectionSet::OpenEventFiles(const std::vector<int>& cpus) {
  std::vector<int> monitored_cpus;
  if (cpus.empty()) {
    monitored_cpus = GetOnlineCpus();
  } else if (cpus.size() == 1 && cpus[0] == -1) {
    monitored_cpus = {-1};
  } else {
    if (!CheckIfCpusOnline(cpus)) {
      return false;
    }
    monitored_cpus = cpus;
  }
  std::set<pid_t> threads = PrepareThreads(processes_, threads_);
  for (auto& group : groups_) {
    size_t success_count = 0;
    std::string failed_event_type;
    for (const auto tid : threads) {
      const std::vector<int>* pcpus = &monitored_cpus;
      if (!group[0].allowed_cpus.empty()) {
        // Override the cpu list if the event's PMU has a cpumask, as such PMUs are agnostic to
        // cpus and it is meaningless to specify cpus for them.
        pcpus = &group[0].allowed_cpus;
      }
      for (const auto& cpu : *pcpus) {
        if (OpenEventFilesOnGroup(group, tid, cpu, &failed_event_type)) {
          success_count++;
        }
      }
    }
    // We can't guarantee that the perf event file opens successfully for each thread on each
    // cpu: threads may exit between PrepareThreads() and OpenEventFilesOnGroup(), and cpus may
    // be offlined between GetOnlineCpus() and OpenEventFilesOnGroup(). So we only check that at
    // least one thread can be monitored for each event group.
    if (success_count == 0) {
      int error_number = errno;
      PLOG(ERROR) << "failed to open perf event file for event_type " << failed_event_type;
      if (error_number == EMFILE) {
        LOG(ERROR) << "Please increase the hard limit of open file numbers.";
      }
      return false;
    }
  }
  return ApplyFilters();
}

bool EventSelectionSet::ApplyFilters() {
  return ApplyAddrFilters() && ApplyTracepointFilters();
}

bool EventSelectionSet::ApplyAddrFilters() {
  if (addr_filters_.empty()) {
    return true;
  }
  if (!has_aux_trace_) {
    LOG(ERROR) << "addr filters only take effect in cs-etm instruction tracing";
    return false;
  }

  // Check filter count limit.
  size_t required_etm_filter_count = 0;
  for (auto& filter : addr_filters_) {
    // A range filter needs two etm filters.
    required_etm_filter_count +=
        (filter.type == AddrFilter::FILE_RANGE || filter.type == AddrFilter::KERNEL_RANGE) ? 2 : 1;
  }
  size_t etm_filter_count = ETMRecorder::GetInstance().GetAddrFilterPairs() * 2;
  if (etm_filter_count < required_etm_filter_count) {
    LOG(ERROR) << "needed " << required_etm_filter_count << " etm filters, but only "
               << etm_filter_count << " filters are available.";
    return false;
  }

  std::string filter_str;
  for (auto& filter : addr_filters_) {
    if (!filter_str.empty()) {
      filter_str += ',';
    }
    filter_str += filter.ToString();
  }

  for (auto& group : groups_) {
    for (auto& selection : group) {
      if (IsEtmEventType(selection.event_type_modifier.event_type.type)) {
        for (auto& event_fd : selection.event_fds) {
          if (!event_fd->SetFilter(filter_str)) {
            return false;
          }
        }
      }
    }
  }
  return true;
}

bool EventSelectionSet::ApplyTracepointFilters() {
  for (auto& group : groups_) {
    for (auto& selection : group) {
      if (!selection.tracepoint_filter.empty()) {
        for (auto& event_fd : selection.event_fds) {
          if (!event_fd->SetFilter(selection.tracepoint_filter)) {
            return false;
          }
        }
      }
    }
  }
  return true;
}

static bool ReadCounter(EventFd* event_fd, CounterInfo* counter) {
  if (!event_fd->ReadCounter(&counter->counter)) {
    return false;
  }
  counter->tid = event_fd->ThreadId();
  counter->cpu = event_fd->Cpu();
  return true;
}

bool EventSelectionSet::ReadCounters(std::vector<CountersInfo>* counters) {
  counters->clear();
  for (size_t i = 0; i < groups_.size(); ++i) {
    for (auto& selection : groups_[i]) {
      CountersInfo counters_info;
      counters_info.group_id = i;
      counters_info.event_name = selection.event_type_modifier.event_type.name;
      counters_info.event_modifier = selection.event_type_modifier.modifier;
      counters_info.counters = selection.hotplugged_counters;
      for (auto& event_fd : selection.event_fds) {
        CounterInfo counter;
        if (!ReadCounter(event_fd.get(), &counter)) {
          return false;
        }
        counters_info.counters.push_back(counter);
      }
      counters->push_back(counters_info);
    }
  }
  return true;
}

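// Buffer management is delegated to a RecordReadThread: once event fds are registered in
// PrepareToReadMmapEventData(), the thread mmaps the kernel ring buffers (and aux buffers, used
// for ETM data) and copies records into a userspace buffer of record_buffer_size bytes.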
bool EventSelectionSet::MmapEventFiles(size_t min_mmap_pages, size_t max_mmap_pages,
                                       size_t aux_buffer_size, size_t record_buffer_size,
                                       bool allow_cutting_samples, bool exclude_perf) {
  record_read_thread_.reset(
      new simpleperf::RecordReadThread(record_buffer_size, groups_[0][0].event_attr, min_mmap_pages,
                                       max_mmap_pages, aux_buffer_size, allow_cutting_samples,
                                       exclude_perf));
  return true;
}

bool EventSelectionSet::PrepareToReadMmapEventData(const std::function<bool(Record*)>& callback) {
  // Prepare record callback function.
  record_callback_ = callback;
  if (!record_read_thread_->RegisterDataCallback(*loop_,
                                                 [this]() { return ReadMmapEventData(true); })) {
    return false;
  }
  std::vector<EventFd*> event_fds;
  for (auto& group : groups_) {
    for (auto& selection : group) {
      for (auto& event_fd : selection.event_fds) {
        event_fds.push_back(event_fd.get());
      }
    }
  }
  return record_read_thread_->AddEventFds(event_fds);
}

bool EventSelectionSet::SyncKernelBuffer() {
  return record_read_thread_->SyncKernelBuffer();
}

// Read records from the RecordBuffer. If with_time_limit is false, read until the RecordBuffer
// is empty; otherwise stop after 100 ms or when the record buffer is empty.
bool EventSelectionSet::ReadMmapEventData(bool with_time_limit) {
  uint64_t start_time_in_ns;
  if (with_time_limit) {
    start_time_in_ns = GetSystemClock();
  }
  std::unique_ptr<Record> r;
  while ((r = record_read_thread_->GetRecord()) != nullptr) {
    if (!record_callback_(r.get())) {
      return false;
    }
    if (with_time_limit && (GetSystemClock() - start_time_in_ns) >= 1e8) {
      break;
    }
  }
  return true;
}

bool EventSelectionSet::FinishReadMmapEventData() {
  // Stop the read thread, so we don't get more records beyond the current time.
  if (!SyncKernelBuffer() || !record_read_thread_->StopReadThread()) {
    return false;
  }
  if (!ReadMmapEventData(false)) {
    return false;
  }
  return true;
}

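// Periodically checks whether any monitored thread or process is still alive, and exits the IO
// event loop once none are (or once no sampler is left).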
bool EventSelectionSet::StopWhenNoMoreTargets(double check_interval_in_sec) {
  return loop_->AddPeriodicEvent(SecondToTimeval(check_interval_in_sec),
                                 [&]() { return CheckMonitoredTargets(); });
}

bool EventSelectionSet::CheckMonitoredTargets() {
  if (!HasSampler()) {
    return loop_->ExitLoop();
  }
  for (const auto& tid : threads_) {
    if (IsThreadAlive(tid)) {
      return true;
    }
  }
  for (const auto& pid : processes_) {
    if (IsThreadAlive(pid)) {
      return true;
    }
  }
  return loop_->ExitLoop();
}

bool EventSelectionSet::HasSampler() {
  for (auto& group : groups_) {
    for (auto& sel : group) {
      if (!sel.event_fds.empty()) {
        return true;
      }
    }
  }
  return false;
}

bool EventSelectionSet::SetEnableEvents(bool enable) {
  for (auto& group : groups_) {
    for (auto& sel : group) {
      for (auto& fd : sel.event_fds) {
        if (!fd->SetEnableEvent(enable)) {
          return false;
        }
      }
    }
  }
  return true;
}