/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "storaged"

#include <stdint.h>
#include <time.h>

#include <string>
#include <unordered_map>
#include <unordered_set>

#include <android/content/pm/IPackageManagerNative.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/strings.h>
#include <android-base/stringprintf.h>
#include <binder/IServiceManager.h>
#include <log/log_event_list.h>

#include "storaged.h"
#include "storaged_uid_monitor.h"

using namespace android;
using namespace android::base;
using namespace android::content::pm;
using namespace android::os::storaged;
using namespace storaged_proto;

namespace {

bool refresh_uid_names;
const char* UID_IO_STATS_PATH = "/proc/uid_io/stats";

} // namespace

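// Takes the monitor lock and returns a snapshot of the current per-UID I/O
// counters, keyed by UID.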
get_uid_io_stats()52 std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats()
53 {
54 Mutex::Autolock _l(uidm_mutex_);
55 return get_uid_io_stats_locked();
56 };
57
58 /* return true on parse success and false on failure */
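/*
 * A per-UID line in /proc/uid_io/stats is expected to contain at least 11
 * space-separated fields, parsed here as:
 *   uid fg_rchar fg_wchar fg_read_bytes fg_write_bytes
 *       bg_rchar bg_wchar bg_read_bytes bg_write_bytes fg_fsync bg_fsync
 */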
bool uid_info::parse_uid_io_stats(std::string&& s)
{
    std::vector<std::string> fields = Split(s, " ");
    if (fields.size() < 11 ||
        !ParseUint(fields[0], &uid) ||
        !ParseUint(fields[1], &io[FOREGROUND].rchar) ||
        !ParseUint(fields[2], &io[FOREGROUND].wchar) ||
        !ParseUint(fields[3], &io[FOREGROUND].read_bytes) ||
        !ParseUint(fields[4], &io[FOREGROUND].write_bytes) ||
        !ParseUint(fields[5], &io[BACKGROUND].rchar) ||
        !ParseUint(fields[6], &io[BACKGROUND].wchar) ||
        !ParseUint(fields[7], &io[BACKGROUND].read_bytes) ||
        !ParseUint(fields[8], &io[BACKGROUND].write_bytes) ||
        !ParseUint(fields[9], &io[FOREGROUND].fsync) ||
        !ParseUint(fields[10], &io[BACKGROUND].fsync)) {
        LOG(WARNING) << "Invalid uid I/O stats: \"" << s << "\"";
        return false;
    }
    return true;
}

/* return true on parse success and false on failure */
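/*
 * Task lines in /proc/uid_io/stats have the form
 *   task,<comm>,<pid>,<fg/bg counters...>
 * The comm field may itself contain commas, so the numeric fields are located
 * relative to the end of the line and comm is re-joined from the remaining
 * fields.
 */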
bool task_info::parse_task_io_stats(std::string&& s)
{
    std::vector<std::string> fields = Split(s, ",");
    size_t size = fields.size();
    if (size < 13 ||
        !ParseInt(fields[size - 11], &pid) ||
        !ParseUint(fields[size - 10], &io[FOREGROUND].rchar) ||
        !ParseUint(fields[size - 9], &io[FOREGROUND].wchar) ||
        !ParseUint(fields[size - 8], &io[FOREGROUND].read_bytes) ||
        !ParseUint(fields[size - 7], &io[FOREGROUND].write_bytes) ||
        !ParseUint(fields[size - 6], &io[BACKGROUND].rchar) ||
        !ParseUint(fields[size - 5], &io[BACKGROUND].wchar) ||
        !ParseUint(fields[size - 4], &io[BACKGROUND].read_bytes) ||
        !ParseUint(fields[size - 3], &io[BACKGROUND].write_bytes) ||
        !ParseUint(fields[size - 2], &io[FOREGROUND].fsync) ||
        !ParseUint(fields[size - 1], &io[BACKGROUND].fsync)) {
        LOG(WARNING) << "Invalid task I/O stats: \"" << s << "\"";
        return false;
    }
    comm = Join(std::vector<std::string>(
                fields.begin() + 1, fields.end() - 11), ',');
    return true;
}

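// Returns true only if every (read/write, fg/bg, charger on/off) counter is zero.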
bool io_usage::is_zero() const
{
    for (int i = 0; i < IO_TYPES; i++) {
        for (int j = 0; j < UID_STATS; j++) {
            for (int k = 0; k < CHARGER_STATS; k++) {
                if (bytes[i][j][k])
                    return false;
            }
        }
    }
    return true;
}

namespace {

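// Resolves numeric UIDs to package names through the "package_native" binder
// service. Entries that cannot be resolved keep the numeric name they were
// given by the caller.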
void get_uid_names(const vector<int>& uids, const vector<std::string*>& uid_names)
{
    sp<IServiceManager> sm = defaultServiceManager();
    if (sm == NULL) {
        LOG(ERROR) << "defaultServiceManager failed";
        return;
    }

    sp<IBinder> binder = sm->getService(String16("package_native"));
    if (binder == NULL) {
        LOG(ERROR) << "getService package_native failed";
        return;
    }

    sp<IPackageManagerNative> package_mgr = interface_cast<IPackageManagerNative>(binder);
    std::vector<std::string> names;
    binder::Status status = package_mgr->getNamesForUids(uids, &names);
    if (!status.isOk()) {
        LOG(ERROR) << "package_native::getNamesForUids failed: " << status.exceptionMessage();
        return;
    }

    for (uint32_t i = 0; i < uid_names.size(); i++) {
        if (!names[i].empty()) {
            *uid_names[i] = names[i];
        }
    }

    refresh_uid_names = false;
}

} // namespace

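// Reads and parses /proc/uid_io/stats while holding the monitor lock. Lines
// that do not start with "task" describe a UID's aggregate I/O; lines that do
// start with "task" describe individual tasks of the most recently parsed UID.
// UID names default to the numeric UID and are refreshed from package manager
// whenever a UID is seen for the first time.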
std::unordered_map<uint32_t, uid_info> uid_monitor::get_uid_io_stats_locked()
{
    std::unordered_map<uint32_t, uid_info> uid_io_stats;
    std::string buffer;
    if (!ReadFileToString(UID_IO_STATS_PATH, &buffer)) {
        PLOG(ERROR) << UID_IO_STATS_PATH << ": ReadFileToString failed";
        return uid_io_stats;
    }

    std::vector<std::string> io_stats = Split(std::move(buffer), "\n");
    uid_info u;
    vector<int> uids;
    vector<std::string*> uid_names;

    for (uint32_t i = 0; i < io_stats.size(); i++) {
        if (io_stats[i].empty()) {
            continue;
        }

        // compare() returns non-zero when the line does not begin with "task",
        // i.e. when it is a per-UID summary line.
        if (io_stats[i].compare(0, 4, "task")) {
            if (!u.parse_uid_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid] = u;
            uid_io_stats[u.uid].name = std::to_string(u.uid);
            uids.push_back(u.uid);
            uid_names.push_back(&uid_io_stats[u.uid].name);
            if (last_uid_io_stats_.find(u.uid) == last_uid_io_stats_.end()) {
                refresh_uid_names = true;
            } else {
                uid_io_stats[u.uid].name = last_uid_io_stats_[u.uid].name;
            }
        } else {
            task_info t;
            if (!t.parse_task_io_stats(std::move(io_stats[i])))
                continue;
            uid_io_stats[u.uid].tasks[t.pid] = t;
        }
    }

    if (!uids.empty() && refresh_uid_names) {
        get_uid_names(uids, uid_names);
    }

    return uid_io_stats;
}

namespace {

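// Total number of uid_record entries across all batches in the history map.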
inline size_t history_size(
    const std::map<uint64_t, struct uid_records>& history)
{
    size_t count = 0;
    for (auto const& it : history) {
        count += it.second.entries.size();
    }
    return count;
}

} // namespace

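// Folds the accumulated stats in curr_io_stats_ into io_history_, keyed by the
// current timestamp. Batches older than five days are dropped first, records
// with all-zero usage are skipped, and the history is shrunk if needed to stay
// under MAX_UID_RECORDS_SIZE.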
void uid_monitor::add_records_locked(uint64_t curr_ts)
{
    // remove records more than 5 days old
    if (curr_ts > 5 * DAY_TO_SEC) {
        auto it = io_history_.lower_bound(curr_ts - 5 * DAY_TO_SEC);
        io_history_.erase(io_history_.begin(), it);
    }

    struct uid_records new_records;
    for (const auto& p : curr_io_stats_) {
        struct uid_record record = {};
        record.name = p.first;
        if (!p.second.uid_ios.is_zero()) {
            record.ios.user_id = p.second.user_id;
            record.ios.uid_ios = p.second.uid_ios;
            for (const auto& p_task : p.second.task_ios) {
                if (!p_task.second.is_zero())
                    record.ios.task_ios[p_task.first] = p_task.second;
            }
            new_records.entries.push_back(record);
        }
    }

    curr_io_stats_.clear();
    new_records.start_ts = start_ts_;
    start_ts_ = curr_ts;

    if (new_records.entries.empty())
        return;

    // make some room for new records
    maybe_shrink_history_for_items(new_records.entries.size());

    io_history_[curr_ts] = new_records;
}

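// Evicts the oldest history batches until the existing entries plus nitems new
// ones fit within MAX_UID_RECORDS_SIZE.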
void uid_monitor::maybe_shrink_history_for_items(size_t nitems) {
    ssize_t overflow = history_size(io_history_) + nitems - MAX_UID_RECORDS_SIZE;
    while (overflow > 0 && io_history_.size() > 0) {
        auto del_it = io_history_.begin();
        overflow -= del_it->second.entries.size();
        io_history_.erase(io_history_.begin());
    }
}

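// Returns the recorded history, optionally limited to the most recent `hours`
// (0 means no time limit) and to records whose total read+write bytes exceed
// `threshold`. When force_report is set, a fresh sample is collected first.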
std::map<uint64_t, struct uid_records> uid_monitor::dump(
    double hours, uint64_t threshold, bool force_report)
{
    if (force_report) {
        report(nullptr);
    }

    Mutex::Autolock _l(uidm_mutex_);

    std::map<uint64_t, struct uid_records> dump_records;
    uint64_t first_ts = 0;

    if (hours != 0) {
        first_ts = time(NULL) - hours * HOUR_TO_SEC;
    }

    for (auto it = io_history_.lower_bound(first_ts); it != io_history_.end(); ++it) {
        const std::vector<struct uid_record>& recs = it->second.entries;
        struct uid_records filtered;

        for (const auto& rec : recs) {
            const io_usage& uid_usage = rec.ios.uid_ios;
            if (uid_usage.bytes[READ][FOREGROUND][CHARGER_ON] +
                uid_usage.bytes[READ][FOREGROUND][CHARGER_OFF] +
                uid_usage.bytes[READ][BACKGROUND][CHARGER_ON] +
                uid_usage.bytes[READ][BACKGROUND][CHARGER_OFF] +
                uid_usage.bytes[WRITE][FOREGROUND][CHARGER_ON] +
                uid_usage.bytes[WRITE][FOREGROUND][CHARGER_OFF] +
                uid_usage.bytes[WRITE][BACKGROUND][CHARGER_ON] +
                uid_usage.bytes[WRITE][BACKGROUND][CHARGER_OFF] > threshold) {
                filtered.entries.push_back(rec);
            }
        }

        if (filtered.entries.empty())
            continue;

        filtered.start_ts = it->second.start_ts;
        dump_records.insert(
            std::pair<uint64_t, struct uid_records>(it->first, filtered));
    }

    return dump_records;
}

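// Computes the per-UID and per-task deltas against the previous snapshot and
// accumulates them into the bucket for the current charger state. Negative
// deltas (e.g. if kernel counters restart) are clamped to zero.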
void uid_monitor::update_curr_io_stats_locked()
{
    std::unordered_map<uint32_t, uid_info> uid_io_stats =
        get_uid_io_stats_locked();
    if (uid_io_stats.empty()) {
        return;
    }

    for (const auto& it : uid_io_stats) {
        const uid_info& uid = it.second;
        if (curr_io_stats_.find(uid.name) == curr_io_stats_.end()) {
            curr_io_stats_[uid.name] = {};
        }

        struct uid_io_usage& usage = curr_io_stats_[uid.name];
        usage.user_id = multiuser_get_user_id(uid.uid);

        int64_t fg_rd_delta = uid.io[FOREGROUND].read_bytes -
            last_uid_io_stats_[uid.uid].io[FOREGROUND].read_bytes;
        int64_t bg_rd_delta = uid.io[BACKGROUND].read_bytes -
            last_uid_io_stats_[uid.uid].io[BACKGROUND].read_bytes;
        int64_t fg_wr_delta = uid.io[FOREGROUND].write_bytes -
            last_uid_io_stats_[uid.uid].io[FOREGROUND].write_bytes;
        int64_t bg_wr_delta = uid.io[BACKGROUND].write_bytes -
            last_uid_io_stats_[uid.uid].io[BACKGROUND].write_bytes;

        usage.uid_ios.bytes[READ][FOREGROUND][charger_stat_] +=
            (fg_rd_delta < 0) ? 0 : fg_rd_delta;
        usage.uid_ios.bytes[READ][BACKGROUND][charger_stat_] +=
            (bg_rd_delta < 0) ? 0 : bg_rd_delta;
        usage.uid_ios.bytes[WRITE][FOREGROUND][charger_stat_] +=
            (fg_wr_delta < 0) ? 0 : fg_wr_delta;
        usage.uid_ios.bytes[WRITE][BACKGROUND][charger_stat_] +=
            (bg_wr_delta < 0) ? 0 : bg_wr_delta;

        for (const auto& task_it : uid.tasks) {
            const task_info& task = task_it.second;
            const pid_t pid = task_it.first;
            const std::string& comm = task_it.second.comm;
            int64_t task_fg_rd_delta = task.io[FOREGROUND].read_bytes -
                last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].read_bytes;
            int64_t task_bg_rd_delta = task.io[BACKGROUND].read_bytes -
                last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].read_bytes;
            int64_t task_fg_wr_delta = task.io[FOREGROUND].write_bytes -
                last_uid_io_stats_[uid.uid].tasks[pid].io[FOREGROUND].write_bytes;
            int64_t task_bg_wr_delta = task.io[BACKGROUND].write_bytes -
                last_uid_io_stats_[uid.uid].tasks[pid].io[BACKGROUND].write_bytes;

            io_usage& task_usage = usage.task_ios[comm];
            task_usage.bytes[READ][FOREGROUND][charger_stat_] +=
                (task_fg_rd_delta < 0) ? 0 : task_fg_rd_delta;
            task_usage.bytes[READ][BACKGROUND][charger_stat_] +=
                (task_bg_rd_delta < 0) ? 0 : task_bg_rd_delta;
            task_usage.bytes[WRITE][FOREGROUND][charger_stat_] +=
                (task_fg_wr_delta < 0) ? 0 : task_fg_wr_delta;
            task_usage.bytes[WRITE][BACKGROUND][charger_stat_] +=
                (task_bg_wr_delta < 0) ? 0 : task_bg_wr_delta;
        }
    }

    last_uid_io_stats_ = uid_io_stats;
}

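// Collects a fresh sample, appends it to the in-memory history and, when
// protos is non-null, refreshes the per-user uid_io protos from that history.
// No-op if the monitor is disabled.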
void uid_monitor::report(unordered_map<int, StoragedProto>* protos)
{
    if (!enabled()) return;

    Mutex::Autolock _l(uidm_mutex_);

    update_curr_io_stats_locked();
    add_records_locked(time(NULL));

    if (protos) {
        update_uid_io_proto(protos);
    }
}

namespace {

void set_io_usage_proto(IOUsage* usage_proto, const io_usage& usage)
{
    usage_proto->set_rd_fg_chg_on(usage.bytes[READ][FOREGROUND][CHARGER_ON]);
    usage_proto->set_rd_fg_chg_off(usage.bytes[READ][FOREGROUND][CHARGER_OFF]);
    usage_proto->set_rd_bg_chg_on(usage.bytes[READ][BACKGROUND][CHARGER_ON]);
    usage_proto->set_rd_bg_chg_off(usage.bytes[READ][BACKGROUND][CHARGER_OFF]);
    usage_proto->set_wr_fg_chg_on(usage.bytes[WRITE][FOREGROUND][CHARGER_ON]);
    usage_proto->set_wr_fg_chg_off(usage.bytes[WRITE][FOREGROUND][CHARGER_OFF]);
    usage_proto->set_wr_bg_chg_on(usage.bytes[WRITE][BACKGROUND][CHARGER_ON]);
    usage_proto->set_wr_bg_chg_off(usage.bytes[WRITE][BACKGROUND][CHARGER_OFF]);
}

void get_io_usage_proto(io_usage* usage, const IOUsage& io_proto)
{
    usage->bytes[READ][FOREGROUND][CHARGER_ON] = io_proto.rd_fg_chg_on();
    usage->bytes[READ][FOREGROUND][CHARGER_OFF] = io_proto.rd_fg_chg_off();
    usage->bytes[READ][BACKGROUND][CHARGER_ON] = io_proto.rd_bg_chg_on();
    usage->bytes[READ][BACKGROUND][CHARGER_OFF] = io_proto.rd_bg_chg_off();
    usage->bytes[WRITE][FOREGROUND][CHARGER_ON] = io_proto.wr_fg_chg_on();
    usage->bytes[WRITE][FOREGROUND][CHARGER_OFF] = io_proto.wr_fg_chg_off();
    usage->bytes[WRITE][BACKGROUND][CHARGER_ON] = io_proto.wr_bg_chg_on();
    usage->bytes[WRITE][BACKGROUND][CHARGER_OFF] = io_proto.wr_bg_chg_off();
}

} // namespace

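// Serializes io_history_ into the per-user protos: for each history batch,
// each affected user gets one UidIOItem whose records hold that user's
// uid_record entries for the batch.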
void uid_monitor::update_uid_io_proto(unordered_map<int, StoragedProto>* protos)
{
    for (const auto& item : io_history_) {
        const uint64_t& end_ts = item.first;
        const struct uid_records& recs = item.second;
        unordered_map<userid_t, UidIOItem*> user_items;

        for (const auto& entry : recs.entries) {
            userid_t user_id = entry.ios.user_id;
            UidIOItem* item_proto = user_items[user_id];
            if (item_proto == nullptr) {
                item_proto = (*protos)[user_id].mutable_uid_io_usage()
                    ->add_uid_io_items();
                user_items[user_id] = item_proto;
            }
            item_proto->set_end_ts(end_ts);

            UidIORecords* recs_proto = item_proto->mutable_records();
            recs_proto->set_start_ts(recs.start_ts);

            UidRecord* rec_proto = recs_proto->add_entries();
            rec_proto->set_uid_name(entry.name);
            rec_proto->set_user_id(user_id);

            IOUsage* uid_io_proto = rec_proto->mutable_uid_io();
            const io_usage& uio_ios = entry.ios.uid_ios;
            set_io_usage_proto(uid_io_proto, uio_ios);

            for (const auto& task_io : entry.ios.task_ios) {
                const std::string& task_name = task_io.first;
                const io_usage& task_ios = task_io.second;

                TaskIOUsage* task_io_proto = rec_proto->add_task_io();
                task_io_proto->set_task_name(task_name);
                set_io_usage_proto(task_io_proto->mutable_ios(), task_ios);
            }
        }
    }
}

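// Drops every history record that belongs to user_id, then discards any
// batches left empty.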
void uid_monitor::clear_user_history(userid_t user_id)
{
    Mutex::Autolock _l(uidm_mutex_);

    for (auto& item : io_history_) {
        vector<uid_record>* entries = &item.second.entries;
        entries->erase(
            remove_if(entries->begin(), entries->end(),
                      [user_id](const uid_record& rec) {
                          return rec.ios.user_id == user_id; }),
            entries->end());
    }

    for (auto it = io_history_.begin(); it != io_history_.end(); ) {
        if (it->second.entries.empty()) {
            it = io_history_.erase(it);
        } else {
            it++;
        }
    }
}

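// Rebuilds io_history_ from a previously persisted per-user proto. Records the
// user already has in a batch are not added again, so reloading the same proto
// does not duplicate entries.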
void uid_monitor::load_uid_io_proto(userid_t user_id, const UidIOUsage& uid_io_proto)
{
    if (!enabled()) return;

    Mutex::Autolock _l(uidm_mutex_);

    for (const auto& item_proto : uid_io_proto.uid_io_items()) {
        const UidIORecords& records_proto = item_proto.records();
        struct uid_records* recs = &io_history_[item_proto.end_ts()];

        // It's possible that the same uid_io_proto file gets loaded more than
        // once, for example, if system_server crashes. In this case we avoid
        // adding duplicate entries, so we build a quick way to check for
        // duplicates.
        std::unordered_set<std::string> existing_uids;
        for (const auto& rec : recs->entries) {
            if (rec.ios.user_id == user_id) {
                existing_uids.emplace(rec.name);
            }
        }

        recs->start_ts = records_proto.start_ts();
        for (const auto& rec_proto : records_proto.entries()) {
            if (existing_uids.find(rec_proto.uid_name()) != existing_uids.end()) {
                continue;
            }

            struct uid_record record;
            record.name = rec_proto.uid_name();
            record.ios.user_id = rec_proto.user_id();
            get_io_usage_proto(&record.ios.uid_ios, rec_proto.uid_io());

            for (const auto& task_io_proto : rec_proto.task_io()) {
                get_io_usage_proto(
                    &record.ios.task_ios[task_io_proto.task_name()],
                    task_io_proto.ios());
            }
            recs->entries.push_back(record);
        }

        // We already added items, so this will just cull down to the maximum
        // length. We do not remove anything if there is only one entry.
        if (io_history_.size() > 1) {
            maybe_shrink_history_for_items(0);
        }
    }
}

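// Switches the charger-state bucket used for subsequent accumulation. The
// pending deltas are flushed under the old state first so they are attributed
// correctly.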
void uid_monitor::set_charger_state(charger_stat_t stat)
{
    Mutex::Autolock _l(uidm_mutex_);

    if (charger_stat_ == stat) {
        return;
    }

    update_curr_io_stats_locked();
    charger_stat_ = stat;
}

void uid_monitor::init(charger_stat_t stat)
{
    charger_stat_ = stat;

    start_ts_ = time(NULL);
    last_uid_io_stats_ = get_uid_io_stats();
}

uid_monitor::uid_monitor()
    : enabled_(!access(UID_IO_STATS_PATH, R_OK)) {
}