/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <chrono>
#include <cmath>
#include <deque>
#include <fcntl.h>
#include <map>
#include <random>
#include <stdio.h>
#include <string.h>
#include <string>
#include <sys/stat.h>
#include <unistd.h>
#include <unordered_map>
#include <vector>

#include <gtest/gtest.h>

#include <healthhalutils/HealthHalUtils.h>
#include <storaged.h>       // data structures
#include <storaged_utils.h> // functions to test

#define MMC_DISK_STATS_PATH "/sys/block/mmcblk0/stat"
#define SDA_DISK_STATS_PATH "/sys/block/sda/stat"

using namespace std;
using namespace chrono;
using namespace storaged_proto;

namespace {

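// Generates disk traffic by writing a small file and reading it back, then
// sleeps for |sec| seconds so the kernel's per-disk counters advance between
// the samples taken by the tests below.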
void write_and_pause(uint32_t sec) {
    const char* path = "/cache/test";
    int fd = open(path, O_WRONLY | O_CREAT, 0600);
    ASSERT_LT(-1, fd);
    char buffer[2048];
    memset(buffer, 1, sizeof(buffer));
    int loop_size = 100;
    for (int i = 0; i < loop_size; ++i) {
        ASSERT_EQ(2048, write(fd, buffer, sizeof(buffer)));
    }
    fsync(fd);
    close(fd);

    fd = open(path, O_RDONLY);
    ASSERT_LT(-1, fd);
    for (int i = 0; i < loop_size; ++i) {
        ASSERT_EQ(2048, read(fd, buffer, sizeof(buffer)));
    }
    close(fd);

    sleep(sec);
}

} // namespace

// The tested functions should return the expected values.
const char* DISK_STATS_PATH;
TEST(storaged_test, retvals) {
    struct disk_stats stats;
    memset(&stats, 0, sizeof(struct disk_stats));

    if (access(MMC_DISK_STATS_PATH, R_OK) >= 0) {
        DISK_STATS_PATH = MMC_DISK_STATS_PATH;
    } else if (access(SDA_DISK_STATS_PATH, R_OK) >= 0) {
        DISK_STATS_PATH = SDA_DISK_STATS_PATH;
    } else {
        return;
    }

    EXPECT_TRUE(parse_disk_stats(DISK_STATS_PATH, &stats));

    struct disk_stats old_stats;
    memset(&old_stats, 0, sizeof(struct disk_stats));
    old_stats = stats;

    const char wrong_path[] = "/this/is/wrong";
    EXPECT_FALSE(parse_disk_stats(wrong_path, &stats));

    // Parsing a nonexistent path must not clobber the output structure.
    EXPECT_EQ(stats, old_stats);
}

TEST(storaged_test, disk_stats) {
    struct disk_stats stats = {};
    ASSERT_TRUE(parse_disk_stats(DISK_STATS_PATH, &stats));

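    // disk_stats begins with DISK_STATS_SIZE consecutive uint64_t counters,
    // which is what lets the loop below walk the struct as a flat array.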
    // Every entry of stats (except io_in_flight) should be greater than 0.
    for (uint i = 0; i < DISK_STATS_SIZE; ++i) {
        if (i == 8) continue; // skip io_in_flight which can be 0
        EXPECT_LT((uint64_t)0, *((uint64_t*)&stats + i));
    }

    // The accumulated increments should match the overall increment.
    struct disk_stats base = {}, tmp = {}, curr, acc = {}, inc[5];
    for (uint i = 0; i < 5; ++i) {
        ASSERT_TRUE(parse_disk_stats(DISK_STATS_PATH, &curr));
        if (i == 0) {
            base = curr;
            tmp = curr;
            sleep(5);
            continue;
        }
        get_inc_disk_stats(&tmp, &curr, &inc[i]);
        add_disk_stats(&inc[i], &acc);
        tmp = curr;
        write_and_pause(5);
    }
    struct disk_stats overall_inc = {};
    get_inc_disk_stats(&base, &curr, &overall_inc);

    EXPECT_EQ(overall_inc, acc);
}

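// Reference implementations used to cross-check stream_stats: an arithmetic
// mean and the population standard deviation over the full data set.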
double mean(std::deque<uint32_t> nums) {
    double sum = 0.0;
    for (uint32_t i : nums) {
        sum += i;
    }
    return sum / nums.size();
}

double standard_deviation(std::deque<uint32_t> nums) {
    double sum = 0.0;
    double avg = mean(nums);
    for (uint32_t i : nums) {
        sum += ((double)i - avg) * ((double)i - avg);
    }
    return sqrt(sum / nums.size());
}

TEST(storaged_test, stream_stats) {
    // 100 random numbers
    std::vector<uint32_t> data = {8147,9058,1270,9134,6324,975,2785,5469,9575,9649,1576,9706,9572,4854,8003,1419,4218,9157,7922,9595,6557,357,8491,9340,6787,7577,7431,3922,6555,1712,7060,318,2769,462,971,8235,6948,3171,9502,344,4387,3816,7655,7952,1869,4898,4456,6463,7094,7547,2760,6797,6551,1626,1190,4984,9597,3404,5853,2238,7513,2551,5060,6991,8909,9593,5472,1386,1493,2575,8407,2543,8143,2435,9293,3500,1966,2511,6160,4733,3517,8308,5853,5497,9172,2858,7572,7537,3804,5678,759,540,5308,7792,9340,1299,5688,4694,119,3371};
    std::deque<uint32_t> test_data;
    stream_stats sstats;
    for (uint32_t i : data) {
        test_data.push_back(i);
        sstats.add(i);

        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats.get_mean());
    }

    for (uint32_t i : data) {
        test_data.pop_front();
        sstats.evict(i);

        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats.get_mean());
    }

    // some real data
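    // Slide a fixed-size window over the samples: evict the oldest value and
    // add the newest, checking mean/std against the references at every step.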
    std::vector<uint32_t> another_data = {113875,81620,103145,28327,86855,207414,96526,52567,28553,250311};
    test_data.clear();
    uint32_t window_size = 2;
    uint32_t idx;
    stream_stats sstats1;
    for (idx = 0; idx < window_size; ++idx) {
        test_data.push_back(another_data[idx]);
        sstats1.add(another_data[idx]);
    }
    EXPECT_EQ((int)standard_deviation(test_data), (int)sstats1.get_std());
    EXPECT_EQ((int)mean(test_data), (int)sstats1.get_mean());
    for (; idx < another_data.size(); ++idx) {
        test_data.pop_front();
        sstats1.evict(another_data[idx - window_size]);
        test_data.push_back(another_data[idx]);
        sstats1.add(another_data[idx]);
        EXPECT_EQ((int)standard_deviation(test_data), (int)sstats1.get_std());
        EXPECT_EQ((int)mean(test_data), (int)sstats1.get_mean());
    }
}

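// Helpers for fabricating workloads: scale every field of a disk_perf sample
// by a constant factor, and add two disk_stats snapshots field by field.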
struct disk_perf disk_perf_multiply(struct disk_perf perf, double mul) {
    struct disk_perf retval;
    retval.read_perf = (double)perf.read_perf * mul;
    retval.read_ios = (double)perf.read_ios * mul;
    retval.write_perf = (double)perf.write_perf * mul;
    retval.write_ios = (double)perf.write_ios * mul;
    retval.queue = (double)perf.queue * mul;

    return retval;
}

struct disk_stats disk_stats_add(struct disk_stats stats1, struct disk_stats stats2) {
    struct disk_stats retval;
    retval.read_ios = stats1.read_ios + stats2.read_ios;
    retval.read_merges = stats1.read_merges + stats2.read_merges;
    retval.read_sectors = stats1.read_sectors + stats2.read_sectors;
    retval.read_ticks = stats1.read_ticks + stats2.read_ticks;
    retval.write_ios = stats1.write_ios + stats2.write_ios;
    retval.write_merges = stats1.write_merges + stats2.write_merges;
    retval.write_sectors = stats1.write_sectors + stats2.write_sectors;
    retval.write_ticks = stats1.write_ticks + stats2.write_ticks;
    retval.io_in_flight = stats1.io_in_flight + stats2.io_in_flight;
    retval.io_ticks = stats1.io_ticks + stats2.io_ticks;
    retval.io_in_queue = stats1.io_in_queue + stats2.io_in_queue;
    retval.end_time = stats1.end_time + stats2.end_time;

    return retval;
}

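// Asserts that no monotonic counter decreased between stats1 and stats2 and
// that at least one of them strictly increased.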
void expect_increasing(struct disk_stats stats1, struct disk_stats stats2) {
    EXPECT_LE(stats1.read_ios, stats2.read_ios);
    EXPECT_LE(stats1.read_merges, stats2.read_merges);
    EXPECT_LE(stats1.read_sectors, stats2.read_sectors);
    EXPECT_LE(stats1.read_ticks, stats2.read_ticks);
    EXPECT_LE(stats1.write_ios, stats2.write_ios);
    EXPECT_LE(stats1.write_merges, stats2.write_merges);
    EXPECT_LE(stats1.write_sectors, stats2.write_sectors);
    EXPECT_LE(stats1.write_ticks, stats2.write_ticks);
    EXPECT_LE(stats1.io_ticks, stats2.io_ticks);
    EXPECT_LE(stats1.io_in_queue, stats2.io_in_queue);

    EXPECT_TRUE(stats1.read_ios < stats2.read_ios ||
                stats1.read_merges < stats2.read_merges ||
                stats1.read_sectors < stats2.read_sectors ||
                stats1.read_ticks < stats2.read_ticks ||
                stats1.write_ios < stats2.write_ios ||
                stats1.write_merges < stats2.write_merges ||
                stats1.write_sectors < stats2.write_sectors ||
                stats1.write_ticks < stats2.write_ticks ||
                stats1.io_ticks < stats2.io_ticks ||
                stats1.io_in_queue < stats2.io_in_queue);
}

TEST(storaged_test, disk_stats_monitor) {
    using android::hardware::health::V2_0::get_health_service;

    auto healthService = get_health_service();

    // Assert that at least one source of disk stats is available.
    ASSERT_TRUE(healthService != nullptr || access(MMC_DISK_STATS_PATH, R_OK) >= 0 ||
                access(SDA_DISK_STATS_PATH, R_OK) >= 0);

    // Test whether detect() returns the expected value.
    disk_stats_monitor dsm_detect{healthService};
    ASSERT_TRUE(dsm_detect.enabled());
    // Feed the monitor constant perf data to build the I/O perf baseline.
    // Constant values are reasonable here because stream_stats is already
    // covered by its own test above.
    struct disk_perf norm_perf = {
        .read_perf = 10 * 1024,
        .read_ios = 50,
        .write_perf = 5 * 1024,
        .write_ios = 25,
        .queue = 5
    };

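    // Fill the detection window with the baseline perturbed by +/-20%
    // uniform noise.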
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> rand(0.8, 1.2);

    for (uint i = 0; i < dsm_detect.mWindow; ++i) {
        struct disk_perf perf = disk_perf_multiply(norm_perf, rand(gen));

        dsm_detect.add(&perf);
        dsm_detect.mBuffer.push(perf);
        EXPECT_EQ(dsm_detect.mBuffer.size(), (uint64_t)i + 1);
    }

    dsm_detect.mValid = true;
    dsm_detect.update_mean();
    dsm_detect.update_std();

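    // Sweep synthetic samples away from the mean in 0.5-sigma steps: reads
    // and writes drop while queue depth grows. detect() should fire exactly
    // when the deviation exceeds mSigma standard deviations.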
    for (double i = 0; i < 2 * dsm_detect.mSigma; i += 0.5) {
        struct disk_perf test_perf;
        struct disk_perf test_mean = dsm_detect.mMean;
        struct disk_perf test_std = dsm_detect.mStd;

        test_perf.read_perf = (double)test_mean.read_perf - i * test_std.read_perf;
        test_perf.read_ios = (double)test_mean.read_ios - i * test_std.read_ios;
        test_perf.write_perf = (double)test_mean.write_perf - i * test_std.write_perf;
        test_perf.write_ios = (double)test_mean.write_ios - i * test_std.write_ios;
        test_perf.queue = (double)test_mean.queue + i * test_std.queue;

        EXPECT_EQ((i > dsm_detect.mSigma), dsm_detect.detect(&test_perf));
    }

    // Test that stalled disk_stats are correctly detected as they accumulate
    // in the monitor.
    disk_stats_monitor dsm_acc{healthService};
    struct disk_stats norm_inc = {
        .read_ios = 200,
        .read_merges = 0,
        .read_sectors = 200,
        .read_ticks = 200,
        .write_ios = 100,
        .write_merges = 0,
        .write_sectors = 100,
        .write_ticks = 100,
        .io_in_flight = 0,
        .io_ticks = 600,
        .io_in_queue = 300,
        .start_time = 0,
        .end_time = 100,
        .counter = 0,
        .io_avg = 0
    };

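    // Same ticks and I/O counts as norm_inc, but far fewer sectors moved and
    // a much deeper queue: the signature of a stalled disk.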
    struct disk_stats stall_inc = {
        .read_ios = 200,
        .read_merges = 0,
        .read_sectors = 20,
        .read_ticks = 200,
        .write_ios = 100,
        .write_merges = 0,
        .write_sectors = 10,
        .write_ticks = 100,
        .io_in_flight = 0,
        .io_ticks = 600,
        .io_in_queue = 1200,
        .start_time = 0,
        .end_time = 100,
        .counter = 0,
        .io_avg = 0
    };

    struct disk_stats stats_base = {};
    int loop_size = 100;
    for (int i = 0; i < loop_size; ++i) {
        stats_base = disk_stats_add(stats_base, norm_inc);
        dsm_acc.update(&stats_base);
        EXPECT_EQ(dsm_acc.mValid, (uint32_t)i >= dsm_acc.mWindow);
        EXPECT_FALSE(dsm_acc.mStall);
    }

    stats_base = disk_stats_add(stats_base, stall_inc);
    dsm_acc.update(&stats_base);
    EXPECT_TRUE(dsm_acc.mValid);
    EXPECT_TRUE(dsm_acc.mStall);

    for (int i = 0; i < 10; ++i) {
        stats_base = disk_stats_add(stats_base, norm_inc);
        dsm_acc.update(&stats_base);
        EXPECT_TRUE(dsm_acc.mValid);
        EXPECT_FALSE(dsm_acc.mStall);
    }

    struct disk_stats stats_prev = {};
    loop_size = 10;
    write_and_pause(5);
    for (int i = 0; i < loop_size; ++i) {
        dsm_detect.update();
        expect_increasing(stats_prev, dsm_detect.mPrevious);
        stats_prev = dsm_detect.mPrevious;
        write_and_pause(5);
    }
}

TEST(storaged_test, storage_info_t) {
    storage_info_t si;
    time_point<steady_clock> tp;
    time_point<system_clock> stp;

    // generate perf history [least recent -------> most recent]
    // day 1:   5,  10,  15,  20           | daily average  12
    // day 2:  25,  30,  35,  40,  45      | daily average  35
    // day 3:  50,  55,  60,  65,  70      | daily average  60
    // day 4:  75,  80,  85,  90,  95      | daily average  85
    // day 5: 100, 105, 110, 115           | daily average 107
    // day 6: 120, 125, 130, 135, 140      | daily average 130
    // day 7: 145, 150, 155, 160, 165      | daily average 155
    // end of week 1:                      | weekly average  83
    // day 1: 170, 175, 180, 185, 190      | daily average 180
    // day 2: 195, 200, 205, 210, 215      | daily average 205
    // day 3: 220, 225, 230, 235           | daily average 227
    // day 4: 240, 245, 250, 255, 260      | daily average 250
    // day 5: 265, 270, 275, 280, 285      | daily average 275
    // day 6: 290, 295, 300, 305, 310      | daily average 300
    // day 7: 315, 320, 325, 330, 335      | daily average 325
    // end of week 2:                      | weekly average 251
    // day 1: 340, 345, 350, 355           | daily average 347
    // day 2: 360, 365, 370, 375
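    // Averages truncate toward zero, e.g. day 1 of week 1 is
    // (5 + 10 + 15 + 20) / 4 = 12 rather than 12.5.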
    si.day_start_tp = {};
    for (int i = 0; i < 75; i++) {
        tp += hours(5);
        stp = {};
        stp += duration_cast<chrono::seconds>(tp.time_since_epoch());
        si.update_perf_history((i + 1) * 5, stp);
    }

    vector<int> history = si.get_perf_history();
    EXPECT_EQ(history.size(), 66UL);
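    // 3 header entries + 4 recent samples + 7 daily + 52 weekly slots = 66.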
    size_t i = 0;
    EXPECT_EQ(history[i++], 4);
    EXPECT_EQ(history[i++], 7);  // 7 days
    EXPECT_EQ(history[i++], 52); // 52 weeks
    // last 24 hours
    EXPECT_EQ(history[i++], 375);
    EXPECT_EQ(history[i++], 370);
    EXPECT_EQ(history[i++], 365);
    EXPECT_EQ(history[i++], 360);
    // daily average of last 7 days
    EXPECT_EQ(history[i++], 347);
    EXPECT_EQ(history[i++], 325);
    EXPECT_EQ(history[i++], 300);
    EXPECT_EQ(history[i++], 275);
    EXPECT_EQ(history[i++], 250);
    EXPECT_EQ(history[i++], 227);
    EXPECT_EQ(history[i++], 205);
    // weekly average of last 52 weeks
    EXPECT_EQ(history[i++], 251);
    EXPECT_EQ(history[i++], 83);
    for (; i < history.size(); i++) {
        EXPECT_EQ(history[i], 0);
    }
}

TEST(storaged_test, storage_info_t_proto) {
    storage_info_t si;
    si.day_start_tp = {};

    IOPerfHistory proto;
    proto.set_nr_samples(10);
    proto.set_day_start_sec(0);
    si.load_perf_history_proto(proto);

    // Skip ahead > 1 day, with no data points in the previous day.
    time_point<system_clock> stp;
    stp += hours(36);
    si.update_perf_history(100, stp);

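    // Expected layout: 3 header entries + 1 recent sample + 7 daily
    // + 52 weekly slots = 63.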
    vector<int> history = si.get_perf_history();
    EXPECT_EQ(history.size(), 63UL);
    EXPECT_EQ(history[0], 1);
    EXPECT_EQ(history[1], 7);
    EXPECT_EQ(history[2], 52);
    EXPECT_EQ(history[3], 100);
    for (size_t i = 4; i < history.size(); i++) {
        EXPECT_EQ(history[i], 0);
    }
}

TEST(storaged_test, uid_monitor) {
    uid_monitor uidm;
    auto& io_history = uidm.io_history();

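    // Seed two history snapshots keyed by their end timestamps; each entry
    // tracks a UID's bytes split by read/write, fg/bg, and charger state.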
    io_history[200] = {
        .start_ts = 100,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
            { "app2", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 1000,
                }
            },
            { "app1", {
                .user_id = 1,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

    io_history[300] = {
        .start_ts = 200,
        .entries = {
            { "app1", {
                .user_id = 1,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF] = 1000,
                }
            },
            { "app3", {
                .user_id = 0,
                .uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF] = 1000,
                }
            },
        },
    };

    unordered_map<int, StoragedProto> protos;

    uidm.update_uid_io_proto(&protos);

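    // One proto per Android user: user 0 covers app1/app2/app3, user 1 covers
    // the user-1 instance of app1.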
    EXPECT_EQ(protos.size(), 2U);
    EXPECT_EQ(protos.count(0), 1UL);
    EXPECT_EQ(protos.count(1), 1UL);

    EXPECT_EQ(protos[0].uid_io_usage().uid_io_items_size(), 2);
    const UidIOItem& user_0_item_0 = protos[0].uid_io_usage().uid_io_items(0);
    EXPECT_EQ(user_0_item_0.end_ts(), 200UL);
    EXPECT_EQ(user_0_item_0.records().start_ts(), 100UL);
    EXPECT_EQ(user_0_item_0.records().entries_size(), 2);
    EXPECT_EQ(user_0_item_0.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_0_item_0.records().entries(0).user_id(), 0UL);
    EXPECT_EQ(user_0_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
    EXPECT_EQ(user_0_item_0.records().entries(1).uid_name(), "app2");
    EXPECT_EQ(user_0_item_0.records().entries(1).user_id(), 0UL);
    EXPECT_EQ(user_0_item_0.records().entries(1).uid_io().rd_fg_chg_off(), 1000UL);
    const UidIOItem& user_0_item_1 = protos[0].uid_io_usage().uid_io_items(1);
    EXPECT_EQ(user_0_item_1.end_ts(), 300UL);
    EXPECT_EQ(user_0_item_1.records().start_ts(), 200UL);
    EXPECT_EQ(user_0_item_1.records().entries_size(), 1);
    EXPECT_EQ(user_0_item_1.records().entries(0).uid_name(), "app3");
    EXPECT_EQ(user_0_item_1.records().entries(0).user_id(), 0UL);
    EXPECT_EQ(user_0_item_1.records().entries(0).uid_io().rd_bg_chg_off(), 1000UL);

    EXPECT_EQ(protos[1].uid_io_usage().uid_io_items_size(), 2);
    const UidIOItem& user_1_item_0 = protos[1].uid_io_usage().uid_io_items(0);
    EXPECT_EQ(user_1_item_0.end_ts(), 200UL);
    EXPECT_EQ(user_1_item_0.records().start_ts(), 100UL);
    EXPECT_EQ(user_1_item_0.records().entries_size(), 1);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_1_item_0.records().entries(0).user_id(), 1UL);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().rd_fg_chg_on(), 1000UL);
    EXPECT_EQ(user_1_item_0.records().entries(0).uid_io().wr_fg_chg_on(), 1000UL);
    const UidIOItem& user_1_item_1 = protos[1].uid_io_usage().uid_io_items(1);
    EXPECT_EQ(user_1_item_1.end_ts(), 300UL);
    EXPECT_EQ(user_1_item_1.records().start_ts(), 200UL);
    EXPECT_EQ(user_1_item_1.records().entries_size(), 1);
    EXPECT_EQ(user_1_item_1.records().entries(0).uid_name(), "app1");
    EXPECT_EQ(user_1_item_1.records().entries(0).user_id(), 1UL);
    EXPECT_EQ(user_1_item_1.records().entries(0).uid_io().wr_fg_chg_off(), 1000UL);

    io_history.clear();

    io_history[300] = {
        .start_ts = 200,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

    io_history[400] = {
        .start_ts = 300,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
        },
    };

    uidm.load_uid_io_proto(0, protos[0].uid_io_usage());
    uidm.load_uid_io_proto(1, protos[1].uid_io_usage());
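    // Loading the saved protos merges the persisted records back into the
    // in-memory history alongside the freshly seeded 300/400 snapshots.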

    EXPECT_EQ(io_history.size(), 3UL);
    EXPECT_EQ(io_history.count(200), 1UL);
    EXPECT_EQ(io_history.count(300), 1UL);
    EXPECT_EQ(io_history.count(400), 1UL);

    EXPECT_EQ(io_history[200].start_ts, 100UL);
    const vector<struct uid_record>& entries_0 = io_history[200].entries;
    EXPECT_EQ(entries_0.size(), 3UL);
    EXPECT_EQ(entries_0[0].name, "app1");
    EXPECT_EQ(entries_0[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_0[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_0[1].name, "app2");
    EXPECT_EQ(entries_0[1].ios.user_id, 0UL);
    EXPECT_EQ(entries_0[1].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(entries_0[2].name, "app1");
    EXPECT_EQ(entries_0[2].ios.user_id, 1UL);
    EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_0[2].ios.uid_ios.bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);

    EXPECT_EQ(io_history[300].start_ts, 200UL);
    const vector<struct uid_record>& entries_1 = io_history[300].entries;
    EXPECT_EQ(entries_1.size(), 3UL);
    EXPECT_EQ(entries_1[0].name, "app1");
    EXPECT_EQ(entries_1[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_1[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(entries_1[1].name, "app3");
    EXPECT_EQ(entries_1[1].ios.user_id, 0UL);
    EXPECT_EQ(entries_1[1].ios.uid_ios.bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(entries_1[2].name, "app1");
    EXPECT_EQ(entries_1[2].ios.user_id, 1UL);
    EXPECT_EQ(entries_1[2].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);

    EXPECT_EQ(io_history[400].start_ts, 300UL);
    const vector<struct uid_record>& entries_2 = io_history[400].entries;
    EXPECT_EQ(entries_2.size(), 1UL);
    EXPECT_EQ(entries_2[0].name, "app1");
    EXPECT_EQ(entries_2[0].ios.user_id, 0UL);
    EXPECT_EQ(entries_2[0].ios.uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);

    map<string, io_usage> merged_entries_0 = merge_io_usage(entries_0);
    EXPECT_EQ(merged_entries_0.size(), 2UL);
    EXPECT_EQ(merged_entries_0.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_0.count("app2"), 1UL);
    EXPECT_EQ(merged_entries_0["app1"].bytes[READ][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(merged_entries_0["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 2000UL);
    EXPECT_EQ(merged_entries_0["app2"].bytes[READ][FOREGROUND][CHARGER_OFF], 1000UL);

    map<string, io_usage> merged_entries_1 = merge_io_usage(entries_1);
    EXPECT_EQ(merged_entries_1.size(), 2UL);
    EXPECT_EQ(merged_entries_1.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_1.count("app3"), 1UL);
    EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_OFF], 1000UL);
    EXPECT_EQ(merged_entries_1["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);
    EXPECT_EQ(merged_entries_1["app3"].bytes[READ][BACKGROUND][CHARGER_OFF], 1000UL);

    map<string, io_usage> merged_entries_2 = merge_io_usage(entries_2);
    EXPECT_EQ(merged_entries_2.size(), 1UL);
    EXPECT_EQ(merged_entries_2.count("app1"), 1UL);
    EXPECT_EQ(merged_entries_2["app1"].bytes[WRITE][FOREGROUND][CHARGER_ON], 1000UL);

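    // Clearing user 0 drops its entries from every snapshot and removes the
    // 400 snapshot entirely (it only contained user-0 records); clearing
    // user 1 then empties the history.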
    uidm.clear_user_history(0);

    EXPECT_EQ(io_history.size(), 2UL);
    EXPECT_EQ(io_history.count(200), 1UL);
    EXPECT_EQ(io_history.count(300), 1UL);

    EXPECT_EQ(io_history[200].entries.size(), 1UL);
    EXPECT_EQ(io_history[300].entries.size(), 1UL);

    uidm.clear_user_history(1);

    EXPECT_EQ(io_history.size(), 0UL);
}

TEST(storaged_test, load_uid_io_proto) {
    uid_monitor uidm;
    auto& io_history = uidm.io_history();

    static const uint64_t kProtoTime = 200;
    io_history[kProtoTime] = {
        .start_ts = 100,
        .entries = {
            { "app1", {
                .user_id = 0,
                .uid_ios.bytes[WRITE][FOREGROUND][CHARGER_ON] = 1000,
                }
            },
            { "app2", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 2000,
                }
            },
            { "app3", {
                .user_id = 0,
                .uid_ios.bytes[READ][FOREGROUND][CHARGER_OFF] = 3000,
                }
            },
        },
    };

    unordered_map<int, StoragedProto> protos;
    uidm.update_uid_io_proto(&protos);
    ASSERT_EQ(protos.size(), size_t(1));

    // Loading the same proto many times should not add duplicate entries.
    UidIOUsage user_0 = protos[0].uid_io_usage();
    for (size_t i = 0; i < 10000; i++) {
        uidm.load_uid_io_proto(0, user_0);
    }
    ASSERT_EQ(io_history.size(), size_t(1));
    ASSERT_EQ(io_history[kProtoTime].entries.size(), size_t(3));

    // Create duplicate entries until we go over the limit.
    auto record = io_history[kProtoTime];
    io_history.clear();
    for (size_t i = 0; i < uid_monitor::MAX_UID_RECORDS_SIZE * 2; i++) {
        if (i == kProtoTime) {
            continue;
        }
        io_history[i] = record;
    }
    ASSERT_GT(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));

    // After loading, the history should be truncated.
    for (auto& item : *user_0.mutable_uid_io_items()) {
        item.set_end_ts(io_history.size());
    }
    uidm.load_uid_io_proto(0, user_0);
    ASSERT_LE(io_history.size(), size_t(uid_monitor::MAX_UID_RECORDS_SIZE));
}
687