/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define DEBUG false
#include "Log.h"
#include "OringDurationTracker.h"
#include "guardrail/StatsdStats.h"

namespace android {
namespace os {
namespace statsd {

using std::pair;

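// Duration tracker that reports the union ("or") of the start/stop intervals of all keys it
// tracks: time accumulates whenever at least one key is started under a true condition.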
OringDurationTracker::OringDurationTracker(
        const ConfigKey& key, const int64_t& id, const MetricDimensionKey& eventKey,
        sp<ConditionWizard> wizard, int conditionIndex, const vector<Matcher>& dimensionInCondition,
        bool nesting, int64_t currentBucketStartNs, int64_t currentBucketNum,
        int64_t startTimeNs, int64_t bucketSizeNs, bool conditionSliced, bool fullLink,
        const vector<sp<DurationAnomalyTracker>>& anomalyTrackers)
    : DurationTracker(key, id, eventKey, wizard, conditionIndex, dimensionInCondition, nesting,
                      currentBucketStartNs, currentBucketNum, startTimeNs, bucketSizeNs,
                      conditionSliced, fullLink, anomalyTrackers),
      mStarted(),
      mPaused() {
    mLastStartTime = 0;
    if (mWizard != nullptr) {
        mSameConditionDimensionsInTracker =
            mWizard->equalOutputDimensions(conditionIndex, mDimensionInCondition);
    }
}

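// Returns a copy of this tracker with the accumulated duration reset and eventTime treated as
// the new start time; the started/paused key counts are carried over from this tracker.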
unique_ptr<DurationTracker> OringDurationTracker::clone(const int64_t eventTime) {
    auto clonedTracker = make_unique<OringDurationTracker>(*this);
    clonedTracker->mLastStartTime = eventTime;
    clonedTracker->mDuration = 0;
    return clonedTracker;
}

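// Guardrail on the number of condition keys tracked per metric dimension: reports the size to
// StatsdStats once it passes the soft limit and returns true (drop the data) once adding newKey
// would exceed the hard limit.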
bool OringDurationTracker::hitGuardRail(const HashableDimensionKey& newKey) {
    // ===========GuardRail==============
    // 1. Report the tuple count to StatsdStats if it exceeds the soft limit.
    if (mConditionKeyMap.find(newKey) != mConditionKeyMap.end()) {
        return false;
    }
    if (mConditionKeyMap.size() > StatsdStats::kDimensionKeySizeSoftLimit - 1) {
        size_t newTupleCount = mConditionKeyMap.size() + 1;
        StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mTrackerId, newTupleCount);
        // 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
        if (newTupleCount > StatsdStats::kDimensionKeySizeHardLimit) {
            ALOGE("OringDurTracker %lld dropping data for dimension key %s",
                (long long)mTrackerId, newKey.toString().c_str());
            return true;
        }
    }
    return false;
}

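// Handles a start event for the given key. Under a true condition the key is counted as started
// (and the anomaly alarm starts with the first active key); under a false condition it is parked
// in the paused set. The condition key is remembered for later sliced-condition queries.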
void OringDurationTracker::noteStart(const HashableDimensionKey& key, bool condition,
                                     const int64_t eventTime, const ConditionKey& conditionKey) {
    if (hitGuardRail(key)) {
        return;
    }
    if (condition) {
        if (mStarted.size() == 0) {
            mLastStartTime = eventTime;
            VLOG("record first start....");
            startAnomalyAlarm(eventTime);
        }
        mStarted[key]++;
    } else {
        mPaused[key]++;
    }

    if (mConditionSliced && mConditionKeyMap.find(key) == mConditionKeyMap.end()) {
        mConditionKeyMap[key] = conditionKey;
    }
    VLOG("Oring: %s start, condition %d", key.toString().c_str(), condition);
}

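// Handles a stop event for the given key, honoring nesting unless stopAll is set. When the last
// started key goes away, the time since mLastStartTime is folded into mDuration and the anomaly
// alarm is stopped.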
void OringDurationTracker::noteStop(const HashableDimensionKey& key, const int64_t timestamp,
                                    const bool stopAll) {
    VLOG("Oring: %s stop", key.toString().c_str());
    auto it = mStarted.find(key);
    if (it != mStarted.end()) {
        (it->second)--;
        if (stopAll || !mNested || it->second <= 0) {
            mStarted.erase(it);
            mConditionKeyMap.erase(key);
        }
        if (mStarted.empty()) {
            mDuration += (timestamp - mLastStartTime);
            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
            VLOG("record duration %lld, total %lld ", (long long)timestamp - mLastStartTime,
                 (long long)mDuration);
        }
    }

    auto pausedIt = mPaused.find(key);
    if (pausedIt != mPaused.end()) {
        (pausedIt->second)--;
        if (stopAll || !mNested || pausedIt->second <= 0) {
            mPaused.erase(pausedIt);
            mConditionKeyMap.erase(key);
        }
    }
    if (mStarted.empty()) {
        stopAnomalyAlarm(timestamp);
    }
}

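// Stops everything at once: accumulates any in-flight duration, checks for an anomaly, and
// clears all started/paused keys and their condition keys.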
void OringDurationTracker::noteStopAll(const int64_t timestamp) {
    if (!mStarted.empty()) {
        mDuration += (timestamp - mLastStartTime);
        VLOG("Oring Stop all: record duration %lld %lld ", (long long)timestamp - mLastStartTime,
             (long long)mDuration);
        detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
    }

    stopAnomalyAlarm(timestamp);
    mStarted.clear();
    mPaused.clear();
    mConditionKeyMap.clear();
}

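// Closes the bucket containing eventTimeNs: writes the accumulated duration into *output, fills
// any fully elapsed buckets in between (they count as entirely active while keys are started),
// feeds full buckets to the anomaly trackers, and resets per-bucket state. Returns true when no
// keys are started or paused, meaning the owner may drop this tracker.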
bool OringDurationTracker::flushCurrentBucket(
        const int64_t& eventTimeNs,
        std::unordered_map<MetricDimensionKey, std::vector<DurationBucket>>* output) {
    VLOG("OringDurationTracker Flushing.............");

    // Note that we have to mimic the bucket time changes we do in the
    // MetricProducer#notifyAppUpgrade.

    int numBucketsForward = 0;
    int64_t fullBucketEnd = getCurrentBucketEndTimeNs();
    int64_t currentBucketEndTimeNs;

    if (eventTimeNs >= fullBucketEnd) {
        numBucketsForward = 1 + (eventTimeNs - fullBucketEnd) / mBucketSizeNs;
        currentBucketEndTimeNs = fullBucketEnd;
    } else {
        // This must be a partial bucket.
        currentBucketEndTimeNs = eventTimeNs;
    }

    // Process the current bucket.
    if (mStarted.size() > 0) {
        mDuration += (currentBucketEndTimeNs - mLastStartTime);
    }
    if (mDuration > 0) {
        DurationBucket current_info;
        current_info.mBucketStartNs = mCurrentBucketStartTimeNs;
        current_info.mBucketEndNs = currentBucketEndTimeNs;
        current_info.mDuration = mDuration;
        (*output)[mEventKey].push_back(current_info);
        mDurationFullBucket += mDuration;
        VLOG("  duration: %lld", (long long)current_info.mDuration);
    }
    if (eventTimeNs > fullBucketEnd) {
        // End of full bucket, can send to anomaly tracker now.
        addPastBucketToAnomalyTrackers(mDurationFullBucket, mCurrentBucketNum);
        mDurationFullBucket = 0;
    }

    if (mStarted.size() > 0) {
        for (int i = 1; i < numBucketsForward; i++) {
            DurationBucket info;
            info.mBucketStartNs = fullBucketEnd + mBucketSizeNs * (i - 1);
            info.mBucketEndNs = info.mBucketStartNs + mBucketSizeNs;
            info.mDuration = mBucketSizeNs;
            (*output)[mEventKey].push_back(info);
            // Safe to send these buckets to anomaly tracker since they must be full buckets.
            // If it's a partial bucket, numBucketsForward would be 0.
            addPastBucketToAnomalyTrackers(info.mDuration, mCurrentBucketNum + i);
            VLOG("  add filling bucket with duration %lld", (long long)info.mDuration);
        }
    } else {
        if (numBucketsForward >= 2) {
            addPastBucketToAnomalyTrackers(0, mCurrentBucketNum + numBucketsForward - 1);
        }
    }

    mDuration = 0;

    if (numBucketsForward > 0) {
        mCurrentBucketStartTimeNs = fullBucketEnd + (numBucketsForward - 1) * mBucketSizeNs;
        mCurrentBucketNum += numBucketsForward;
    } else {  // We must be forming a partial bucket.
        mCurrentBucketStartTimeNs = eventTimeNs;
    }
    mLastStartTime = mCurrentBucketStartTimeNs;

    // If everything has stopped, tell the owner it is safe to remove this tracker.
    return mStarted.empty() && mPaused.empty();
}

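// Flushes only when eventTimeNs has reached the end of the current bucket; otherwise the bucket
// stays open and this returns false.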
bool OringDurationTracker::flushIfNeeded(
        int64_t eventTimeNs, unordered_map<MetricDimensionKey, vector<DurationBucket>>* output) {
    if (eventTimeNs < getCurrentBucketEndTimeNs()) {
        return false;
    }
    return flushCurrentBucket(eventTimeNs, output);
}

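// Re-evaluates the sliced condition for every tracked key: started keys whose condition is no
// longer true move to the paused set, paused keys whose condition became true move back to the
// started set, and the accumulated duration and anomaly alarm are updated to match.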
void OringDurationTracker::onSlicedConditionMayChange(bool overallCondition,
                                                      const int64_t timestamp) {
    vector<pair<HashableDimensionKey, int>> startedToPaused;
    vector<pair<HashableDimensionKey, int>> pausedToStarted;
    if (!mStarted.empty()) {
        for (auto it = mStarted.begin(); it != mStarted.end();) {
            const auto& key = it->first;
            const auto& condIt = mConditionKeyMap.find(key);
            if (condIt == mConditionKeyMap.end()) {
                VLOG("Key %s doesn't have a condition key", key.toString().c_str());
                ++it;
                continue;
            }
            std::unordered_set<HashableDimensionKey> conditionDimensionKeySet;
            ConditionState conditionState =
                mWizard->query(mConditionTrackerIndex, condIt->second,
                               mDimensionInCondition,
                               !mSameConditionDimensionsInTracker,
                               !mHasLinksToAllConditionDimensionsInTracker,
                               &conditionDimensionKeySet);
            if (conditionState != ConditionState::kTrue ||
                (mDimensionInCondition.size() != 0 &&
                 conditionDimensionKeySet.find(mEventKey.getDimensionKeyInCondition()) ==
                         conditionDimensionKeySet.end())) {
                startedToPaused.push_back(*it);
                it = mStarted.erase(it);
                VLOG("Key %s started -> paused", key.toString().c_str());
            } else {
                ++it;
            }
        }

        if (mStarted.empty()) {
            mDuration += (timestamp - mLastStartTime);
            VLOG("Duration add %lld , to %lld ", (long long)(timestamp - mLastStartTime),
                 (long long)mDuration);
            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
        }
    }

    if (!mPaused.empty()) {
        for (auto it = mPaused.begin(); it != mPaused.end();) {
            const auto& key = it->first;
            if (mConditionKeyMap.find(key) == mConditionKeyMap.end()) {
                VLOG("Key %s doesn't have a condition key", key.toString().c_str());
                ++it;
                continue;
            }
            std::unordered_set<HashableDimensionKey> conditionDimensionKeySet;
            ConditionState conditionState =
                mWizard->query(mConditionTrackerIndex, mConditionKeyMap[key],
                               mDimensionInCondition,
                               !mSameConditionDimensionsInTracker,
                               !mHasLinksToAllConditionDimensionsInTracker,
                               &conditionDimensionKeySet);
            if (conditionState == ConditionState::kTrue &&
                (mDimensionInCondition.size() == 0 ||
                 conditionDimensionKeySet.find(mEventKey.getDimensionKeyInCondition()) !=
                         conditionDimensionKeySet.end())) {
                pausedToStarted.push_back(*it);
                it = mPaused.erase(it);
                VLOG("Key %s paused -> started", key.toString().c_str());
            } else {
                ++it;
            }
        }

        if (mStarted.empty() && pausedToStarted.size() > 0) {
            mLastStartTime = timestamp;
        }
    }

    if (mStarted.empty() && !pausedToStarted.empty()) {
        startAnomalyAlarm(timestamp);
    }
    mStarted.insert(pausedToStarted.begin(), pausedToStarted.end());
    mPaused.insert(startedToPaused.begin(), startedToPaused.end());

    if (mStarted.empty()) {
        stopAnomalyAlarm(timestamp);
    }
}

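// Handles an unsliced condition flip: true resumes all paused keys (restarting the clock and the
// anomaly alarm if nothing was running), false pauses all started keys and accumulates the
// elapsed time.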
void OringDurationTracker::onConditionChanged(bool condition, const int64_t timestamp) {
    if (condition) {
        if (!mPaused.empty()) {
            VLOG("Condition true, all started");
            if (mStarted.empty()) {
                mLastStartTime = timestamp;
            }
            if (mStarted.empty() && !mPaused.empty()) {
                startAnomalyAlarm(timestamp);
            }
            mStarted.insert(mPaused.begin(), mPaused.end());
            mPaused.clear();
        }
    } else {
        if (!mStarted.empty()) {
            VLOG("Condition false, all paused");
            mDuration += (timestamp - mLastStartTime);
            mPaused.insert(mStarted.begin(), mStarted.end());
            mStarted.clear();
            detectAndDeclareAnomaly(timestamp, mCurrentBucketNum, mDuration + mDurationFullBucket);
        }
    }
    if (mStarted.empty()) {
        stopAnomalyAlarm(timestamp);
    }
}

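// Predicts when an anomaly would fire if the currently started keys stay active: it walks
// forward bucket by bucket, sliding the window of past buckets that the anomaly tracker sums
// over, and returns the earliest candidate timestamp that clears both the threshold and the
// refractory period.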
int64_t OringDurationTracker::predictAnomalyTimestampNs(
        const DurationAnomalyTracker& anomalyTracker, const int64_t eventTimestampNs) const {

    // The anomaly threshold.
    const int64_t thresholdNs = anomalyTracker.getAnomalyThreshold();

    // The timestamp of the current bucket end.
    const int64_t currentBucketEndNs = getCurrentBucketEndTimeNs();

    // The past duration ns for the current bucket.
    int64_t currentBucketPastNs = mDuration + mDurationFullBucket;

    // As we move into the future, old buckets get overwritten (so their old data is erased).
    // Sum of past durations. Will change as we overwrite old buckets.
    int64_t pastNs = currentBucketPastNs + anomalyTracker.getSumOverPastBuckets(mEventKey);

    // The refractory period end timestamp for dimension mEventKey.
    const int64_t refractoryPeriodEndNs =
            anomalyTracker.getRefractoryPeriodEndsSec(mEventKey) * NS_PER_SEC;

    // The anomaly should happen when accumulated wakelock duration is above the threshold and
    // not within the refractory period.
    int64_t anomalyTimestampNs =
        std::max(eventTimestampNs + thresholdNs - pastNs, refractoryPeriodEndNs);
    // If the predicted anomaly timestamp is within the current bucket, return it directly.
    if (anomalyTimestampNs <= currentBucketEndNs) {
        return std::max(eventTimestampNs, anomalyTimestampNs);
    }

    // Remove the old bucket.
    if (anomalyTracker.getNumOfPastBuckets() > 0) {
        pastNs -= anomalyTracker.getPastBucketValue(
                            mEventKey,
                            mCurrentBucketNum - anomalyTracker.getNumOfPastBuckets());
        // Add the remainder of the current bucket to the accumulated wakelock duration.
        pastNs += (currentBucketEndNs - eventTimestampNs);
    } else {
        // The anomaly depends on only one bucket.
        pastNs = 0;
    }

    // The anomaly will not happen in the current bucket. We need to iterate over the future buckets
    // to predict the accumulated wakelock duration and determine the anomaly timestamp accordingly.
    for (int futureBucketIdx = 1; futureBucketIdx <= anomalyTracker.getNumOfPastBuckets() + 1;
            futureBucketIdx++) {
        // The alarm candidate timestamp should meet three requirements:
        // 1. the accumulated wakelock duration is above the threshold.
        // 2. it is not within the refractory period.
        // 3. the alarm timestamp falls in this bucket. Otherwise we need to flush the past buckets,
        //    find the new alarm candidate timestamp and check these requirements again.
        const int64_t bucketEndNs = currentBucketEndNs + futureBucketIdx * mBucketSizeNs;
        int64_t anomalyTimestampNs =
            std::max(bucketEndNs - mBucketSizeNs + thresholdNs - pastNs, refractoryPeriodEndNs);
        if (anomalyTimestampNs <= bucketEndNs) {
            return anomalyTimestampNs;
        }
        if (anomalyTracker.getNumOfPastBuckets() <= 0) {
            continue;
        }

        // No valid alarm timestamp is found in this bucket. The clock moves to the end of the
        // bucket. Update the pastNs.
        pastNs += mBucketSizeNs;
        // 1. If the oldest past bucket is still in the past bucket window, we could fetch the past
        //    bucket and erase it from pastNs.
        // 2. If the oldest past bucket is the current bucket, we should compute the
        //    wakelock duration in the current bucket and erase it from pastNs.
        // 3. Otherwise all other past buckets are ancient.
        if (futureBucketIdx < anomalyTracker.getNumOfPastBuckets()) {
            pastNs -= anomalyTracker.getPastBucketValue(
                    mEventKey,
                    mCurrentBucketNum - anomalyTracker.getNumOfPastBuckets() + futureBucketIdx);
        } else if (futureBucketIdx == anomalyTracker.getNumOfPastBuckets()) {
            pastNs -= (currentBucketPastNs + (currentBucketEndNs - eventTimestampNs));
        }
    }

    return std::max(eventTimestampNs + thresholdNs, refractoryPeriodEndNs);
}

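// Debug dump of the started/paused key counts and the duration accumulated so far in the
// current bucket.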
void OringDurationTracker::dumpStates(FILE* out, bool verbose) const {
    fprintf(out, "\t\t started count %lu\n", (unsigned long)mStarted.size());
    fprintf(out, "\t\t paused count %lu\n", (unsigned long)mPaused.size());
    fprintf(out, "\t\t current duration %lld\n", (long long)mDuration);
}

}  // namespace statsd
}  // namespace os
}  // namespace android