/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.android.tradefed.result;

import com.android.ddmlib.testrunner.TestResult.TestStatus;
import com.android.tradefed.log.LogUtil.CLog;
import com.android.tradefed.metrics.proto.MetricMeasurement.Metric;
import com.android.tradefed.retry.MergeStrategy;
import com.android.tradefed.util.MultiMap;
import com.android.tradefed.util.proto.TfMetricProtoUtil;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

/**
 * Holds results from a single test run.
 *
 * <p>Maintains an accurate count of tests, and tracks incomplete tests.
 *
 * <p>Not thread safe! The test* callbacks must be called in order.
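 *
 * <p>A minimal usage sketch (the run and test names here are hypothetical):
 *
 * <pre>{@code
 * TestRunResult result = new TestRunResult();
 * result.testRunStarted("example-run", 1);
 * TestDescription test = new TestDescription("com.example.FooTest", "testBar");
 * result.testStarted(test);
 * result.testEnded(test, new HashMap<String, Metric>());
 * result.testRunEnded(0L, new HashMap<String, Metric>());
 * // result.getNumTestsInState(TestStatus.PASSED) == 1
 * }</pre>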
 */
public class TestRunResult {

    public static final String ERROR_DIVIDER = "\n====Next Error====\n";
    private String mTestRunName;
    // Uses a LinkedHashMap to have predictable iteration order
    private Map<TestDescription, TestResult> mTestResults =
            new LinkedHashMap<TestDescription, TestResult>();
    // Store the metrics for the run
    private Map<String, String> mRunMetrics = new HashMap<>();
    private HashMap<String, Metric> mRunProtoMetrics = new HashMap<>();
    // Log files associated with the test run itself (testRunStart / testRunEnd).
    private MultiMap<String, LogFile> mRunLoggedFiles;
    private boolean mIsRunComplete = false;
    private long mElapsedTime = 0L;
    private long mStartTime = 0L;

    private TestResult mCurrentTestResult;

    /** represents sums of tests in each TestStatus state. Indexed by TestStatus.ordinal() */
    private int[] mStatusCounts = new int[TestStatus.values().length];
    /** tracks if mStatusCounts is accurate, or if it needs to be recalculated */
    private boolean mIsCountDirty = true;

    private FailureDescription mRunFailureError = null;

    private boolean mAggregateMetrics = false;

    private int mExpectedTestCount = 0;

    /** Creates an empty {@link TestRunResult}. */
    public TestRunResult() {
        mTestRunName = "not started";
        mRunLoggedFiles = new MultiMap<String, LogFile>();
    }

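    /**
     * Sets whether run metrics should be aggregated across multiple testRunEnded calls: numeric
     * values are summed (see {@link #combineValues}); when disabled, later values overwrite
     * earlier ones.
     */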
    public void setAggregateMetrics(boolean metricAggregation) {
        mAggregateMetrics = metricAggregation;
    }

    /** @return the test run name */
    public String getName() {
        return mTestRunName;
    }

    /** Returns a map of the test results. */
    public Map<TestDescription, TestResult> getTestResults() {
        return mTestResults;
    }

    /** @return a {@link Map} of the test run metrics. */
    public Map<String, String> getRunMetrics() {
        return mRunMetrics;
    }

    /** @return a {@link Map} of the test run metrics with the new proto format. */
    public HashMap<String, Metric> getRunProtoMetrics() {
        return mRunProtoMetrics;
    }

    /** Gets the set of completed tests. */
    public Set<TestDescription> getCompletedTests() {
        List<TestStatus> completedStatuses = new ArrayList<>();
        for (TestStatus s : TestStatus.values()) {
            if (!s.equals(TestStatus.INCOMPLETE)) {
                completedStatuses.add(s);
            }
        }
        return getTestsInState(completedStatuses);
    }

    /** Gets the set of failed tests. */
    public Set<TestDescription> getFailedTests() {
        return getTestsInState(Arrays.asList(TestStatus.FAILURE));
    }

    /** Gets the set of tests in given statuses. */
    private Set<TestDescription> getTestsInState(List<TestStatus> statuses) {
        Set<TestDescription> tests = new LinkedHashSet<>();
        for (Map.Entry<TestDescription, TestResult> testEntry : getTestResults().entrySet()) {
            TestStatus status = testEntry.getValue().getStatus();
            if (statuses.contains(status)) {
                tests.add(testEntry.getKey());
            }
        }
        return tests;
    }

    /** @return <code>true</code> if test run failed. */
    public boolean isRunFailure() {
        return mRunFailureError != null;
    }

    /** @return <code>true</code> if test run finished. */
    public boolean isRunComplete() {
        return mIsRunComplete;
    }

    public void setRunComplete(boolean runComplete) {
        mIsRunComplete = runComplete;
    }

    /**
     * Gets the number of test cases this TestRunResult expects to have. The actual number may be
     * less than the expected number due to test crashes. Normally, such a mismatch indicates a
     * test run failure.
     */
    public int getExpectedTestCount() {
        return mExpectedTestCount;
    }

    /** Gets the number of tests in given state for this run. */
    public int getNumTestsInState(TestStatus status) {
        if (mIsCountDirty) {
            // clear counts
            for (int i = 0; i < mStatusCounts.length; i++) {
                mStatusCounts[i] = 0;
            }
            // now recalculate
            for (TestResult r : mTestResults.values()) {
                mStatusCounts[r.getStatus().ordinal()]++;
            }
            mIsCountDirty = false;
        }
        return mStatusCounts[status.ordinal()];
    }

    /** Returns all the {@link TestResult} in a particular state. */
    public List<TestResult> getTestsResultsInState(TestStatus status) {
        List<TestResult> results = new ArrayList<>();
        for (TestResult r : mTestResults.values()) {
            if (r.getStatus().equals(status)) {
                results.add(r);
            }
        }
        return results;
    }

    /** Gets the number of tests in this run. */
    public int getNumTests() {
        return mTestResults.size();
    }

    /** Gets the number of complete tests in this run, i.e. with status != incomplete. */
    public int getNumCompleteTests() {
        return getNumTests() - getNumTestsInState(TestStatus.INCOMPLETE);
    }

    /** @return <code>true</code> if test run had any failed or error tests. */
    public boolean hasFailedTests() {
        return getNumAllFailedTests() > 0;
    }

    /**
     * Return the total number of tests in a failure state. Note: only {@link TestStatus#FAILURE}
     * is counted here; assumption failures are tracked as a separate status.
     */
    public int getNumAllFailedTests() {
        return getNumTestsInState(TestStatus.FAILURE);
    }

    /** Returns the current run elapsed time. */
    public long getElapsedTime() {
        return mElapsedTime;
    }

    /** Returns the start time of the first testRunStart call. */
    public long getStartTime() {
        return mStartTime;
    }

    /** Return the run failure error message, <code>null</code> if the run did not fail. */
    public String getRunFailureMessage() {
        if (mRunFailureError == null) {
            return null;
        }
        return mRunFailureError.getErrorMessage();
    }

    /** Returns the run failure descriptor, <code>null</code> if the run did not fail. */
    public FailureDescription getRunFailureDescription() {
        return mRunFailureError;
    }

    /**
     * Reset the run failure status.
     *
     * <p>Resetting the run failure status is sometimes required when retrying. This should be done
     * with care to avoid clearing a real failure.
     */
    public void resetRunFailure() {
        mRunFailureError = null;
    }

    /**
     * Notify that a test run started.
     *
     * @param runName the name associated with the test run, for tracking purposes.
     * @param testCount the number of expected test cases associated with the test run.
     */
    public void testRunStarted(String runName, int testCount) {
        testRunStarted(runName, testCount, System.currentTimeMillis());
    }

    /**
     * Notify that a test run started.
     *
     * @param runName the name associated with the test run, for tracking purposes.
     * @param testCount the number of expected test cases associated with the test run.
     * @param startTime the time the run started, from {@link System#currentTimeMillis()}.
     */
    public void testRunStarted(String runName, int testCount, long startTime) {
        // A run may be started multiple times due to crashes or other reasons. Normally the first
        // invocation carries the expected number of tests ("testCount"). The expected count is
        // seeded by the first call; counts from any later testRunStarted calls are added to it
        // with a warning. mExpectedTestCount is initialized as 0.
        if (mExpectedTestCount == 0) {
            mExpectedTestCount = testCount;
        } else {
            CLog.w(
                    "%s calls testRunStarted more than once. Previous expected count: %s. "
                            + "New expected count: %s",
                    runName, mExpectedTestCount, mExpectedTestCount + testCount);
            mExpectedTestCount += testCount;
        }
        mTestRunName = runName;
        mIsRunComplete = false;
        if (mStartTime == 0L) {
            mStartTime = startTime;
        }
        // Do not reset mRunFailureError since for a re-run we want to preserve previous failures.
    }

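    /** Notify that a test case started; a new in-progress {@link TestResult} is recorded for it. */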
    public void testStarted(TestDescription test) {
        testStarted(test, System.currentTimeMillis());
    }

    public void testStarted(TestDescription test, long startTime) {
        mCurrentTestResult = new TestResult();
        mCurrentTestResult.setStartTime(startTime);
        addTestResult(test, mCurrentTestResult);
    }

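    /** Stores a test result and marks the cached per-status counts as stale. */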
    private void addTestResult(TestDescription test, TestResult testResult) {
        mIsCountDirty = true;
        mTestResults.put(test, testResult);
    }

    private void updateTestResult(
            TestDescription test, TestStatus status, FailureDescription failure) {
        TestResult r = mTestResults.get(test);
        if (r == null) {
            CLog.d("received test event without test start for %s", test);
            r = new TestResult();
        }
        r.setStatus(status);
        if (failure != null) {
            r.setFailure(failure);
        }
        addTestResult(test, r);
    }

    public void testFailed(TestDescription test, String trace) {
        updateTestResult(test, TestStatus.FAILURE, FailureDescription.create(trace));
    }

    public void testFailed(TestDescription test, FailureDescription failure) {
        updateTestResult(test, TestStatus.FAILURE, failure);
    }

    public void testAssumptionFailure(TestDescription test, String trace) {
        updateTestResult(test, TestStatus.ASSUMPTION_FAILURE, FailureDescription.create(trace));
    }

    public void testIgnored(TestDescription test) {
        updateTestResult(test, TestStatus.IGNORED, null);
    }

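    /**
     * Notify that a test case ended. A test still in {@link TestStatus#INCOMPLETE} at this point
     * is marked {@link TestStatus#PASSED}.
     */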
    public void testEnded(TestDescription test, HashMap<String, Metric> testMetrics) {
        testEnded(test, System.currentTimeMillis(), testMetrics);
    }

    public void testEnded(TestDescription test, long endTime, HashMap<String, Metric> testMetrics) {
        TestResult result = mTestResults.get(test);
        if (result == null) {
            result = new TestResult();
        }
        if (result.getStatus().equals(TestStatus.INCOMPLETE)) {
            result.setStatus(TestStatus.PASSED);
        }
        result.setEndTime(endTime);
        result.setMetrics(TfMetricProtoUtil.compatibleConvert(testMetrics));
        result.setProtoMetrics(testMetrics);
        addTestResult(test, result);
        mCurrentTestResult = null;
    }

    // TODO: Remove when done updating
    public void testRunFailed(String errorMessage) {
        if (errorMessage == null) {
            testRunFailed((FailureDescription) null);
        } else {
            testRunFailed(FailureDescription.create(errorMessage));
        }
    }

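    /**
     * Notify that the run failed. Successive failures are aggregated into a single {@link
     * MultiFailureDescription}.
     */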
    public void testRunFailed(FailureDescription failureDescription) {
        if (failureDescription == null) {
            failureDescription = FailureDescription.create("testRunFailed(null) was called.");
        }

        if (mRunFailureError != null) {
            if (mRunFailureError instanceof MultiFailureDescription) {
                ((MultiFailureDescription) mRunFailureError).addFailure(failureDescription);
            } else {
                MultiFailureDescription aggregatedFailure =
                        new MultiFailureDescription(mRunFailureError, failureDescription);
                mRunFailureError = aggregatedFailure;
            }
        } else {
            mRunFailureError = failureDescription;
        }
    }

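    /** Notify that the run was stopped; adds to the elapsed time and marks the run complete. */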
    public void testRunStopped(long elapsedTime) {
        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

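    /**
     * Notify that the run ended. If metric aggregation is enabled, numeric run metrics are
     * combined with existing values via {@link #combineValues}; otherwise new values overwrite
     * existing ones.
     */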
    public void testRunEnded(long elapsedTime, Map<String, String> runMetrics) {
        if (mAggregateMetrics) {
            for (Map.Entry<String, String> entry : runMetrics.entrySet()) {
                String existingValue = mRunMetrics.get(entry.getKey());
                String combinedValue = combineValues(existingValue, entry.getValue());
                mRunMetrics.put(entry.getKey(), combinedValue);
            }
        } else {
            mRunMetrics.putAll(runMetrics);
        }
        // Also add to the new interface:
        mRunProtoMetrics.putAll(TfMetricProtoUtil.upgradeConvert(runMetrics));

        mElapsedTime += elapsedTime;
        mIsRunComplete = true;
    }

    /** New interface using the new proto metrics. */
    public void testRunEnded(long elapsedTime, HashMap<String, Metric> runMetrics) {
        // Internally store the information in the backward-compatible format.
        testRunEnded(elapsedTime, TfMetricProtoUtil.compatibleConvert(runMetrics));
        // Store the new format directly too.
        // TODO: See if aggregation should/can be done with the new format.
        mRunProtoMetrics.putAll(runMetrics);

        // TODO: when the old format is deprecated, do not forget to uncomment the next two lines
        // mElapsedTime += elapsedTime;
        // mIsRunComplete = true;
    }

    /**
     * Combines the old and new values of a metric.
     *
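     * <p>For example (values chosen for illustration): combining {@code "1"} and {@code "2"}
     * yields {@code "3"}; combining {@code "1.5"} and {@code "2"} yields {@code "3.5"}; if either
     * value is not numeric, the new value simply replaces the existing one.
     *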
     * @param existingValue the currently stored value, or <code>null</code> if none exists.
     * @param newValue the incoming value.
     * @return the sum of the two values as a Long or Double string, or <code>newValue</code> if
     *     the values could not be parsed as numbers.
     */
    private String combineValues(String existingValue, String newValue) {
        if (existingValue != null) {
            try {
                Long existingLong = Long.parseLong(existingValue);
                Long newLong = Long.parseLong(newValue);
                return Long.toString(existingLong + newLong);
            } catch (NumberFormatException e) {
                // not a long, skip to next
            }
            try {
                Double existingDouble = Double.parseDouble(existingValue);
                Double newDouble = Double.parseDouble(newValue);
                return Double.toString(existingDouble + newDouble);
            } catch (NumberFormatException e) {
                // not a double either, fall through
            }
        }
        // default to overriding existingValue
        return newValue;
    }

    /**
     * Returns a user-friendly string describing results: the total test count followed by counts
     * for each non-zero status.
     */
    public String getTextSummary() {
        StringBuilder builder = new StringBuilder();
        builder.append(String.format("Total tests %d, ", getNumTests()));
        for (TestStatus status : TestStatus.values()) {
            int count = getNumTestsInState(status);
            // only add descriptive state for states that have non-zero values, to avoid cluttering
            // the response
            if (count > 0) {
                builder.append(String.format("%s %d, ", status.toString().toLowerCase(), count));
            }
        }
        return builder.toString();
    }

    /**
     * Information about a logged file is stored and associated with the test case or test run in
     * progress.
     *
     * @param dataName the name referencing the data.
     * @param logFile the {@link LogFile} object representing where the file was saved and
     *     information about it.
     */
    public void testLogSaved(String dataName, LogFile logFile) {
        if (mCurrentTestResult != null) {
            // We have a test case in progress, we can associate the log with it.
            mCurrentTestResult.addLoggedFile(dataName, logFile);
        } else {
            mRunLoggedFiles.put(dataName, logFile);
        }
    }

    /** Returns a copy of the map containing all the logged files associated with the test run. */
    public MultiMap<String, LogFile> getRunLoggedFiles() {
        return new MultiMap<>(mRunLoggedFiles);
    }

    /** @see #merge(List, MergeStrategy) */
    public static TestRunResult merge(List<TestRunResult> testRunResults) {
        return merge(testRunResults, MergeStrategy.ONE_TESTCASE_PASS_IS_PASS);
    }

    /**
     * Merge multiple TestRunResults of the same testRunName. If a test case shows up in multiple
     * TestRunResults but has different results (e.g. "boottest-device" runs three times with
     * results FAIL-FAIL-PASS), we concatenate all the stack traces from the FAILED runs and trust
     * the final run result for status, metrics, log files, and start/end time.
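     *
     * <p>A sketch of the expected aggregation (the attempt list is hypothetical):
     *
     * <pre>{@code
     * // attempts holds three TestRunResults named "boottest-device": FAIL, FAIL, PASS.
     * TestRunResult merged =
     *         TestRunResult.merge(attempts, MergeStrategy.ONE_TESTCASE_PASS_IS_PASS);
     * // merged.getName() returns "boottest-device"; the merged test case trusts the final
     * // attempt's result, and the earlier failure stack traces are concatenated.
     * }</pre>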
     *
     * @param testRunResults A list of TestRunResult to merge.
     * @param strategy the merging strategy adopted for merging results.
     * @return the final TestRunResult containing the merged data from the testRunResults.
     */
    public static TestRunResult merge(List<TestRunResult> testRunResults, MergeStrategy strategy) {
        if (testRunResults.isEmpty()) {
            return null;
        }
        if (MergeStrategy.NO_MERGE.equals(strategy)) {
            throw new IllegalArgumentException(
                    "TestRunResult#merge cannot be called with NO_MERGE strategy.");
        }
        if (testRunResults.size() == 1) {
            // No merging is needed in case of a single test run result.
            return testRunResults.get(0);
        }
        TestRunResult finalRunResult = new TestRunResult();

        String testRunName = testRunResults.get(0).getName();
        Map<String, String> finalRunMetrics = new HashMap<>();
        HashMap<String, Metric> finalRunProtoMetrics = new HashMap<>();
        MultiMap<String, LogFile> finalRunLoggedFiles = new MultiMap<>();
        Map<TestDescription, List<TestResult>> testResultsAttempts = new LinkedHashMap<>();

        // Keep track of whether any of the runs is incomplete
        boolean isAtLeastOneCompleted = false;
        boolean areAllCompleted = true;
        // Keep track of whether we have run failures or not
        List<FailureDescription> runErrors = new ArrayList<>();
        boolean atLeastOneFailure = false;
        boolean allFailure = true;
        // Keep track of elapsed time
        long elapsedTime = 0L;
        int maxExpectedTestCount = 0;

        for (TestRunResult eachRunResult : testRunResults) {
            // Check all mTestRunNames are the same.
            if (!testRunName.equals(eachRunResult.getName())) {
                throw new IllegalArgumentException(
                        String.format(
                                "Unable to merge TestRunResults: The run results names are "
                                        + "different (%s, %s)",
                                testRunName, eachRunResult.getName()));
            }
            elapsedTime += eachRunResult.getElapsedTime();
            // Evaluate the run failures
            if (eachRunResult.isRunFailure()) {
                atLeastOneFailure = true;
                FailureDescription currentFailure = eachRunResult.getRunFailureDescription();
                if (currentFailure instanceof MultiFailureDescription) {
                    runErrors.addAll(((MultiFailureDescription) currentFailure).getFailures());
                } else {
                    runErrors.add(currentFailure);
                }
            } else {
                allFailure = false;
            }
            // Evaluate the run completion
            if (eachRunResult.isRunComplete()) {
                isAtLeastOneCompleted = true;
            } else {
                areAllCompleted = false;
            }

            // A run may start multiple times. Normally the first run shows the expected count
            // (max value).
            maxExpectedTestCount =
                    Math.max(maxExpectedTestCount, eachRunResult.getExpectedTestCount());

            // Keep the last TestRunResult's RunMetrics and ProtoMetrics
            finalRunMetrics.putAll(eachRunResult.getRunMetrics());
            finalRunProtoMetrics.putAll(eachRunResult.getRunProtoMetrics());
            finalRunLoggedFiles.putAll(eachRunResult.getRunLoggedFiles());
            // TODO: We are not handling the TestResult log files in the merging logic (different
            // from the TestRunResult log files). Need to improve in the future.
            for (Map.Entry<TestDescription, TestResult> testResultEntry :
                    eachRunResult.getTestResults().entrySet()) {
                if (!testResultsAttempts.containsKey(testResultEntry.getKey())) {
                    testResultsAttempts.put(testResultEntry.getKey(), new ArrayList<>());
                }
                List<TestResult> results = testResultsAttempts.get(testResultEntry.getKey());
                results.add(testResultEntry.getValue());
            }
        }

        // Evaluate test cases based on strategy
        finalRunResult.mTestResults = evaluateTestCases(testResultsAttempts, strategy);
        // Evaluate the run error status based on strategy
        boolean isRunFailure = isRunFailed(atLeastOneFailure, allFailure, strategy);
        if (isRunFailure) {
            if (runErrors.size() == 1) {
                finalRunResult.mRunFailureError = runErrors.get(0);
            } else {
                finalRunResult.mRunFailureError = new MultiFailureDescription(runErrors);
            }
        }
        // Evaluate run completion from all the attempts based on strategy
        finalRunResult.mIsRunComplete =
                isRunComplete(isAtLeastOneCompleted, areAllCompleted, strategy);

        finalRunResult.mTestRunName = testRunName;
        finalRunResult.mRunMetrics = finalRunMetrics;
        finalRunResult.mRunProtoMetrics = finalRunProtoMetrics;
        finalRunResult.mRunLoggedFiles = finalRunLoggedFiles;

        finalRunResult.mExpectedTestCount = maxExpectedTestCount;
        // Report total elapsed times
        finalRunResult.mElapsedTime = elapsedTime;
        return finalRunResult;
    }

    /** Merge the different test case attempts based on the strategy. */
    private static Map<TestDescription, TestResult> evaluateTestCases(
            Map<TestDescription, List<TestResult>> results, MergeStrategy strategy) {
        Map<TestDescription, TestResult> finalTestResults = new LinkedHashMap<>();
        for (TestDescription description : results.keySet()) {
            List<TestResult> attemptRes = results.get(description);
            TestResult aggResult = TestResult.merge(attemptRes, strategy);
            finalTestResults.put(description, aggResult);
        }
        return finalTestResults;
    }

    /** Decides whether an aggregation of runs should be considered failed under the strategy. */
    private static boolean isRunFailed(
            boolean atLeastOneFailure, boolean allFailures, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return allFailures;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return atLeastOneFailure;
        }
    }

    /** Decides whether an aggregation of runs should be considered complete under the strategy. */
    private static boolean isRunComplete(
            boolean isAtLeastOneCompleted, boolean areAllCompleted, MergeStrategy strategy) {
        switch (strategy) {
            case ANY_PASS_IS_PASS:
            case ONE_TESTRUN_PASS_IS_PASS:
                return isAtLeastOneCompleted;
            case ONE_TESTCASE_PASS_IS_PASS:
            case ANY_FAIL_IS_FAIL:
            default:
                return areAllCompleted;
        }
    }
}