#!/usr/bin/env python3
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Command line utility for running Android tests through TradeFederation.

atest helps automate the flow of building test modules across the Android
code base and executing the tests via the TradeFederation test harness.

atest is designed to support any test types that can be run by TradeFederation.
"""

# pylint: disable=line-too-long

from __future__ import print_function

import collections
import logging
import os
import sys
import tempfile
import time
import platform

from multiprocessing import Process

import atest_arg_parser
import atest_error
import atest_execution_info
import atest_utils
import bug_detector
import cli_translator
import constants
import module_info
import result_reporter
import test_runner_handler

from metrics import metrics
from metrics import metrics_base
from metrics import metrics_utils
from test_runners import regression_test_runner
from tools import atest_tools as at

EXPECTED_VARS = frozenset([
    constants.ANDROID_BUILD_TOP,
    'ANDROID_TARGET_OUT_TESTCASES',
    constants.ANDROID_OUT])
TEST_RUN_DIR_PREFIX = "%Y%m%d_%H%M%S"
CUSTOM_ARG_FLAG = '--'
OPTION_NOT_FOR_TEST_MAPPING = (
    'Option `%s` does not work for running tests in TEST_MAPPING files')

DEVICE_TESTS = 'tests that require device'
HOST_TESTS = 'tests that do NOT require device'
RESULT_HEADER_FMT = '\nResults from %(test_type)s:'
RUN_HEADER_FMT = '\nRunning %(test_count)d %(test_type)s.'
TEST_COUNT = 'test_count'
TEST_TYPE = 'test_type'
# Tasks that must run at build time but cannot be built by Soong
# (e.g. subprocesses that invoke host commands).
ACLOUD_CREATE = at.acloud_create
INDEX_TARGETS = at.index_targets


def _run_multi_proc(func, *args, **kwargs):
    """Start a process with multiprocessing and return the Process object.

    Args:
        func: The function (callable) to run as the process target.
        args/kwargs: Additional arguments forwarded to multiprocessing.Process.
            See https://docs.python.org/3.8/library/multiprocessing.html#process-and-exceptions

    Returns:
        multiprocessing.Process object.
    """

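    # Extra positional/keyword arguments are forwarded to the
    # multiprocessing.Process constructor (e.g. args=, kwargs=, daemon=),
    # so callers control how the target function is invoked.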
    proc = Process(target=func, *args, **kwargs)
    proc.start()
    return proc


def _parse_args(argv):
    """Parse command line arguments.

    Args:
        argv: A list of arguments.

    Returns:
        An argparse.Namespace class instance holding parsed args.
    """
    # Store everything after '--' in custom_args.
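    # e.g. for "atest <test> -- <custom args>", everything after the first
    # '--' is passed through untouched for the test runner to consume.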
    pruned_argv = argv
    custom_args_index = None
    if CUSTOM_ARG_FLAG in argv:
        custom_args_index = argv.index(CUSTOM_ARG_FLAG)
        pruned_argv = argv[:custom_args_index]
    parser = atest_arg_parser.AtestArgParser()
    parser.add_atest_args()
    args = parser.parse_args(pruned_argv)
    args.custom_args = []
    if custom_args_index is not None:
        args.custom_args = argv[custom_args_index+1:]
    return args


def _configure_logging(verbose):
    """Configure the logger.

    Args:
        verbose: A boolean. If True, display DEBUG level logs.
    """
    log_format = '%(asctime)s %(filename)s:%(lineno)s:%(levelname)s: %(message)s'
    datefmt = '%Y-%m-%d %H:%M:%S'
    if verbose:
        logging.basicConfig(level=logging.DEBUG,
                            format=log_format, datefmt=datefmt)
    else:
        logging.basicConfig(level=logging.INFO,
                            format=log_format, datefmt=datefmt)


def _missing_environment_variables():
    """Verify the local environment has been set up to run atest.

    Returns:
        List of strings of any missing environment variables.
    """
    missing = [x for x in EXPECTED_VARS if not os.environ.get(x)]
    if missing:
        logging.error('Local environment doesn\'t appear to have been '
                      'initialized. Did you remember to run lunch? Expected '
                      'Environment Variables: %s.', missing)
    return missing


def make_test_run_dir():
    """Make the test run dir in ATEST_RESULT_ROOT.

    Returns:
        A string of the dir path.
    """
    if not os.path.exists(constants.ATEST_RESULT_ROOT):
        os.makedirs(constants.ATEST_RESULT_ROOT)
    ctime = time.strftime(TEST_RUN_DIR_PREFIX, time.localtime())
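    # mkdtemp appends a random suffix to the timestamped prefix, so concurrent
    # atest runs cannot collide on the same result directory.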
    test_result_dir = tempfile.mkdtemp(prefix='%s_' % ctime,
                                       dir=constants.ATEST_RESULT_ROOT)
    return test_result_dir


def get_extra_args(args):
    """Get extra args for test runners.

    Args:
        args: parsed args object.

    Returns:
        Dict of extra args for test runners to utilize.
    """
    extra_args = {}
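    # Flags that take no value are stored with None as a placeholder value.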
    if args.wait_for_debugger:
        extra_args[constants.WAIT_FOR_DEBUGGER] = None
    steps = args.steps or constants.ALL_STEPS
    if constants.INSTALL_STEP not in steps:
        extra_args[constants.DISABLE_INSTALL] = None
    # Each key of arg_maps below is an attribute of args; when it is set, its
    # value is copied into extra_args, i.e.:
    # if args.aaaa:
    #     extra_args[constants.AAAA] = args.aaaa
    arg_maps = {'all_abi': constants.ALL_ABI,
                'collect_tests_only': constants.COLLECT_TESTS_ONLY,
                'custom_args': constants.CUSTOM_ARGS,
                'disable_teardown': constants.DISABLE_TEARDOWN,
                'dry_run': constants.DRY_RUN,
                'generate_baseline': constants.PRE_PATCH_ITERATIONS,
                'generate_new_metrics': constants.POST_PATCH_ITERATIONS,
                'host': constants.HOST,
                'instant': constants.INSTANT,
                'iterations': constants.ITERATIONS,
                'rerun_until_failure': constants.RERUN_UNTIL_FAILURE,
                'retry_any_failure': constants.RETRY_ANY_FAILURE,
                'serial': constants.SERIAL,
                'sharding': constants.SHARDING,
                'tf_debug': constants.TF_DEBUG,
                'tf_template': constants.TF_TEMPLATE,
                'user_type': constants.USER_TYPE,
                'flakes_info': constants.FLAKES_INFO}
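    # Fail fast if the argument parser and arg_maps above ever drift apart.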
    not_match = [k for k in arg_maps if k not in vars(args)]
    if not_match:
        raise AttributeError('%s object has no attribute %s'
                             % (type(args).__name__, not_match))
    extra_args.update({arg_maps.get(k): v for k, v in vars(args).items()
                       if arg_maps.get(k) and v})
    return extra_args


def _get_regression_detection_args(args, results_dir):
    """Get args for regression detection test runners.

    Args:
        args: parsed args object.
        results_dir: string directory to store atest results.

    Returns:
        Dict of args for regression detection test runner to utilize.
    """
    regression_args = {}
    pre_patch_folder = (os.path.join(results_dir, 'baseline-metrics') if args.generate_baseline
                        else args.detect_regression.pop(0))
    post_patch_folder = (os.path.join(results_dir, 'new-metrics') if args.generate_new_metrics
                         else args.detect_regression.pop(0))
    regression_args[constants.PRE_PATCH_FOLDER] = pre_patch_folder
    regression_args[constants.POST_PATCH_FOLDER] = post_patch_folder
    return regression_args


def _validate_exec_mode(args, test_infos, host_tests=None):
    """Validate that the test execution modes are not in conflict.

    Exit the program with an error code if device-only and host-only tests
    are mixed. If there is no conflict and the tests run on the host side,
    set args.host=True.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
        host_tests: True if all tests should be deviceless, False if all tests
            should be device tests. Default is set to None, which means
            tests can be either deviceless or device tests.
    """
    all_device_modes = [x.get_supported_exec_mode() for x in test_infos]
    err_msg = None
    # In the case of '$atest <device-only> --host', exit.
    if (host_tests or args.host) and constants.DEVICE_TEST in all_device_modes:
        err_msg = ('Test side and option (--host) conflict. Please remove '
                   '--host if the tests run on the device side.')
    # In the case of '$atest <host-only> <device-only> --host' or
    # '$atest <host-only> <device-only>', exit.
    if (constants.DEVICELESS_TEST in all_device_modes and
            constants.DEVICE_TEST in all_device_modes):
        err_msg = 'There are host-only and device-only tests in the command.'
    if host_tests is False and constants.DEVICELESS_TEST in all_device_modes:
        err_msg = 'There are host-only tests in the command.'
    if err_msg:
        logging.error(err_msg)
        metrics_utils.send_exit_event(constants.EXIT_CODE_ERROR, logs=err_msg)
        sys.exit(constants.EXIT_CODE_ERROR)
    # In the case of '$atest <host-only>', we add --host to run on host-side.
    # The option should only be overridden if `host_tests` is not set.
    if not args.host and host_tests is None:
        args.host = bool(constants.DEVICELESS_TEST in all_device_modes)


def _validate_tm_tests_exec_mode(args, test_infos):
    """Validate that the test execution modes are not in conflict.

    Split the tests in TEST_MAPPING files into two groups: device tests and
    deviceless tests running on the host. Validate the tests' host setting.
    For device tests, exit the program if any test is found to be host-only.
    For deviceless tests, exit the program if any test is found to be
    device-only.

    Args:
        args: parsed args object.
        test_infos: A list of TestInfo objects.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(
        test_infos)
    # No need to verify device tests if atest command is set to only run host
    # tests.
    if device_test_infos and not args.host:
        _validate_exec_mode(args, device_test_infos, host_tests=False)
    if host_test_infos:
        _validate_exec_mode(args, host_test_infos, host_tests=True)


def _will_run_tests(args):
    """Determine if there are tests to run.

    Currently only used to skip the test run when atest is invoked purely for
    regression detection.

    Args:
        args: parsed args object.

    Returns:
        True if there are tests to run, False otherwise.
    """
    return not (args.detect_regression and len(args.detect_regression) == 2)


# pylint: disable=no-else-return
# This method is going to be removed soon; ignore pylint for now.
def _has_valid_regression_detection_args(args):
    """Validate regression detection args.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid.
    """
    if args.generate_baseline and args.generate_new_metrics:
        logging.error('Cannot collect both baseline and new metrics '
                      'at the same time.')
        return False
    if args.detect_regression is not None:
        if not args.detect_regression:
            logging.error('Need to specify at least 1 arg for'
                          ' regression detection.')
            return False
        elif len(args.detect_regression) == 1:
            if args.generate_baseline or args.generate_new_metrics:
                return True
            logging.error('Need to specify --generate-baseline or'
                          ' --generate-new-metrics.')
            return False
        elif len(args.detect_regression) == 2:
            if args.generate_baseline:
                logging.error('Specified 2 metric paths and --generate-baseline'
                              ', either drop --generate-baseline or drop a path')
                return False
            if args.generate_new_metrics:
                logging.error('Specified 2 metric paths and --generate-new-metrics, '
                              'either drop --generate-new-metrics or drop a path')
                return False
            return True
        else:
            logging.error('Specified more than 2 metric paths.')
            return False
    return True


def _has_valid_test_mapping_args(args):
    """Validate test mapping args.

    Not all args work when running tests in TEST_MAPPING files. Validate the
    args before running the tests.

    Args:
        args: parsed args object.

    Returns:
        True if args are valid.
    """
    is_test_mapping = atest_utils.is_test_mapping(args)
    if not is_test_mapping:
        return True
    options_to_validate = [
        (args.generate_baseline, '--generate-baseline'),
        (args.detect_regression, '--detect-regression'),
        (args.generate_new_metrics, '--generate-new-metrics'),
    ]
    for arg_value, arg in options_to_validate:
        if arg_value:
            logging.error(OPTION_NOT_FOR_TEST_MAPPING, arg)
            return False
    return True


def _validate_args(args):
    """Validate setups and args.

    Exit the program with an error code if any setup or arg is invalid.

    Args:
        args: parsed args object.
    """
    if _missing_environment_variables():
        sys.exit(constants.EXIT_CODE_ENV_NOT_SETUP)
    if args.generate_baseline and args.generate_new_metrics:
        logging.error(
            'Cannot collect both baseline and new metrics at the same time.')
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_regression_detection_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)
    if not _has_valid_test_mapping_args(args):
        sys.exit(constants.EXIT_CODE_ERROR)


def _print_module_info_from_module_name(mod_info, module_name):
    """Print the related module_info for a module_name.

    Args:
        mod_info: ModuleInfo object.
        module_name: A string of module name.

    Returns:
        True if the module_info is found.
    """
    title_mapping = collections.OrderedDict()
    title_mapping[constants.MODULE_COMPATIBILITY_SUITES] = 'Compatibility suite'
    title_mapping[constants.MODULE_PATH] = 'Source code path'
    title_mapping[constants.MODULE_INSTALLED] = 'Installed path'
    target_module_info = mod_info.get_module_info(module_name)
    is_module_found = False
    if target_module_info:
        atest_utils.colorful_print(module_name, constants.GREEN)
        for title_key in title_mapping:
            atest_utils.colorful_print("\t%s" % title_mapping[title_key],
                                       constants.CYAN)
            for info_value in target_module_info[title_key]:
                print("\t\t{}".format(info_value))
        is_module_found = True
    return is_module_found


def _print_test_info(mod_info, test_infos):
    """Print the module information from TestInfos.

    Args:
        mod_info: ModuleInfo object.
        test_infos: A list of TestInfos.

    Returns:
        Always returns EXIT_CODE_SUCCESS.
    """
    for test_info in test_infos:
        _print_module_info_from_module_name(mod_info, test_info.test_name)
        atest_utils.colorful_print("\tRelated build targets", constants.MAGENTA)
        sorted_build_targets = sorted(test_info.build_targets)
        print("\t\t{}".format(", ".join(sorted_build_targets)))
        for build_target in sorted_build_targets:
            if build_target != test_info.test_name:
                _print_module_info_from_module_name(mod_info, build_target)
        atest_utils.colorful_print("", constants.WHITE)
    return constants.EXIT_CODE_SUCCESS


def is_from_test_mapping(test_infos):
    """Check that the test_infos came from TEST_MAPPING files.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        True if the test infos are from TEST_MAPPING files.
    """
    return list(test_infos)[0].from_test_mapping


def _split_test_mapping_tests(test_infos):
    """Split Test Mapping tests into 2 groups: device tests and host tests.

    Args:
        test_infos: A set of TestInfos.

    Returns:
        A tuple of (device_test_infos, host_test_infos), where
        device_test_infos: A set of TestInfos for tests that require device.
        host_test_infos: A set of TestInfos for tests that do NOT require
            device.
    """
    assert is_from_test_mapping(test_infos)
    host_test_infos = {info for info in test_infos if info.host}
    device_test_infos = {info for info in test_infos if not info.host}
    return device_test_infos, host_test_infos


# pylint: disable=too-many-locals
def _run_test_mapping_tests(results_dir, test_infos, extra_args):
    """Run all tests in TEST_MAPPING files.

    Args:
        results_dir: String directory to store atest results.
        test_infos: A set of TestInfos.
        extra_args: Dict of extra args to add to test run.

    Returns:
        Exit code.
    """
    device_test_infos, host_test_infos = _split_test_mapping_tests(test_infos)
    # `host` option needs to be set to True to run host side tests.
    host_extra_args = extra_args.copy()
    host_extra_args[constants.HOST] = True
    test_runs = [(host_test_infos, host_extra_args, HOST_TESTS)]
    if extra_args.get(constants.HOST):
        atest_utils.colorful_print(
            'Option `--host` specified. Skip running device tests.',
            constants.MAGENTA)
    else:
        test_runs.append((device_test_infos, extra_args, DEVICE_TESTS))

    test_results = []
    for tests, args, test_type in test_runs:
        if not tests:
            continue
        header = RUN_HEADER_FMT % {TEST_COUNT: len(tests), TEST_TYPE: test_type}
        atest_utils.colorful_print(header, constants.MAGENTA)
        logging.debug('\n'.join([str(info) for info in tests]))
        tests_exit_code, reporter = test_runner_handler.run_all_tests(
            results_dir, tests, args, delay_print_summary=True)
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        test_results.append((tests_exit_code, reporter, test_type))

    all_tests_exit_code = constants.EXIT_CODE_SUCCESS
    failed_tests = []
    for tests_exit_code, reporter, test_type in test_results:
        atest_utils.colorful_print(
            RESULT_HEADER_FMT % {TEST_TYPE: test_type}, constants.MAGENTA)
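        # Combine the runner's exit code with the reporter's summary so that
        # a failure in either marks this test type as failed.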
        result = tests_exit_code | reporter.print_summary()
        if result:
            failed_tests.append(test_type)
        all_tests_exit_code |= result

    # List failed tests at the end as a reminder.
    if failed_tests:
        atest_utils.colorful_print(
            atest_utils.delimiter('=', 30, prenl=1), constants.YELLOW)
        atest_utils.colorful_print(
            '\nFollowing tests failed:', constants.MAGENTA)
        for failure in failed_tests:
            atest_utils.colorful_print(failure, constants.RED)

    return all_tests_exit_code


def _dry_run(results_dir, extra_args, test_infos):
    """Only print the commands of the target tests rather than actually running them.

    Args:
        results_dir: Path for saving atest logs.
        extra_args: Dict of extra args for test runners to utilize.
        test_infos: A list of TestInfos.

    Returns:
        A list of test commands.
    """
    all_run_cmds = []
    for test_runner, tests in test_runner_handler.group_tests_by_test_runners(test_infos):
        runner = test_runner(results_dir)
        run_cmds = runner.generate_run_commands(tests, extra_args)
        for run_cmd in run_cmds:
            all_run_cmds.append(run_cmd)
            print('Would run test via command: %s'
                  % (atest_utils.colorize(run_cmd, constants.GREEN)))
    return all_run_cmds

def _print_testable_modules(mod_info, suite):
    """Print the testable modules for a given suite.

    Args:
        mod_info: ModuleInfo object.
        suite: A string of suite name.
    """
    testable_modules = mod_info.get_testable_modules(suite)
    print('\n%s' % atest_utils.colorize('%s Testable %s modules' % (
        len(testable_modules), suite), constants.CYAN))
    print(atest_utils.delimiter('-'))
    for module in sorted(testable_modules):
        print('\t%s' % module)

def _is_inside_android_root():
    """Identify whether the cwd is inside the Android source tree.

    Returns:
        False if the cwd is outside of the source tree, True otherwise.
    """
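    # Default to a single space so the substring check below normally fails
    # when ANDROID_BUILD_TOP is not set in the environment.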
    build_top = os.getenv(constants.ANDROID_BUILD_TOP, ' ')
    return build_top in os.getcwd()

def _non_action_validator(args):
    """Handle non-action arguments such as --version, --help, --history,
    --latest_result, etc.

    Args:
        args: An argparse.Namespace class instance holding parsed args.
    """
    if not _is_inside_android_root():
        atest_utils.colorful_print(
            "\nAtest must always work under ${}!".format(
                constants.ANDROID_BUILD_TOP), constants.RED)
        sys.exit(constants.EXIT_CODE_OUTSIDE_ROOT)
    if args.version:
        if os.path.isfile(constants.VERSION_FILE):
            with open(constants.VERSION_FILE) as version_file:
                print(version_file.read())
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.help:
        atest_arg_parser.print_epilog_text()
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.history:
        atest_execution_info.print_test_result(constants.ATEST_RESULT_ROOT,
                                               args.history)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    if args.latest_result:
        atest_execution_info.print_test_result_by_path(
            constants.LATEST_RESULT_FILE)
        sys.exit(constants.EXIT_CODE_SUCCESS)
    # TODO(b/131879842): Remove this warning after the arguments below are
    # fully removed.
    if any((args.detect_regression,
            args.generate_baseline,
            args.generate_new_metrics)):
        stop_msg = ('Please STOP using the arguments below -- they are obsolete '
                    'and will be removed in the near future:\n'
                    '\t--detect-regression\n'
                    '\t--generate-baseline\n'
                    '\t--generate-new-metrics\n')
        msg = ('Please use the arguments below instead:\n'
               '\t--iterations\n'
               '\t--rerun-until-failure\n'
               '\t--retry-any-failure\n')
        atest_utils.colorful_print(stop_msg, constants.RED)
        atest_utils.colorful_print(msg, constants.CYAN)

def _dry_run_validator(args, results_dir, extra_args, test_infos):
    """Process the --dry-run argument.

    Args:
        args: An argparse.Namespace class instance holding parsed args.
        results_dir: A string path of the results dir.
        extra_args: A dict of extra args for test runners to utilize.
        test_infos: A list of test_info.
    Returns:
        Exit code.
    """
    args.tests.sort()
    dry_run_cmds = _dry_run(results_dir, extra_args, test_infos)
    if args.verify_cmd_mapping:
        try:
            atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                               dry_run_cmds,
                                               do_verification=True)
        except atest_error.DryRunVerificationError as e:
            atest_utils.colorful_print(str(e), constants.RED)
            return constants.EXIT_CODE_VERIFY_FAILURE
    if args.update_cmd_mapping:
        atest_utils.handle_test_runner_cmd(' '.join(args.tests),
                                           dry_run_cmds)
    return constants.EXIT_CODE_SUCCESS

def acloud_create_validator(results_dir, args):
    """Check the lunch'd target before running 'acloud create'.

    Args:
        results_dir: A string of the results directory.
        args: An argparse.Namespace class instance holding parsed args.

    Returns:
        If the target is valid:
            A tuple of (multiprocessing.Process,
                        string of report file path,
                        start time of acloud_create)
        else:
            None, None, None
    """
    if not any((args.acloud_create, args.start_avd)):
        return None, None, None
    if args.start_avd:
        args.acloud_create = ['--num=1']
    acloud_args = ' '.join(args.acloud_create)
    target = os.getenv('TARGET_PRODUCT', "")
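    # Only cf_x86-family (Cuttlefish) targets are supported for AVD creation.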
    if 'cf_x86' in target:
        start = time.time()
        report_file = at.get_report_file(results_dir, acloud_args)
        acloud_proc = _run_multi_proc(
            func=ACLOUD_CREATE,
            args=[report_file],
            kwargs={'args': acloud_args,
                    'no_metrics_notice': args.no_metrics})
        return acloud_proc, report_file, start
    atest_utils.colorful_print(
        '{} is not cf_x86 family; will not create any AVD.'.format(target),
        constants.RED)
    return None, None, None

# pylint: disable=too-many-statements
# pylint: disable=too-many-branches
# pylint: disable=too-many-return-statements
def main(argv, results_dir, args):
    """Entry point of atest script.

    Args:
        argv: A list of arguments.
        results_dir: A directory which stores the ATest execution information.
        args: An argparse.Namespace class instance holding parsed args.

    Returns:
        Exit code.
    """
    _configure_logging(args.verbose)
    _validate_args(args)
    metrics_utils.get_start_time()
    os_pyver = '{}:{}'.format(platform.platform(), platform.python_version())
    metrics.AtestStartEvent(
        command_line=' '.join(argv),
        test_references=args.tests,
        cwd=os.getcwd(),
        os=os_pyver)
    _non_action_validator(args)
    proc_acloud, report_file, acloud_start = acloud_create_validator(results_dir, args)
    mod_info = module_info.ModuleInfo(force_build=args.rebuild_module_info)
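    # When module-info is rebuilt, re-index targets right away and wait for
    # the indexing to finish before translating the test references.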
    if args.rebuild_module_info:
        proc_idx = _run_multi_proc(INDEX_TARGETS)
        proc_idx.join()
    translator = cli_translator.CLITranslator(module_info=mod_info,
                                              print_cache_msg=not args.clear_cache)
    if args.list_modules:
        _print_testable_modules(mod_info, args.list_modules)
        return constants.EXIT_CODE_SUCCESS
    # Clear the cache if the user passes the -c option.
    if args.clear_cache:
        atest_utils.clean_test_info_caches(args.tests)
    build_targets = set()
    test_infos = set()
    if _will_run_tests(args):
        find_start = time.time()
        build_targets, test_infos = translator.translate(args)
        find_duration = time.time() - find_start
        if not test_infos:
            return constants.EXIT_CODE_TEST_NOT_FOUND
        if not is_from_test_mapping(test_infos):
            _validate_exec_mode(args, test_infos)
        else:
            _validate_tm_tests_exec_mode(args, test_infos)
    if args.info:
        return _print_test_info(mod_info, test_infos)
    build_targets |= test_runner_handler.get_test_runner_reqs(mod_info,
                                                              test_infos)
    extra_args = get_extra_args(args)
    if any((args.update_cmd_mapping, args.verify_cmd_mapping, args.dry_run)):
        return _dry_run_validator(args, results_dir, extra_args, test_infos)
    if args.detect_regression:
        build_targets |= (regression_test_runner.RegressionTestRunner('')
                          .get_test_runner_build_reqs())
    # args.steps is None if none of -b/-i/-t is set; otherwise it is the list
    # of the steps that were given.
    steps = args.steps if args.steps else constants.ALL_STEPS
    if build_targets and constants.BUILD_STEP in steps:
        if constants.TEST_STEP in steps and not args.rebuild_module_info:
            # Run extra tasks along with the build step concurrently. Note that
            # atest won't index targets when only "-b" is given (without -t).
            proc_idx = _run_multi_proc(INDEX_TARGETS, daemon=True)
        # Add the module-info.json target to the list of build targets to keep
        # the file up to date.
        build_targets.add(mod_info.module_info_target)
        build_start = time.time()
        success = atest_utils.build(build_targets, verbose=args.verbose)
        build_duration = time.time() - build_start
        metrics.BuildFinishEvent(
            duration=metrics_utils.convert_duration(build_duration),
            success=success,
            targets=build_targets)
        if not success:
            return constants.EXIT_CODE_BUILD_FAILURE
        if proc_acloud:
            proc_acloud.join()
            status = at.probe_acloud_status(report_file)
            acloud_duration = time.time() - acloud_start
            find_build_duration = find_duration + build_duration
            if find_build_duration - acloud_duration >= 0:
                # find+build took longer, so the acloud create time was saved.
                logging.debug('Saved acloud create time: %ss.',
                              acloud_duration)
                metrics.LocalDetectEvent(
                    detect_type=constants.DETECT_TYPE_ACLOUD_CREATE,
                    result=round(acloud_duration))
            else:
                # acloud create took longer, so the find+build time was saved.
                logging.debug('Saved Find and Build time: %ss.',
                              find_build_duration)
                metrics.LocalDetectEvent(
                    detect_type=constants.DETECT_TYPE_FIND_BUILD,
                    result=round(find_build_duration))
            if status != 0:
                return status
    elif constants.TEST_STEP not in steps:
        logging.warning('Install step without test step currently not '
                        'supported, installing AND testing instead.')
        steps.append(constants.TEST_STEP)
    tests_exit_code = constants.EXIT_CODE_SUCCESS
    test_start = time.time()
    if constants.TEST_STEP in steps:
        if not is_from_test_mapping(test_infos):
            tests_exit_code, reporter = test_runner_handler.run_all_tests(
                results_dir, test_infos, extra_args)
            atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        else:
            tests_exit_code = _run_test_mapping_tests(
                results_dir, test_infos, extra_args)
    if args.detect_regression:
        regression_args = _get_regression_detection_args(args, results_dir)
        # TODO(b/110485713): Should not call run_tests here.
        reporter = result_reporter.ResultReporter(
            collect_only=extra_args.get(constants.COLLECT_TESTS_ONLY))
        atest_execution_info.AtestExecutionInfo.result_reporters.append(reporter)
        tests_exit_code |= regression_test_runner.RegressionTestRunner(
            '').run_tests(
                None, regression_args, reporter)
    metrics.RunTestsFinishEvent(
        duration=metrics_utils.convert_duration(time.time() - test_start))
    preparation_time = atest_execution_info.preparation_time(test_start)
    if preparation_time:
        # Send the preparation time only if it's set.
        metrics.RunnerFinishEvent(
            duration=metrics_utils.convert_duration(preparation_time),
            success=True,
            runner_name=constants.TF_PREPARATION,
            test=[])
    if tests_exit_code != constants.EXIT_CODE_SUCCESS:
        tests_exit_code = constants.EXIT_CODE_TEST_FAILURE
    return tests_exit_code

if __name__ == '__main__':
    RESULTS_DIR = make_test_run_dir()
    ARGS = _parse_args(sys.argv[1:])
    with atest_execution_info.AtestExecutionInfo(sys.argv[1:],
                                                 RESULTS_DIR,
                                                 ARGS) as result_file:
        if not ARGS.no_metrics:
            atest_utils.print_data_collection_notice()
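            # Use the tool name from USER_FROM_TOOL for metrics when it is
            # set; otherwise report atest's own tool name.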
            USER_FROM_TOOL = os.getenv(constants.USER_FROM_TOOL, '')
            if USER_FROM_TOOL == '':
                metrics_base.MetricsBase.tool_name = constants.TOOL_NAME
            else:
                metrics_base.MetricsBase.tool_name = USER_FROM_TOOL

        EXIT_CODE = main(sys.argv[1:], RESULTS_DIR, ARGS)
        DETECTOR = bug_detector.BugDetector(sys.argv[1:], EXIT_CODE)
        metrics.LocalDetectEvent(
            detect_type=constants.DETECT_TYPE_BUG_DETECTED,
            result=DETECTOR.caught_result)
        if result_file:
            print("Run 'atest --history' to review test result history.")
    sys.exit(EXIT_CODE)