1# Copyright 2017, The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#     http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15"""
16Utility functions for atest.
17"""
18
19
20# pylint: disable=import-outside-toplevel
21
22from __future__ import print_function
23
24import fnmatch
25import hashlib
26import importlib
27import itertools
28import json
29import logging
30import os
31import pickle
32import re
33import shutil
34import subprocess
35import sys
36import zipfile
37
38import atest_decorator
39import atest_error
40import constants
41
42# This proto related module will be auto generated in build time.
43# pylint: disable=no-name-in-module
44# pylint: disable=import-error
45from tools.tradefederation.core.proto import test_record_pb2
46
# b/147562331 only occurs when running atest in source code. We don't encourage
# the users to manually "pip3 install protobuf", therefore when the exception
# occurs, we don't collect data and the tab completion for args is silenced.
try:
    from metrics import metrics_base
    from metrics import metrics_utils
except ModuleNotFoundError:
    # This exception occurs only when invoking atest in source code.
    # Fix: the two message fragments were concatenated without a separator,
    # printing "...'atest-src'.To resolve..."; add the missing space.
    print("You shouldn't see this message unless you ran 'atest-src'. "
          "To resolve the issue, please run:\n\t{}\n"
          "and try again.".format('pip3 install protobuf'))
    sys.exit(constants.IMPORT_FAILURE)
59
# ANSI escape that resets color/format attributes, plus a trailing newline.
_BASH_RESET_CODE = '\033[0m\n'
# Arbitrary number to limit stdout for failed runs in _run_limited_output.
# Reason for its use is that the make command itself has its own carriage
# return output mechanism that when collected line by line causes the streaming
# full_output list to be extremely large.
_FAILED_OUTPUT_LINE_LIMIT = 100
# Regular expression to match the start of a ninja compile:
# ex: [ 99% 39710/39711]
_BUILD_COMPILE_STATUS = re.compile(r'\[\s*(\d{1,3}%\s+)?\d+/\d+\]')
# Prefix ninja prints at the start of a failing build step; used by
# _capture_fail_section to locate the error section of the build log.
_BUILD_FAILURE = 'FAILED: '
# JSON file mapping a test reference to its recorded runner commands
# (see handle_test_runner_cmd); rooted at the build top when available.
CMD_RESULT_PATH = os.path.join(os.environ.get(constants.ANDROID_BUILD_TOP,
                                              os.getcwd()),
                               'tools/asuite/atest/test_data',
                               'test_commands.json')
# md5 of the build-top path; keeps caches of different source trees apart.
BUILD_TOP_HASH = hashlib.md5(os.environ.get(constants.ANDROID_BUILD_TOP, '').
                             encode()).hexdigest()
# Root folder for the pickled test_info caches of this source tree.
TEST_INFO_CACHE_ROOT = os.path.join(os.path.expanduser('~'), '.atest',
                                    'info_cache', BUILD_TOP_HASH[:8])
# Fallback terminal dimensions when the real size cannot be detected.
_DEFAULT_TERMINAL_WIDTH = 80
_DEFAULT_TERMINAL_HEIGHT = 25
# Soong UI entry point, relative to the build top.
_BUILD_CMD = 'build/soong/soong_ui.bash'
# Shell pipeline listing files committed locally but not yet merged into
# the remote branch (used by get_modified_files).
_FIND_MODIFIED_FILES_CMDS = (
    "cd {};"
    "local_branch=$(git rev-parse --abbrev-ref HEAD);"
    "remote_branch=$(git branch -r | grep '\\->' | awk '{{print $1}}');"
    # Get the number of commits from local branch to remote branch.
    "ahead=$(git rev-list --left-right --count $local_branch...$remote_branch "
    "| awk '{{print $1}}');"
    # Get the list of modified files from HEAD to previous $ahead generation.
    "git diff HEAD~$ahead --name-only")
90
91
def get_build_cmd():
    """Compose build command with no-absolute path and flag "--make-mode".

    Returns:
        A list of soong build command.
    """
    build_top = os.environ.get(constants.ANDROID_BUILD_TOP, os.getcwd())
    # Use a path relative to the current directory so the command stays
    # portable in logs and result comparisons.
    relative_top = os.path.relpath(build_top, os.getcwd())
    return ['%s/%s' % (relative_top, _BUILD_CMD), '--make-mode']
103
104
def _capture_fail_section(full_log):
    """Return the error message from the build output.

    Collect lines starting at the first build-failure marker and stop at the
    next ninja compile-status line.

    Args:
        full_log: List of strings representing full output of build.

    Returns:
        capture_output: List of strings that are build errors.
    """
    captured = []
    capturing = False
    for line in full_log:
        if not capturing:
            # Skip everything until the failure marker shows up.
            if line.startswith(_BUILD_FAILURE):
                captured.append(line)
                capturing = True
            continue
        # A new compile-status line means the failure section has ended.
        if _BUILD_COMPILE_STATUS.match(line):
            break
        captured.append(line)
    return captured
124
125
def _run_limited_output(cmd, env_vars=None):
    """Runs a given command and streams the output on a single line in stdout.

    Args:
        cmd: A list of strings representing the command to run.
        env_vars: Optional arg. Dict of env vars to set during build.

    Raises:
        subprocess.CalledProcessError: When the command exits with a non-0
            exitcode.
    """
    # Send stderr to stdout so we only have to deal with a single pipe.
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, env=env_vars)
    sys.stdout.write('\n')
    term_width, _ = get_terminal_size()
    # Blank line used to wipe the previously printed status line.
    white_space = " " * int(term_width)
    # Keep every line so the failure section can be reconstructed on error.
    full_output = []
    while proc.poll() is None:
        line = proc.stdout.readline().decode('utf-8')
        # Readline will often return empty strings.
        if not line:
            continue
        full_output.append(line)
        # Trim the line to the width of the terminal.
        # Note: Does not handle terminal resizing, which is probably not worth
        #       checking the width every loop.
        if len(line) >= term_width:
            line = line[:term_width - 1]
        # Clear the last line we outputted.
        sys.stdout.write('\r%s\r' % white_space)
        sys.stdout.write('%s' % line.strip())
        sys.stdout.flush()
    # Reset stdout (on bash) to remove any custom formatting and newline.
    sys.stdout.write(_BASH_RESET_CODE)
    sys.stdout.flush()
    # Wait for the Popen to finish completely before checking the returncode.
    proc.wait()
    if proc.returncode != 0:
        # Parse out the build error to output.
        output = _capture_fail_section(full_output)
        if not output:
            output = full_output
        # Cap the attached output; make-style carriage-return logs can be huge.
        if len(output) >= _FAILED_OUTPUT_LINE_LIMIT:
            output = output[-_FAILED_OUTPUT_LINE_LIMIT:]
        output = 'Output (may be trimmed):\n%s' % ''.join(output)
        raise subprocess.CalledProcessError(proc.returncode, cmd, output)
173
174
def build(build_targets, verbose=False, env_vars=None):
    """Shell out and make build_targets.

    Args:
        build_targets: A set of strings of build targets to make.
        verbose: Optional arg. If True output is streamed to the console.
                 If False, only the last line of the build output is outputted.
        env_vars: Optional arg. Dict of env vars to set during build.

    Returns:
        Boolean of whether build command was successful, True if nothing to
        build.
    """
    if not build_targets:
        logging.debug('No build targets, skipping build.')
        return True
    # Start from the current environment and layer the caller's overrides.
    full_env_vars = dict(os.environ)
    full_env_vars.update(env_vars or {})
    print('\n%s\n%s' % (colorize("Building Dependencies...", constants.CYAN),
                        ', '.join(build_targets)))
    logging.debug('Building Dependencies: %s', ' '.join(build_targets))
    cmd = get_build_cmd() + list(build_targets)
    logging.debug('Executing command: %s', cmd)
    try:
        if verbose:
            subprocess.check_call(cmd, stderr=subprocess.STDOUT,
                                  env=full_env_vars)
        else:
            # TODO: Save output to a log file.
            _run_limited_output(cmd, env_vars=full_env_vars)
    except subprocess.CalledProcessError as err:
        logging.error('Error building: %s', build_targets)
        if err.output:
            logging.error(err.output)
        return False
    logging.info('Build successful')
    return True
213
214
def _can_upload_to_result_server():
    """Return True if we can talk to result server."""
    # TODO: Also check if we have a slow connection to result server.
    if not constants.RESULT_SERVER:
        return False
    try:
        from urllib.request import urlopen
        urlopen(constants.RESULT_SERVER,
                timeout=constants.RESULT_SERVER_TIMEOUT).close()
        return True
    # pylint: disable=broad-except
    except Exception as err:
        # Unreachable server is expected in some setups; just log it.
        logging.debug('Talking to result server raised exception: %s', err)
    return False
228
229
def get_result_server_args(for_test_mapping=False):
    """Return list of args for communication with result server.

    Args:
        for_test_mapping: True if the test run is for Test Mapping to include
            additional reporting args. Default is False.

    Returns:
        A list of result-server args; empty if the server is unreachable.
    """
    # TODO (b/147644460) Temporarily disable Sponge V1 since it will be turned
    # down.
    if not _can_upload_to_result_server():
        return []
    if for_test_mapping:
        return (constants.RESULT_SERVER_ARGS +
                constants.TEST_MAPPING_RESULT_SERVER_ARGS)
    return constants.RESULT_SERVER_ARGS
245
246
def sort_and_group(iterable, key):
    """Sort *iterable* by *key*, then group adjacent items sharing a key.

    Args:
        iterable: Any iterable whose items should be grouped.
        key: Callable extracting the sort/group key from an item.

    Returns:
        An itertools.groupby iterator of (key, group-iterator) pairs.
    """
    ordered = sorted(iterable, key=key)
    return itertools.groupby(ordered, key=key)
250
251
def is_test_mapping(args):
    """Check if the atest command intends to run tests in test mapping.

    When atest runs tests in test mapping, it must have at most one test
    specified. If a test is specified, it must be started with  `:`,
    which means the test value is a test group name in TEST_MAPPING file, e.g.,
    `:postsubmit`.

    If any test mapping options is specified, the atest command must also be
    set to run tests in test mapping files.

    Args:
        args: arg parsed object.

    Returns:
        True if the args indicates atest shall run tests in test mapping. False
        otherwise.
    """
    # Explicit test-mapping flags force test-mapping mode.
    if args.test_mapping or args.include_subdirs:
        return True
    # No tests given: default to test mapping of the current directory.
    if not args.tests:
        return True
    # A single ':group' reference names a group in a TEST_MAPPING file.
    return len(args.tests) == 1 and args.tests[0][0] == ':'
275
@atest_decorator.static_var("cached_has_colors", {})
def _has_colors(stream):
    """Check the output stream is colorful.

    The verdict is cached per stream so the curses probe only runs once.

    Args:
        stream: The standard file stream.

    Returns:
        True if the file stream can interpreter the ANSI color code.
    """
    cache = _has_colors.cached_has_colors
    if stream in cache:
        return cache[stream]
    # Auto color only on TTYs: a stream without isatty(), or one that is
    # not a terminal, gets no color.
    supported = hasattr(stream, "isatty") and stream.isatty()
    if supported:
        # Following from Python cookbook, #475186
        try:
            import curses
            curses.setupterm()
            supported = curses.tigetnum("colors") > 2
        # pylint: disable=broad-except
        except Exception as err:
            logging.debug('Checking colorful raised exception: %s', err)
            supported = False
    cache[stream] = supported
    return supported
307
308
def colorize(text, color, highlight=False):
    """ Convert to colorful string with ANSI escape code.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.

    Returns:
        Colorful string with ANSI escape code.
    """
    if not _has_colors(sys.stdout):
        return text
    # Background (highlight) colors start at 40, foreground colors at 30.
    base = 40 if highlight else 30
    return '%s%dm%s%s' % ('\033[1;', base + color, text, '\033[0m')
333
334
def colorful_print(text, color, highlight=False, auto_wrap=True):
    """Print out the text with color.

    Args:
        text: A string to print.
        color: ANSI code shift for colorful print. They are defined
               in constants_default.py.
        highlight: True to print with highlight.
        auto_wrap: If True, Text wraps while print.
    """
    # Suppress the trailing newline when the caller wants to keep the cursor
    # on the same line.
    line_end = "\n" if auto_wrap else ""
    print(colorize(text, color, highlight), end=line_end)
350
351
def get_terminal_size():
    """Get terminal size and return a tuple.

    Returns:
        2 integers: the size of X(columns) and Y(lines/rows).
    """
    # shutil falls back to the given defaults when the size cannot be
    # determined (e.g. output is not attached to a terminal).
    fallback_size = (_DEFAULT_TERMINAL_WIDTH, _DEFAULT_TERMINAL_HEIGHT)
    return tuple(shutil.get_terminal_size(fallback=fallback_size))
364
365
def is_external_run():
    # TODO(b/133905312): remove this function after aidegen calling
    #       metrics_base.get_user_type directly.
    """Check is external run or not.

    Determine the internal user by passing at least one check:
      - whose git mail domain is from google
      - whose hostname is from google
    Otherwise is external user.

    Returns:
        True if this is an external run, False otherwise.
    """
    user_type = metrics_base.get_user_type()
    return user_type == metrics_base.EXTERNAL_USER
380
381
def print_data_collection_notice():
    """Print the data collection notice."""
    external = metrics_base.get_user_type() == metrics_base.EXTERNAL_USER
    anonymous = ' anonymous' if external else ''
    user_type = 'EXTERNAL' if external else 'INTERNAL'
    notice = ('  We collect%s usage statistics in accordance with our Content '
              'Licenses (%s), Contributor License Agreement (%s), Privacy '
              'Policy (%s) and Terms of Service (%s).'
             ) % (anonymous,
                  constants.CONTENT_LICENSES_URL,
                  constants.CONTRIBUTOR_AGREEMENT_URL[user_type],
                  constants.PRIVACY_POLICY_URL,
                  constants.TERMS_SERVICE_URL)
    print(delimiter('=', 18, prenl=1))
    colorful_print("Notice:", constants.RED)
    colorful_print("%s" % notice, constants.GREEN)
    print(delimiter('=', 18, postnl=1))
402
403
def handle_test_runner_cmd(input_test, test_cmds, do_verification=False,
                           result_path=CMD_RESULT_PATH):
    """Handle the runner command of input tests.

    Args:
        input_test: A string of input tests pass to atest.
        test_cmds: A list of strings for running input tests.
        do_verification: A boolean to indicate the action of this method.
                         True: Do verification without updating result map and
                               raise DryRunVerificationError if verifying fails.
                         False: Update result map, if the former command is
                                different with current command, it will confirm
                                with user if they want to update or not.
        result_path: The file path for saving result.
    """
    full_result_content = {}
    # Load the previously recorded command map, if any.
    if os.path.isfile(result_path):
        with open(result_path) as json_file:
            full_result_content = json.load(json_file)
    former_test_cmds = full_result_content.get(input_test, [])
    # Normalize both sides so incidental differences (log paths, build cmd
    # location, etc.) do not count as a mismatch.
    test_cmds = _normalize(test_cmds)
    former_test_cmds = _normalize(former_test_cmds)
    if not _are_identical_cmds(test_cmds, former_test_cmds):
        if do_verification:
            raise atest_error.DryRunVerificationError(
                'Dry run verification failed, former commands: {}'.format(
                    former_test_cmds))
        if former_test_cmds:
            # If former_test_cmds is different from test_cmds, ask users if they
            # are willing to update the result.
            print('Former cmds = %s' % former_test_cmds)
            print('Current cmds = %s' % test_cmds)
            try:
                from distutils import util
                # strtobool returns 0 for a "no" answer and raises ValueError
                # for anything it cannot recognize.
                if not util.strtobool(
                        input('Do you want to update former result '
                              'with the latest one?(Y/n)')):
                    print('SKIP updating result!!!')
                    return
            except ValueError:
                # Default action is updating the command result of the
                # input_test. If the user input is unrecognizable telling yes
                # or no, "Y" is implicitly applied.
                pass
    else:
        # If current commands are the same as the formers, no need to update
        # result.
        return
    # Record the new commands and persist the whole map.
    full_result_content[input_test] = test_cmds
    with open(result_path, 'w') as outfile:
        json.dump(full_result_content, outfile, indent=0)
        print('Save result mapping to %s' % result_path)
456
def _normalize(cmd_list):
    """Method that normalize commands. Note that '--atest-log-file-path' is not
    considered a critical argument, therefore, it will be removed during
    the comparison. Also, atest can be ran in any place, so verifying relative
    path, LD_LIBRARY_PATH, and --proto-output-file is regardless as well.

    Args:
        cmd_list: A list with one element. E.g. ['cmd arg1 arg2 True']

    Returns:
        A list with elements. E.g. ['cmd', 'arg1', 'arg2', 'True']
    """
    # Rebuild the token list instead of calling list.remove() while
    # iterating it: removing during iteration silently skips the element
    # that follows each removed one, so consecutive ignorable tokens
    # survived normalization.
    kept_tokens = []
    build_cmd_tokens = []
    for token in ' '.join(cmd_list).split():
        if token.startswith(('--atest-log-file-path',
                             'LD_LIBRARY_PATH=',
                             '--proto-output-file=')):
            continue
        if _BUILD_CMD in token:
            # Normalize the build command to a fixed relative path so runs
            # from different working directories still compare equal.
            build_cmd_tokens.append(os.path.join('./', _BUILD_CMD))
            continue
        kept_tokens.append(token)
    # Keep the original ordering contract: normalized build commands were
    # appended at the end of the list.
    return kept_tokens + build_cmd_tokens
485
486def _are_identical_cmds(current_cmds, former_cmds):
487    """Tell two commands are identical.
488
489    Args:
490        current_cmds: A list of strings for running input tests.
491        former_cmds: A list of strings recorded from the previous run.
492
493    Returns:
494        True if both commands are identical, False otherwise.
495    """
496    # Always sort cmd list to make it comparable.
497    current_cmds.sort()
498    former_cmds.sort()
499    return current_cmds == former_cmds
500
501def _get_hashed_file_name(main_file_name):
502    """Convert the input string to a md5-hashed string. If file_extension is
503       given, returns $(hashed_string).$(file_extension), otherwise
504       $(hashed_string).cache.
505
506    Args:
507        main_file_name: The input string need to be hashed.
508
509    Returns:
510        A string as hashed file name with .cache file extension.
511    """
512    hashed_fn = hashlib.md5(str(main_file_name).encode())
513    hashed_name = hashed_fn.hexdigest()
514    return hashed_name + '.cache'
515
def get_test_info_cache_path(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
    """Get the cache path of the desired test_infos.

    Args:
        test_reference: A string of the test.
        cache_root: Folder path where stores caches.

    Returns:
        A string of the path of test_info cache.
    """
    hashed_name = _get_hashed_file_name(test_reference)
    return os.path.join(cache_root, hashed_name)
528
def update_test_info_cache(test_reference, test_infos,
                           cache_root=TEST_INFO_CACHE_ROOT):
    """Pickle a set of test_info objects into a per-reference cache file.

    Each test_reference maps to its own cache file under cache_root.

    Args:
        test_reference: A string referencing a test.
        test_infos: A set of TestInfos.
        cache_root: Folder path for saving caches.
    """
    if not os.path.isdir(cache_root):
        os.makedirs(cache_root)
    cache_path = get_test_info_cache_path(test_reference, cache_root)
    try:
        with open(cache_path, 'wb') as cache_file:
            logging.debug('Saving cache %s.', cache_path)
            pickle.dump(test_infos, cache_file, protocol=2)
    except (pickle.PicklingError, TypeError, IOError) as err:
        # Caching is best-effort: log the failure and report it via metrics,
        # but never break the run.
        logging.debug('Exception raised: %s', err)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
553
554
def load_test_info_cache(test_reference, cache_root=TEST_INFO_CACHE_ROOT):
    """Load cache by test_reference to a set of test_infos object.

    Args:
        test_reference: A string referencing a test.
        cache_root: Folder path for finding caches.

    Returns:
        A list of TestInfo namedtuple if cache found, else None.
    """
    cache_file = get_test_info_cache_path(test_reference, cache_root)
    if not os.path.isfile(cache_file):
        return None
    logging.debug('Loading cache %s.', cache_file)
    try:
        with open(cache_file, 'rb') as cached:
            return pickle.load(cached, encoding='utf-8')
    except (pickle.UnpicklingError, ValueError, TypeError, EOFError,
            IOError) as err:
        # A stale or corrupt cache is not fatal: drop the file, log the
        # error, and report it via metrics.
        logging.debug('Exception raised: %s', err)
        os.remove(cache_file)
        metrics_utils.handle_exc_and_send_exit_event(
            constants.ACCESS_CACHE_FAILURE)
    return None
583
def clean_test_info_caches(tests, cache_root=TEST_INFO_CACHE_ROOT):
    """Clean caches of input tests.

    Args:
        tests: A list of test references.
        cache_root: Folder path for finding caches.
    """
    for test in tests:
        cache_file = get_test_info_cache_path(test, cache_root)
        if not os.path.isfile(cache_file):
            continue
        logging.debug('Removing cache: %s', cache_file)
        try:
            os.remove(cache_file)
        except IOError as err:
            # Best-effort cleanup: log and report, keep going with the rest.
            logging.debug('Exception raised: %s', err)
            metrics_utils.handle_exc_and_send_exit_event(
                constants.ACCESS_CACHE_FAILURE)
601
def get_modified_files(root_dir):
    """Get the git modified files. The git path here is git top level of
    the root_dir. It's inevitable to utilise different commands to fulfill
    2 scenario:
        1. locate unstaged/staged files
        2. locate committed files but not yet merged.
    the 'git_status_cmd' fulfils the former while the 'find_modified_files'
    fulfils the latter.

    Args:
        root_dir: the root where it starts finding.

    Returns:
        A set of modified files altered since last commit.
    """
    modified_files = set()
    try:
        # NOTE(review): shell=True with root_dir interpolated into the command
        # string — assumes root_dir is a trusted, repo-internal path.
        find_git_cmd = 'cd {}; git rev-parse --show-toplevel'.format(root_dir)
        git_paths = subprocess.check_output(
            find_git_cmd, shell=True).decode().splitlines()
        for git_path in git_paths:
            # Find modified files from git working tree status.
            git_status_cmd = ("repo forall {} -c git status --short | "
                              "awk '{{print $NF}}'").format(git_path)
            modified_wo_commit = subprocess.check_output(
                git_status_cmd, shell=True).decode().rstrip().splitlines()
            for change in modified_wo_commit:
                modified_files.add(
                    os.path.normpath('{}/{}'.format(git_path, change)))
            # Find modified files that are committed but not yet merged.
            find_modified_files = _FIND_MODIFIED_FILES_CMDS.format(git_path)
            commit_modified_files = subprocess.check_output(
                find_modified_files, shell=True).decode().splitlines()
            for line in commit_modified_files:
                modified_files.add(os.path.normpath('{}/{}'.format(
                    git_path, line)))
    except (OSError, subprocess.CalledProcessError) as err:
        # Best-effort: a failing git/repo command yields a partial (or empty)
        # result rather than an exception for the caller.
        logging.debug('Exception raised: %s', err)
    return modified_files
641
def delimiter(char, length=_DEFAULT_TERMINAL_WIDTH, prenl=0, postnl=0):
    r"""A handy delimiter printer.

    Args:
        char: A string used for delimiter.
        length: An integer for the replication.
        prenl: An integer that insert '\n' before delimiter.
        postnl: An integer that insert '\n' after delimiter.

    Returns:
        A string of delimiter.
    """
    return '{}{}{}'.format('\n' * prenl, char * length, '\n' * postnl)
655
def find_files(path, file_name=constants.TEST_MAPPING):
    """Find all files with given name under the given path.

    Args:
        path: A string of path in source.
        file_name: The file name pattern for finding matched files.

    Returns:
        A list of paths of the files with the matching name under the given
        path.
    """
    return [os.path.join(root, name)
            for root, _, file_names in os.walk(path)
            for name in fnmatch.filter(file_names, file_name)]
672
def extract_zip_text(zip_path):
    """Extract the text files content for input zip file.

    Only lines matching the tradefed error/warning pattern are collected.

    Args:
        zip_path: The file path of zip.

    Returns:
        The string in input zip file.
    """
    content = ''
    try:
        with zipfile.ZipFile(zip_path) as zip_file:
            for entry in zip_file.namelist():
                if os.path.isdir(entry):
                    continue
                # Force change line if multiple text files in zip
                content += '\n'
                with zip_file.open(entry) as extracted:
                    for raw_line in extracted:
                        decoded = raw_line.decode()
                        if matched_tf_error_log(decoded):
                            content += decoded
    except zipfile.BadZipfile as err:
        # An unreadable archive simply yields whatever was collected so far.
        logging.debug('Exception raised: %s', err)
    return content
698
def matched_tf_error_log(content):
    """Check if the input content matched tradefed log pattern.
    The format will look like this.
    05-25 17:37:04 W/XXXXXX
    05-25 17:37:04 E/XXXXXX

    Args:
        content: Log string.

    Returns:
        True if the content matches the regular expression for tradefed error or
        warning log.
    """
    # MM-DD hh:mm:ss followed by an 'E/' or 'W/' tag. The previous pattern
    # ended with '(E|W/)', which grouped the slash with 'W' only, so any
    # bare 'E' after the timestamp (e.g. '... END') wrongly matched; the
    # intended grouping is '(E|W)/'.
    reg = (r'^((0[1-9])|(1[0-2]))-((0[1-9])|([12][0-9])|(3[0-1])) '
           r'(([0-1][0-9])|([2][0-3])):([0-5][0-9]):([0-5][0-9]) (E|W)/')
    if re.search(reg, content):
        return True
    return False
717
def has_valid_cert():
    """Check whether the certificate is valid.

    Returns: True if the cert is valid.
    """
    if not constants.CERT_STATUS_CMD:
        return False
    try:
        # The status command exits 0 when the cert is valid.
        return subprocess.check_call(constants.CERT_STATUS_CMD,
                                     stdout=subprocess.DEVNULL,
                                     stderr=subprocess.DEVNULL) == 0
    except subprocess.CalledProcessError:
        return False
731
def get_flakes(branch='',
               target='',
               test_name='',
               test_module='',
               test_method=''):
    """Get flake information.

    Args:
        branch: A string of branch name.
        target: A string of target.
        test_name: A string of test suite name.
        test_module: A string of test module.
        test_method: A string of test method.

    Returns:
        A dictionary of flake info. None if no flakes service exists.
    """
    # Fill unspecified query fields with the configured defaults.
    if not branch:
        branch = constants.FLAKE_BRANCH
    if not target:
        target = constants.FLAKE_TARGET
    if not test_name:
        test_name = constants.FLAKE_TEST_NAME
    # Currently lock the flake information from test-mapping test
    # which only runs on cuttlefish(x86) devices.
    # TODO: extend supporting other devices
    if test_module:
        test_module = 'x86 {}'.format(test_module)
    flake_service = os.path.join(constants.FLAKE_SERVICE_PATH,
                                 constants.FLAKE_FILE)
    if not os.path.exists(flake_service):
        logging.debug('Get flakes: Flake service path not exist.')
        return None
    if not has_valid_cert():
        logging.debug('Get flakes: No valid cert.')
        return None
    flake_info = {}
    try:
        # Copy the service binary to a temp location and mark it executable
        # — presumably the original location is not executable; verify.
        shutil.copy2(flake_service, constants.FLAKE_TMP_PATH)
        tmp_service = os.path.join(constants.FLAKE_TMP_PATH,
                                   constants.FLAKE_FILE)
        os.chmod(tmp_service, 0o0755)
        cmd = [tmp_service, branch, target, test_name, test_module, test_method]
        logging.debug('Executing: %s', ' '.join(cmd))
        output = subprocess.check_output(cmd).decode()
        # Parse 'key:value' lines for the two known flake metrics.
        percent_template = "{}:".format(constants.FLAKE_PERCENT)
        postsubmit_template = "{}:".format(constants.FLAKE_POSTSUBMIT)
        for line in output.splitlines():
            if line.startswith(percent_template):
                flake_info[constants.FLAKE_PERCENT] = line.replace(
                    percent_template, '')
            if line.startswith(postsubmit_template):
                flake_info[constants.FLAKE_POSTSUBMIT] = line.replace(
                    postsubmit_template, '')
    # pylint: disable=broad-except
    except Exception as e:
        # Flake info is advisory; any failure simply returns None.
        logging.debug('Exception:%s', e)
        return None
    return flake_info
791
def read_test_record(path):
    """A Helper to read test record proto.

    Args:
        path: The proto file path.

    Returns:
        The test_record proto instance.
    """
    record = test_record_pb2.TestRecord()
    with open(path, 'rb') as proto_file:
        record.ParseFromString(proto_file.read())
    return record
805
def has_python_module(module_name):
    """Detect if the module can be loaded without importing it in real.

    Args:
        module_name: A string of the tested module name.

    Returns:
        True if found, False otherwise.
    """
    # The file-level 'import importlib' does not guarantee that the
    # 'importlib.util' submodule attribute is bound; import it explicitly
    # so find_spec is always available.
    import importlib.util
    return bool(importlib.util.find_spec(module_name))
816