#!/usr/bin/env python3
#
# [VPYTHON:BEGIN]
# python_version: "3.8"
# [VPYTHON:END]
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""ART Run-Test TestRunner

The testrunner runs the ART run-tests by simply invoking the script.
It fetches the list of eligible tests from the art/test directory and the list
of disabled tests from art/test/knownfailures.json. It runs the tests by
invoking the art/test/run-test script and checks the exit value to decide
whether the test passed or failed.

Before invoking the script, first build all the test dependencies.
There are two major build targets for building target and host test
dependencies:
1) test-art-host-run-test
2) test-art-target-run-test

The script accepts the following options:
-t: Either the test name as in art/test or the test name including the variant
    information. E.g., "-t 001-HelloWorld",
    "-t test-art-host-run-test-debug-prebuild-optimizing-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-cdex-fast-001-HelloWorld32"
-j: Number of thread workers to be used. E.g., "-j64"
--dry-run: Instead of running the test name, just print its name.
--verbose
-b / --build-dependencies: Build the dependencies before running the tests.

To request specific variants for the tests, use --<<variant-name>>.
For example, to select the optimizing compiler, use --optimizing.


At the end, the script prints the failed and skipped tests, if any.

"""
import argparse
import collections

# b/140161314 diagnostics.
try:
  import concurrent.futures
except Exception:
  import sys
  sys.stdout.write("\n\n" + sys.executable + " " + sys.version + "\n\n")
  sys.stdout.flush()
  raise

import contextlib
import csv
import datetime
import fnmatch
import itertools
import json
import multiprocessing
import os
import re
import shlex
import shutil
import signal
import subprocess
import sys
import tempfile
import time

import env
from target_config import target_config
from device_config import device_config

# TODO: make it adjustable per tests and for buildbots
#
# Note: this needs to be larger than run-test timeouts, as long as this script
# does not push the value to run-test. run-test is somewhat complicated:
#                      base: 25m  (large for ASAN)
#        + timeout handling:  2m
#        +   gcstress extra: 20m
#        -----------------------
#                            47m
timeout = 3600 # 60 minutes

# DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map
# from the test name (like 001-HelloWorld) to the set of variants that the test
# is disabled for.
DISABLED_TEST_CONTAINER = {}

# The dict contains the set of all possible variants for a given type. For example,
# for the key 'target', the value is {'target', 'host', 'jvm'}. It is used to parse
# the test name given as the argument to run.
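# The dict is populated by gather_test_info() below.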
VARIANT_TYPE_DICT = {}

# The set of all variant sets that are incompatible and will always be skipped.
NONFUNCTIONAL_VARIANT_SETS = set()

# The set contains all the variants of each type.
TOTAL_VARIANTS_SET = set()

# The colors are used in the output. When a test passes, COLOR_PASS is used,
# and so on.
COLOR_ERROR = '\033[91m'
COLOR_PASS = '\033[92m'
COLOR_SKIP = '\033[93m'
COLOR_NORMAL = '\033[0m'

# The set of all the possible run tests found in the art/test directory.
RUN_TEST_SET = set()

failed_tests = []
skipped_tests = []

# Flags
n_thread = -1
total_test_count = 0
verbose = False
dry_run = False
ignore_skips = False
build = False
dist = False
gdb = False
gdb_arg = ''
csv_result = None
csv_writer = None
runtime_option = ''
with_agent = []
zipapex_loc = None
run_test_option = []
dex2oat_jobs = -1   # -1 corresponds to default threads for dex2oat
run_all_configs = False

# Dict containing extra arguments
extra_arguments = { "host" : [], "target" : [] }

# Dict to store user requested test variants.
# key: variant_type.
# value: set of variants user wants to run of type <key>.
_user_input_variants = collections.defaultdict(set)

def setup_csv_result():
  """Set up the CSV output if required."""
  global csv_writer
  csv_writer = csv.writer(csv_result)
  # Write the header.
  csv_writer.writerow(['target', 'run', 'prebuild', 'compiler', 'relocate', 'trace', 'gc',
                       'jni', 'image', 'debuggable', 'jvmti', 'cdex_level', 'test',
                       'address_size', 'result'])


def send_csv_result(test, result):
  """
  Write a line into the CSV results file if one is available.
  """
  if csv_writer is not None:
    csv_writer.writerow(extract_test_name(test) + [result])

def close_csv_file():
  global csv_result
  global csv_writer
  if csv_result is not None:
    csv_writer = None
    csv_result.flush()
    csv_result.close()
    csv_result = None

def gather_test_info():
  """Gathers information about the tests to be run, which includes
  generating the list of all tests from the art/test directory and the list
  of disabled tests. It also maps the various variants to their types.
  """
  global TOTAL_VARIANTS_SET
  # TODO: Avoid duplication of the variant names in different lists.
  VARIANT_TYPE_DICT['run'] = {'ndebug', 'debug'}
  VARIANT_TYPE_DICT['target'] = {'target', 'host', 'jvm'}
  VARIANT_TYPE_DICT['trace'] = {'trace', 'ntrace', 'stream'}
  VARIANT_TYPE_DICT['image'] = {'picimage', 'no-image'}
  VARIANT_TYPE_DICT['debuggable'] = {'ndebuggable', 'debuggable'}
  VARIANT_TYPE_DICT['gc'] = {'gcstress', 'gcverify', 'cms'}
  VARIANT_TYPE_DICT['prebuild'] = {'no-prebuild', 'prebuild'}
  VARIANT_TYPE_DICT['cdex_level'] = {'cdex-none', 'cdex-fast'}
  VARIANT_TYPE_DICT['relocate'] = {'relocate', 'no-relocate'}
  VARIANT_TYPE_DICT['jni'] = {'jni', 'forcecopy', 'checkjni'}
  VARIANT_TYPE_DICT['address_sizes'] = {'64', '32'}
  VARIANT_TYPE_DICT['jvmti'] = {'no-jvmti', 'jvmti-stress', 'redefine-stress', 'trace-stress',
                                'field-stress', 'step-stress'}
  VARIANT_TYPE_DICT['compiler'] = {'interp-ac', 'interpreter', 'jit', 'jit-on-first-use',
                                   'optimizing', 'regalloc_gc',
                                   'speed-profile', 'baseline'}

  # Regalloc_GC cannot work with prebuild.
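  # is_test_disabled() skips any test whose variant set contains every member of
  # one of these frozensets.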
  NONFUNCTIONAL_VARIANT_SETS.add(frozenset({'regalloc_gc', 'prebuild'}))

  for v_type in VARIANT_TYPE_DICT:
    TOTAL_VARIANTS_SET = TOTAL_VARIANTS_SET.union(VARIANT_TYPE_DICT.get(v_type))

  test_dir = env.ANDROID_BUILD_TOP + '/art/test'
  for f in os.listdir(test_dir):
    if fnmatch.fnmatch(f, '[0-9]*'):
      RUN_TEST_SET.add(f)


def setup_test_env():
  """Sets default values for the various test variants if they are not
  already set.
  """
  if env.ART_TEST_BISECTION:
    env.ART_TEST_RUN_TEST_NO_PREBUILD = True
    env.ART_TEST_RUN_TEST_PREBUILD = False
    # Bisection search writes to standard output.
    env.ART_TEST_QUIET = False

  global _user_input_variants
  global run_all_configs
  # These are the default variant-options we will use if nothing in the group is specified.
  default_variants = {
      'target': {'host', 'target'},
      'prebuild': {'prebuild'},
      'cdex_level': {'cdex-fast'},
      'jvmti': { 'no-jvmti'},
      'compiler': {'optimizing',
                   'jit',
                   'interpreter',
                   'interp-ac',
                   'speed-profile'},
      'relocate': {'no-relocate'},
      'trace': {'ntrace'},
      'gc': {'cms'},
      'jni': {'checkjni'},
      'image': {'picimage'},
      'debuggable': {'ndebuggable'},
      'run': {'debug'},
      # address_sizes_target depends on the target so it is dealt with below.
  }
  # We want to pull these early since the full VARIANT_TYPE_DICT has a few additional ones we don't
  # want to pick up if we pass --all.
  default_variants_keys = default_variants.keys()
  if run_all_configs:
    default_variants = VARIANT_TYPE_DICT

  for key in default_variants_keys:
    if not _user_input_variants[key]:
      _user_input_variants[key] = default_variants[key]

  _user_input_variants['address_sizes_target'] = collections.defaultdict(set)
  if not _user_input_variants['address_sizes']:
    _user_input_variants['address_sizes_target']['target'].add(
        env.ART_PHONY_TEST_TARGET_SUFFIX)
    _user_input_variants['address_sizes_target']['host'].add(
        env.ART_PHONY_TEST_HOST_SUFFIX)
    if env.ART_TEST_RUN_TEST_2ND_ARCH:
      _user_input_variants['address_sizes_target']['host'].add(
          env.ART_2ND_PHONY_TEST_HOST_SUFFIX)
      _user_input_variants['address_sizes_target']['target'].add(
          env.ART_2ND_PHONY_TEST_TARGET_SUFFIX)
  else:
    _user_input_variants['address_sizes_target']['host'] = _user_input_variants['address_sizes']
    _user_input_variants['address_sizes_target']['target'] = _user_input_variants['address_sizes']

  global n_thread
  if n_thread == -1:
    if 'target' in _user_input_variants['target']:
      n_thread = get_default_threads('target')
    else:
      n_thread = get_default_threads('host')
  print_text("Concurrency: " + str(n_thread) + "\n")

  global extra_arguments
  for target in _user_input_variants['target']:
    extra_arguments[target] = find_extra_device_arguments(target)

  if not sys.stdout.isatty():
    global COLOR_ERROR
    global COLOR_PASS
    global COLOR_SKIP
    global COLOR_NORMAL
    COLOR_ERROR = ''
    COLOR_PASS = ''
    COLOR_SKIP = ''
    COLOR_NORMAL = ''

def find_extra_device_arguments(target):
  """
  Gets any extra arguments from the device_config.
  """
  device_name = target
  if target == 'target':
    device_name = get_device_name()
  return device_config.get(device_name, { 'run-test-args' : [] })['run-test-args']

def get_device_name():
  """
  Gets the value of ro.product.name from the remote device.
  """
  proc = subprocess.Popen(['adb', 'shell', 'getprop', 'ro.product.name'],
                          stderr=subprocess.STDOUT,
                          stdout=subprocess.PIPE,
                          universal_newlines=True)
  # Only wait 2 seconds.
  output = proc.communicate(timeout=2)[0]
  success = not proc.wait()
  if success:
    return output.strip()
  else:
    print_text("Unable to determine device type!\n")
    print_text("Continuing anyway.\n")
    return "UNKNOWN_TARGET"

def run_tests(tests):
  """This method generates variants of the tests to be run and executes them.

  Args:
    tests: The set of tests to be run.
  """
  options_all = ''

  # jvm does not run with all these combinations,
  # or at least it doesn't make sense for most of them.
  # TODO: support some jvm variants like jvmti ?
  target_input_variants = _user_input_variants['target']
  uncombinated_target_input_variants = []
  if 'jvm' in target_input_variants:
    _user_input_variants['target'].remove('jvm')
    uncombinated_target_input_variants.append('jvm')

  global total_test_count
  total_test_count = len(tests)
  if target_input_variants:
    for variant_type in VARIANT_TYPE_DICT:
      if not (variant_type == 'target' or 'address_sizes' in variant_type):
        total_test_count *= len(_user_input_variants[variant_type])
  target_address_combinations = 0
  for target in target_input_variants:
    for address_size in _user_input_variants['address_sizes_target'][target]:
      target_address_combinations += 1
  target_address_combinations += len(uncombinated_target_input_variants)
  total_test_count *= target_address_combinations

  if env.ART_TEST_WITH_STRACE:
    options_all += ' --strace'

  if env.ART_TEST_RUN_TEST_ALWAYS_CLEAN:
    options_all += ' --always-clean'

  if env.ART_TEST_BISECTION:
    options_all += ' --bisection-search'

  if gdb:
    options_all += ' --gdb'
    if gdb_arg:
      options_all += ' --gdb-arg ' + gdb_arg

  options_all += ' ' + ' '.join(run_test_option)

  if runtime_option:
    for opt in runtime_option:
      options_all += ' --runtime-option ' + opt
  if with_agent:
    for opt in with_agent:
      options_all += ' --with-agent ' + opt

  if dex2oat_jobs != -1:
    options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs)

  def iter_config(tests, input_variants, user_input_variants):
    config = itertools.product(tests, input_variants, user_input_variants['run'],
                               user_input_variants['prebuild'], user_input_variants['compiler'],
                               user_input_variants['relocate'], user_input_variants['trace'],
                               user_input_variants['gc'], user_input_variants['jni'],
                               user_input_variants['image'],
                               user_input_variants['debuggable'], user_input_variants['jvmti'],
                               user_input_variants['cdex_level'])
    return config

  # [--host, --target] combines with all the other user input variants.
  config = iter_config(tests, target_input_variants, _user_input_variants)
  # [--jvm] currently combines with nothing else. Most of the extra flags we'd insert
  # would be unrecognizable by the 'java' binary, so avoid inserting any extra flags for now.
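  # All variant values below are empty strings, so start_combination() adds none of the
  # per-variant flags for jvm runs.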
  uncombinated_config = iter_config(tests, uncombinated_target_input_variants, { 'run': [''],
                                                                                 'prebuild': [''], 'compiler': [''],
                                                                                 'relocate': [''], 'trace': [''],
                                                                                 'gc': [''], 'jni': [''],
                                                                                 'image': [''],
                                                                                 'debuggable': [''], 'jvmti': [''],
                                                                                 'cdex_level': ['']})

  def start_combination(executor, config_tuple, global_options, address_size):
    test, target, run, prebuild, compiler, relocate, trace, gc, \
    jni, image, debuggable, jvmti, cdex_level = config_tuple

    # NB The order of components here should match the order of
    # components in the regex parser in parse_test_name.
    test_name = 'test-art-'
    test_name += target + '-run-test-'
    test_name += run + '-'
    test_name += prebuild + '-'
    test_name += compiler + '-'
    test_name += relocate + '-'
    test_name += trace + '-'
    test_name += gc + '-'
    test_name += jni + '-'
    test_name += image + '-'
    test_name += debuggable + '-'
    test_name += jvmti + '-'
    test_name += cdex_level + '-'
    test_name += test
    test_name += address_size

    variant_set = {target, run, prebuild, compiler, relocate, trace, gc, jni,
                   image, debuggable, jvmti, cdex_level, address_size}

    options_test = global_options

    if target == 'host':
      options_test += ' --host'
    elif target == 'jvm':
      options_test += ' --jvm'

    # Honor ART_TEST_CHROOT, ART_TEST_ANDROID_ROOT, ART_TEST_ANDROID_ART_ROOT,
    # ART_TEST_ANDROID_I18N_ROOT, and ART_TEST_ANDROID_TZDATA_ROOT but only
    # for target tests.
    if target == 'target':
      if env.ART_TEST_CHROOT:
        options_test += ' --chroot ' + env.ART_TEST_CHROOT
      if env.ART_TEST_ANDROID_ROOT:
        options_test += ' --android-root ' + env.ART_TEST_ANDROID_ROOT
      if env.ART_TEST_ANDROID_I18N_ROOT:
        options_test += ' --android-i18n-root ' + env.ART_TEST_ANDROID_I18N_ROOT
      if env.ART_TEST_ANDROID_ART_ROOT:
        options_test += ' --android-art-root ' + env.ART_TEST_ANDROID_ART_ROOT
      if env.ART_TEST_ANDROID_TZDATA_ROOT:
        options_test += ' --android-tzdata-root ' + env.ART_TEST_ANDROID_TZDATA_ROOT

    if run == 'ndebug':
      options_test += ' -O'

    if prebuild == 'prebuild':
      options_test += ' --prebuild'
    elif prebuild == 'no-prebuild':
      options_test += ' --no-prebuild'

    if cdex_level:
      # Add option and remove the cdex- prefix.
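      # e.g. the 'cdex-fast' variant becomes '--compact-dex-level fast'.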
      options_test += ' --compact-dex-level ' + cdex_level.replace('cdex-','')

    if compiler == 'optimizing':
      options_test += ' --optimizing'
    elif compiler == 'regalloc_gc':
      options_test += ' --optimizing -Xcompiler-option --register-allocation-strategy=graph-color'
    elif compiler == 'interpreter':
      options_test += ' --interpreter'
    elif compiler == 'interp-ac':
      options_test += ' --interpreter --verify-soft-fail'
    elif compiler == 'jit':
      options_test += ' --jit'
    elif compiler == 'jit-on-first-use':
      options_test += ' --jit --runtime-option -Xjitthreshold:0'
    elif compiler == 'speed-profile':
      options_test += ' --random-profile'
    elif compiler == 'baseline':
      options_test += ' --baseline'

    if relocate == 'relocate':
      options_test += ' --relocate'
    elif relocate == 'no-relocate':
      options_test += ' --no-relocate'

    if trace == 'trace':
      options_test += ' --trace'
    elif trace == 'stream':
      options_test += ' --trace --stream'

    if gc == 'gcverify':
      options_test += ' --gcverify'
    elif gc == 'gcstress':
      options_test += ' --gcstress'

    if jni == 'forcecopy':
      options_test += ' --runtime-option -Xjniopts:forcecopy'
    elif jni == 'checkjni':
      options_test += ' --runtime-option -Xcheck:jni'

    if image == 'no-image':
      options_test += ' --no-image'

    if debuggable == 'debuggable':
      options_test += ' --debuggable --runtime-option -Xopaque-jni-ids:true'

    if jvmti == 'jvmti-stress':
      options_test += ' --jvmti-trace-stress --jvmti-redefine-stress --jvmti-field-stress'
    elif jvmti == 'field-stress':
      options_test += ' --jvmti-field-stress'
    elif jvmti == 'trace-stress':
      options_test += ' --jvmti-trace-stress'
    elif jvmti == 'redefine-stress':
      options_test += ' --jvmti-redefine-stress'
    elif jvmti == 'step-stress':
      options_test += ' --jvmti-step-stress'

    if address_size == '64':
      options_test += ' --64'

    # TODO(http://36039166): This is a temporary solution to
    # fix build breakages.
    options_test = (' --output-path %s') % (
        tempfile.mkdtemp(dir=env.ART_HOST_TEST_DIR)) + options_test

    run_test_sh = env.ANDROID_BUILD_TOP + '/art/test/run-test'
    command = ' '.join((run_test_sh, options_test, ' '.join(extra_arguments[target]), test))
    return executor.submit(run_test, command, test, variant_set, test_name)

  # Use a context-manager to handle cleaning up the extracted zipapex if needed.
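  # handle_zipapex() yields an empty string when no zipapex is configured, leaving
  # options_all unchanged in that case.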
  with handle_zipapex(zipapex_loc) as zipapex_opt:
    options_all += zipapex_opt
    global n_thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=n_thread) as executor:
      test_futures = []
      for config_tuple in config:
        target = config_tuple[1]
        for address_size in _user_input_variants['address_sizes_target'][target]:
          test_futures.append(start_combination(executor, config_tuple, options_all, address_size))

      for config_tuple in uncombinated_config:
        test_futures.append(
            start_combination(executor, config_tuple, options_all, ""))  # no address size

      tests_done = 0
      for test_future in concurrent.futures.as_completed(test_futures):
        (test, status, failure_info, test_time) = test_future.result()
        tests_done += 1
        print_test_info(tests_done, test, status, failure_info, test_time)
        if failure_info and not env.ART_TEST_KEEP_GOING:
          for f in test_futures:
            f.cancel()
          break
      executor.shutdown(True)

@contextlib.contextmanager
def handle_zipapex(ziploc):
  """Extracts the zipapex (if present) and handles cleanup.

  If we are running out of a zipapex, we want to unzip it once and have all the tests use the same
  extracted contents. This extracts the files and handles cleanup if needed. It returns the
  required extra arguments to pass to the run-test.
  """
  if ziploc is not None:
    with tempfile.TemporaryDirectory() as tmpdir:
      subprocess.check_call(["unzip", "-qq", ziploc, "apex_payload.zip", "-d", tmpdir])
      subprocess.check_call(
          ["unzip", "-qq", os.path.join(tmpdir, "apex_payload.zip"), "-d", tmpdir])
      yield " --runtime-extracted-zipapex " + tmpdir
  else:
    yield ""

def _popen(**kwargs):
  if sys.version_info.major == 3 and sys.version_info.minor >= 6:
    return subprocess.Popen(encoding=sys.stdout.encoding, **kwargs)
  return subprocess.Popen(**kwargs)

def run_test(command, test, test_variant, test_name):
  """Runs the test.

  It invokes the art/test/run-test script to run the test. The exit value of
  the script is checked: a zero exit value means the test passed, otherwise
  the test is added to the list of failed tests. Before actually running the
  test, it also checks if the test is in the list of disabled tests, and if
  so, skips running it and adds the test to the list of skipped tests.

  Args:
    command: The command to be used to invoke the script.
    test: The name of the test without the variant information.
    test_variant: The set of variants for the test.
    test_name: The name of the test along with the variants.

  Returns: a tuple of testname, status, optional failure info, and test time.
  """
  try:
    if is_test_disabled(test, test_variant):
      test_skipped = True
      test_time = datetime.timedelta()
    else:
      test_skipped = False
      test_start_time = time.monotonic()
      if verbose:
        print_text("Starting %s at %s\n" % (test_name, test_start_time))
      if gdb:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          universal_newlines=True,
          start_new_session=True
        )
      else:
        proc = _popen(
          args=command.split(),
          stderr=subprocess.STDOUT,
          stdout=subprocess.PIPE,
          universal_newlines=True,
          start_new_session=True,
        )
      script_output = proc.communicate(timeout=timeout)[0]
      test_passed = not proc.wait()
      test_time_seconds = time.monotonic() - test_start_time
      test_time = datetime.timedelta(seconds=test_time_seconds)

    if not test_skipped:
      if test_passed:
        return (test_name, 'PASS', None, test_time)
      else:
        failed_tests.append((test_name, str(command) + "\n" + script_output))
        return (test_name, 'FAIL', ('%s\n%s') % (command, script_output), test_time)
    elif not dry_run:
      skipped_tests.append(test_name)
      return (test_name, 'SKIP', None, test_time)
    else:
      return (test_name, 'PASS', None, test_time)
  except subprocess.TimeoutExpired as e:
    if verbose:
      print_text("Timeout of %s at %s\n" % (test_name, time.monotonic()))
    test_time_seconds = time.monotonic() - test_start_time
    test_time = datetime.timedelta(seconds=test_time_seconds)
    failed_tests.append((test_name, 'Timed out in %d seconds' % timeout))

    # HACK(b/142039427): Print extra backtraces on timeout.
    if "-target-" in test_name:
      for i in range(8):
        proc_name = "dalvikvm" + test_name[-2:]
        pidof = subprocess.run(["adb", "shell", "pidof", proc_name], stdout=subprocess.PIPE)
        for pid in pidof.stdout.decode("ascii").split():
          if i >= 4:
            print_text("Backtrace of %s at %s\n" % (pid, time.monotonic()))
            subprocess.run(["adb", "shell", "debuggerd", pid])
            time.sleep(10)
          task_dir = "/proc/%s/task" % pid
          tids = subprocess.run(["adb", "shell", "ls", task_dir], stdout=subprocess.PIPE)
          for tid in tids.stdout.decode("ascii").split():
            for status in ["stat", "status"]:
              filename = "%s/%s/%s" % (task_dir, tid, status)
              print_text("Content of %s\n" % (filename))
              subprocess.run(["adb", "shell", "cat", filename])
        time.sleep(60)

    # The python documentation states that it is necessary to actually kill the process.
    os.killpg(proc.pid, signal.SIGKILL)
    script_output = proc.communicate()

    return (test_name, 'TIMEOUT', 'Timed out in %d seconds\n%s' % (timeout, command), test_time)
  except Exception as e:
    failed_tests.append((test_name, str(e)))
    return (test_name, 'FAIL', ('%s\n%s\n\n') % (command, str(e)), datetime.timedelta())

def print_test_info(test_count, test_name, result, failed_test_info="",
                    test_time=datetime.timedelta()):
  """Print the continuous test information.

  If verbose is set to True, it continuously prints test status information
  on a new line.
  If verbose is set to False, it keeps erasing test
  information by overriding it with the latest test information. Also,
  in this case it strictly makes sure that the information length doesn't
  exceed the console width. It does so by shortening the test_name.

  When a test fails, it prints the output of the run-test script and
  the command used to invoke the script.
  It doesn't override the failing test information in either case.
  """

  info = ''
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    info = '\r' + ' ' * console_width + '\r'
  try:
    percent = (test_count * 100) / total_test_count
    progress_info = ('[ %d%% %d/%d ]') % (
        percent,
        test_count,
        total_test_count)
    if test_time.total_seconds() != 0 and verbose:
      info += '(%s)' % str(test_time)


    if result == 'FAIL' or result == 'TIMEOUT':
      if not verbose:
        info += ('%s %s %s\n') % (
            progress_info,
            test_name,
            COLOR_ERROR + result + COLOR_NORMAL)
      else:
        info += ('%s %s %s\n%s\n') % (
            progress_info,
            test_name,
            COLOR_ERROR + result + COLOR_NORMAL,
            failed_test_info)
    else:
      result_text = ''
      if result == 'PASS':
        result_text += COLOR_PASS + 'PASS' + COLOR_NORMAL
      elif result == 'SKIP':
        result_text += COLOR_SKIP + 'SKIP' + COLOR_NORMAL

      if verbose:
        info += ('%s %s %s\n') % (
            progress_info,
            test_name,
            result_text)
      else:
        total_output_length = 2  # Two spaces
        total_output_length += len(progress_info)
        total_output_length += len(result)
        allowed_test_length = console_width - total_output_length
        test_name_len = len(test_name)
        if allowed_test_length < test_name_len:
          test_name = ('...%s') % (
              test_name[-(allowed_test_length - 3):])
        info += ('%s %s %s') % (
            progress_info,
            test_name,
            result_text)
    send_csv_result(test_name, result)
    print_text(info)
  except Exception as e:
    print_text(('%s\n%s\n') % (test_name, str(e)))
    failed_tests.append(test_name)

def verify_knownfailure_entry(entry):
  supported_field = {
      'tests' : (list, str),
      'test_patterns' : (list,),
      'description' : (list, str),
      'bug' : (str,),
      'variant' : (str,),
      'devices': (list, str),
      'env_vars' : (dict,),
      'zipapex' : (bool,),
  }
  for field in entry:
    field_type = type(entry[field])
    if field_type not in supported_field[field]:
      raise ValueError('%s is not supported type for %s\n%s' % (
          str(field_type),
          field,
          str(entry)))

def get_disabled_test_info(device_name):
  """Generate the set of known failures.

  It parses the art/test/knownfailures.json file to generate the list of
  disabled tests.

  Returns:
    The method returns a dict of tests mapped to the variants list
    for which the test should not be run.
  """
  known_failures_file = env.ANDROID_BUILD_TOP + '/art/test/knownfailures.json'
  with open(known_failures_file) as known_failures_json:
    known_failures_info = json.loads(known_failures_json.read())

  disabled_test_info = {}
  for failure in known_failures_info:
    verify_knownfailure_entry(failure)
    tests = failure.get('tests', [])
    if isinstance(tests, str):
      tests = [tests]
    patterns = failure.get("test_patterns", [])
    if (not isinstance(patterns, list)):
      raise ValueError("test_patterns is not a list in %s" % failure)

    tests += [f for f in RUN_TEST_SET if any(re.match(pat, f) is not None for pat in patterns)]
    variants = parse_variants(failure.get('variant'))

    # Treat a '"devices": "<foo>"' entry as equivalent to the 'target' variant
    # when the connected device is named in "devices".
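    # When a "devices" list is given and the connected device is not in it, the
    # whole entry is ignored.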
    device_names = failure.get('devices', [])
    if isinstance(device_names, str):
      device_names = [device_names]
    if len(device_names) != 0:
      if device_name in device_names:
        variants.add('target')
      else:
        # Skip adding test info as device_name is not present in "devices" entry.
        continue

    env_vars = failure.get('env_vars')

    if check_env_vars(env_vars):
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (
              test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

    zipapex_disable = failure.get("zipapex", False)
    if zipapex_disable and zipapex_loc is not None:
      for test in tests:
        if test not in RUN_TEST_SET:
          raise ValueError('%s is not a valid run-test' % (test))
        if test in disabled_test_info:
          disabled_test_info[test] = disabled_test_info[test].union(variants)
        else:
          disabled_test_info[test] = variants

  return disabled_test_info

def gather_disabled_test_info():
  global DISABLED_TEST_CONTAINER
  device_name = get_device_name() if 'target' in _user_input_variants['target'] else None
  DISABLED_TEST_CONTAINER = get_disabled_test_info(device_name)

def check_env_vars(env_vars):
  """Checks if the env variables are set as required to run the test.

  Returns:
    True if all the env variables are set as required, otherwise False.
  """

  if not env_vars:
    return True
  for key in env_vars:
    if env.get_env(key) != env_vars.get(key):
      return False
  return True


def is_test_disabled(test, variant_set):
  """Checks if the test along with the variant_set is disabled.

  Args:
    test: The name of the test as in the art/test directory.
    variant_set: Variants to be used for the test.
  Returns:
    True, if the test is disabled.
  """
  if dry_run:
    return True
  if test in env.EXTRA_DISABLED_TESTS:
    return True
  if ignore_skips:
    return False
  variants_list = DISABLED_TEST_CONTAINER.get(test, {})
  for variants in variants_list:
    variants_present = True
    for variant in variants:
      if variant not in variant_set:
        variants_present = False
        break
    if variants_present:
      return True
  for bad_combo in NONFUNCTIONAL_VARIANT_SETS:
    if bad_combo.issubset(variant_set):
      return True
  return False


def parse_variants(variants):
  """Parse variants fetched from art/test/knownfailures.json.
  """
  if not variants:
    variants = ''
    for variant in TOTAL_VARIANTS_SET:
      variants += variant
      variants += '|'
    variants = variants[:-1]
  variant_list = set()
  or_variants = variants.split('|')
  for or_variant in or_variants:
    and_variants = or_variant.split('&')
    variant = set()
    for and_variant in and_variants:
      and_variant = and_variant.strip()
      if and_variant not in TOTAL_VARIANTS_SET:
        raise ValueError('%s is not a valid variant' % (
            and_variant))
      variant.add(and_variant)
    variant_list.add(frozenset(variant))
  return variant_list

def print_text(output):
  sys.stdout.write(output)
  sys.stdout.flush()

def print_analysis():
  if not verbose:
    # Without --verbose, the testrunner erases passing test info. It
    # does that by overriding the printed text with white spaces all across
    # the console width.
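    # 'stty size' prints "<rows> <columns>"; only the column count is used here.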
    console_width = int(os.popen('stty size', 'r').read().split()[1])
    eraser_text = '\r' + ' ' * console_width + '\r'
    print_text(eraser_text)

  # Prints information about the total tests run.
  # E.g., "2/38 (5%) tests passed".
  passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests)
  passed_test_information = ('%d/%d (%d%%) %s passed.\n') % (
      passed_test_count,
      total_test_count,
      (passed_test_count*100)/total_test_count,
      'tests' if passed_test_count > 1 else 'test')
  print_text(passed_test_information)

  # Prints the list of skipped tests, if any.
  if skipped_tests:
    print_text(COLOR_SKIP + 'SKIPPED TESTS: ' + COLOR_NORMAL + '\n')
    for test in skipped_tests:
      print_text(test + '\n')
    print_text('\n')

  # Prints the list of failed tests, if any.
  if failed_tests:
    print_text(COLOR_ERROR + 'FAILED: ' + COLOR_NORMAL + '\n')
    for test_info in failed_tests:
      print_text(('%s\n%s\n' % (test_info[0], test_info[1])))
    print_text(COLOR_ERROR + '----------' + COLOR_NORMAL + '\n')
    for failed_test in sorted([test_info[0] for test_info in failed_tests]):
      print_text(('%s\n' % (failed_test)))

test_name_matcher = None
def extract_test_name(test_name):
  """Parses the test name and returns all the parts."""
  global test_name_matcher
  if test_name_matcher is None:
    regex = '^test-art-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['target']) + ')-'
    regex += 'run-test-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['run']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['prebuild']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['compiler']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['relocate']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['trace']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['gc']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jni']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['image']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['debuggable']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['jvmti']) + ')-'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['cdex_level']) + ')-'
    regex += '(' + '|'.join(RUN_TEST_SET) + ')'
    regex += '(' + '|'.join(VARIANT_TYPE_DICT['address_sizes']) + ')$'
    test_name_matcher = re.compile(regex)
  match = test_name_matcher.match(test_name)
  if match:
    return list(match.group(i) for i in range(1,15))
  raise ValueError(test_name + " is not a valid test")

def parse_test_name(test_name):
  """Parses the test name provided by the user.
  It supports two types of test_name:
  1) Like 001-HelloWorld. In this case, it will just verify if the test actually
     exists and, if it does, it returns the test name.
  2) Like test-art-host-run-test-debug-prebuild-interpreter-no-relocate-ntrace-cms-checkjni-picimage-ndebuggable-no-jvmti-cdex-fast-001-HelloWorld32
     In this case, it will parse all the variants and check if they are placed
     correctly. If yes, it will set the various VARIANT_TYPES to use the
     variants required to run the test. Again, it returns the test_name
     without the variant information, like 001-HelloWorld.
  """
  test_set = set()
  for test in RUN_TEST_SET:
    if test.startswith(test_name):
      test_set.add(test)
  if test_set:
    return test_set

  parsed = extract_test_name(test_name)
  _user_input_variants['target'].add(parsed[0])
  _user_input_variants['run'].add(parsed[1])
  _user_input_variants['prebuild'].add(parsed[2])
  _user_input_variants['compiler'].add(parsed[3])
  _user_input_variants['relocate'].add(parsed[4])
  _user_input_variants['trace'].add(parsed[5])
  _user_input_variants['gc'].add(parsed[6])
  _user_input_variants['jni'].add(parsed[7])
  _user_input_variants['image'].add(parsed[8])
  _user_input_variants['debuggable'].add(parsed[9])
  _user_input_variants['jvmti'].add(parsed[10])
  _user_input_variants['cdex_level'].add(parsed[11])
  _user_input_variants['address_sizes'].add(parsed[13])
  return {parsed[12]}


def get_default_threads(target):
  if target == 'target':
    adb_command = 'adb shell cat /sys/devices/system/cpu/present'
    cpu_info_proc = subprocess.Popen(adb_command.split(), stdout=subprocess.PIPE)
    cpu_info = cpu_info_proc.stdout.read()
    if type(cpu_info) is bytes:
      cpu_info = cpu_info.decode('utf-8')
    cpu_info_regex = r'\d*-(\d*)'
    match = re.match(cpu_info_regex, cpu_info)
    if match:
      return int(match.group(1))
    else:
      raise ValueError('Unable to predict the concurrency for the target. '
                       'Is device connected?')
  else:
    return multiprocessing.cpu_count()

def parse_option():
  global verbose
  global dry_run
  global ignore_skips
  global n_thread
  global build
  global dist
  global gdb
  global gdb_arg
  global runtime_option
  global run_test_option
  global timeout
  global dex2oat_jobs
  global run_all_configs
  global with_agent
  global zipapex_loc
  global csv_result

  parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.")
  parser.add_argument('-t', '--test', action='append', dest='tests', help='name(s) of the test(s)')
  global_group = parser.add_argument_group('Global options',
                                           'Options that affect all tests being run')
  global_group.add_argument('-j', type=int, dest='n_thread')
  global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout')
  global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose')
  global_group.add_argument('--dry-run', action='store_true', dest='dry_run')
  global_group.add_argument("--skip", action='append', dest="skips", default=[],
                            help="Skip the given test in all circumstances.")
  global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False,
                            help="""Don't skip any run-test configurations listed in
                            knownfailures.json.""")
  global_group.add_argument('--no-build-dependencies',
                            action='store_false', dest='build',
                            help="""Don't build dependencies under any circumstances. This is the
                            behavior if ART_TEST_RUN_TEST_ALWAYS_BUILD is not set to 'true'.""")
  global_group.add_argument('-b', '--build-dependencies',
                            action='store_true', dest='build',
                            help="""Build dependencies under all circumstances. By default we will
                            not build dependencies unless ART_TEST_RUN_TEST_BUILD=true.""")
  global_group.add_argument('--dist',
                            action='store_true', dest='dist',
                            help="""If dependencies are to be built, pass `dist` to the build
                            command line. You may want to also set the DIST_DIR environment
You may want to also set the DIST_DIR environment 1055 variable when using this flag.""") 1056 global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD) 1057 global_group.add_argument('--gdb', action='store_true', dest='gdb') 1058 global_group.add_argument('--gdb-arg', dest='gdb_arg') 1059 global_group.add_argument('--run-test-option', action='append', dest='run_test_option', 1060 default=[], 1061 help="""Pass an option, unaltered, to the run-test script. 1062 This should be enclosed in single-quotes to allow for spaces. The option 1063 will be split using shlex.split() prior to invoking run-test. 1064 Example \"--run-test-option='--with-agent libtifast.so=MethodExit'\"""") 1065 global_group.add_argument('--with-agent', action='append', dest='with_agent', 1066 help="""Pass an agent to be attached to the runtime""") 1067 global_group.add_argument('--runtime-option', action='append', dest='runtime_option', 1068 help="""Pass an option to the runtime. Runtime options 1069 starting with a '-' must be separated by a '=', for 1070 example '--runtime-option=-Xjitthreshold:0'.""") 1071 global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs', 1072 help='Number of dex2oat jobs') 1073 global_group.add_argument('--runtime-zipapex', dest='runtime_zipapex', default=None, 1074 help='Location for runtime zipapex.') 1075 global_group.add_argument('-a', '--all', action='store_true', dest='run_all', 1076 help="Run all the possible configurations for the input test set") 1077 global_group.add_argument('--csv-results', action='store', dest='csv_result', default=None, 1078 type=argparse.FileType('w'), help='Store a CSV record of all results.') 1079 for variant_type, variant_set in VARIANT_TYPE_DICT.items(): 1080 var_group = parser.add_argument_group( 1081 '{}-type Options'.format(variant_type), 1082 "Options that control the '{}' variants.".format(variant_type)) 1083 var_group.add_argument('--all-' + variant_type, 1084 action='store_true', 1085 dest='all_' + variant_type, 1086 help='Enable all variants of ' + variant_type) 1087 for variant in variant_set: 1088 flag = '--' + variant 1089 var_group.add_argument(flag, action='store_true', dest=variant) 1090 1091 options = vars(parser.parse_args()) 1092 if options['csv_result'] is not None: 1093 csv_result = options['csv_result'] 1094 setup_csv_result() 1095 # Handle the --all-<type> meta-options 1096 for variant_type, variant_set in VARIANT_TYPE_DICT.items(): 1097 if options['all_' + variant_type]: 1098 for variant in variant_set: 1099 options[variant] = True 1100 1101 tests = None 1102 env.EXTRA_DISABLED_TESTS.update(set(options['skips'])) 1103 if options['tests']: 1104 tests = set() 1105 for test_name in options['tests']: 1106 tests |= parse_test_name(test_name) 1107 1108 for variant_type in VARIANT_TYPE_DICT: 1109 for variant in VARIANT_TYPE_DICT[variant_type]: 1110 if options.get(variant): 1111 _user_input_variants[variant_type].add(variant) 1112 1113 if options['verbose']: 1114 verbose = True 1115 if options['n_thread']: 1116 n_thread = max(1, options['n_thread']) 1117 ignore_skips = options['ignore_skips'] 1118 if options['dry_run']: 1119 dry_run = True 1120 verbose = True 1121 build = options['build'] 1122 dist = options['dist'] 1123 if options['gdb']: 1124 n_thread = 1 1125 gdb = True 1126 if options['gdb_arg']: 1127 gdb_arg = options['gdb_arg'] 1128 runtime_option = options['runtime_option']; 1129 with_agent = options['with_agent']; 1130 run_test_option = sum(map(shlex.split, options['run_test_option']), []) 1131 zipapex_loc = 
  timeout = options['timeout']
  if options['dex2oat_jobs']:
    dex2oat_jobs = options['dex2oat_jobs']
  if options['run_all']:
    run_all_configs = True

  return tests

def main():
  gather_test_info()
  user_requested_tests = parse_option()
  setup_test_env()
  gather_disabled_test_info()
  if build:
    build_targets = ''
    if 'host' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    if 'target' in _user_input_variants['target']:
      build_targets += 'test-art-target-run-test-dependencies '
    if 'jvm' in _user_input_variants['target']:
      build_targets += 'test-art-host-run-test-dependencies '
    build_command = env.ANDROID_BUILD_TOP + '/build/soong/soong_ui.bash --make-mode'
    build_command += ' DX='
    if dist:
      build_command += ' dist'
    build_command += ' ' + build_targets
    print_text('Build command: %s\n' % build_command)
    if subprocess.call(build_command.split()):
      # Debugging for b/62653020
      if env.DIST_DIR:
        shutil.copyfile(env.SOONG_OUT_DIR + '/build.ninja', env.DIST_DIR + '/soong.ninja')
      sys.exit(1)

  if user_requested_tests:
    run_tests(user_requested_tests)
  else:
    run_tests(RUN_TEST_SET)

  print_analysis()
  close_csv_file()

  exit_code = 0 if len(failed_tests) == 0 else 1
  sys.exit(exit_code)

if __name__ == '__main__':
  main()