
import argparse
import glob
import os
import re
import subprocess
import sys

9# test command line arguments
10argParser = argparse.ArgumentParser(description = 'dexter end-to-end test driver')
11argParser.add_argument('-cmd', default = 'dexter')
12argParser.add_argument('-root', help = 'Root location of the test data files')
13argParser.add_argument('-update', action = 'store_true', help = 'Update the expected files')
14args = argParser.parse_args()
15
16# the bazel sandbox test data root
17data_root = args.root or 'tools/dexter/testdata'
18
19# update expected (golden) output?
20if args.update:
21  if args.root is None:
22    print('ERROR: -update requires -root value')
23    exit(1)
24  print('\nUpdating expected output (test data root: %s)' % data_root)
25
26# list of test cases
27# ( <test_name> : { <test_case_config> } )
28test_cases = {
29  'map'              : { 'args' : '-m', 'input' : ['*.dex'] },
30  'stats'            : { 'args' : '-s', 'input' : ['*.dex'] },
31  'asm'              : { 'args' : '-d', 'input' : ['*.dex'] },
32  'hello_stats'      : { 'args' : '-s -e Hello', 'input' : ['hello.dex'] },
33  'am_stats'         : { 'args' : '-s -e android.app.ActivityManager', 'input' : ['large.dex'] },
34  'rewrite'          : { 'args' : '-d -x full_rewrite', 'input' : ['*.dex'] },
35  'entry_hook'       : { 'args' : '-d -x stress_entry_hook', 'input' : [
36                          'entry_hooks.dex', 'hello.dex', 'medium.dex', 'min.dex' ] },
37  'exit_hook'        : { 'args' : '-d -x stress_exit_hook', 'input' : [
38                          'exit_hooks.dex', 'medium.dex', 'try_catch.dex' ] },
39  'wrap_invoke'      : { 'args' : '-d -x stress_wrap_invoke', 'input' : [
40                          'hello.dex', 'hello_nodebug.dex', 'medium.dex' ] },
41  'mi'               : { 'args' : '-d -x test_method_instrumenter', 'input' : ['mi.dex'] },
42  'find_method'      : { 'args' : '-x stress_find_method', 'input' : [
43                          'hello.dex', 'entry_hooks.dex', 'medium.dex', 'large.dex', 'try_catch.dex' ] },
44  'verbose_cfg'      : { 'args' : '-d --cfg=verbose', 'input' : ['*.dex'] },
45  'compact_cfg'      : { 'args' : '-d --cfg=compact', 'input' : ['*.dex'] },
46  'scratch_regs'     : { 'args' : '-d -x stress_scratch_regs', 'input' : ['*.dex'] },
47  'regs_usage'       : { 'args' : '-x regs_histogram', 'input' : ['*.dex'] },
48  'code_coverage'    : { 'args' : '-d -x code_coverage', 'input' : ['*.dex'] },
49  'array_entry_hook' : { 'args' : '-d -x array_param_entry_hook', 'input' : ['mi.dex'] },
50  'object_exit_hook' : { 'args' : '-d -x return_obj_exit_hook', 'input' : ['mi.dex'] },
51}
52
53# run a shell command and returns the stdout content
54def Run(cmd, stdin_content=None):
55  return subprocess.Popen(
56    args = cmd,
57    shell = True,
58    stdin = subprocess.PIPE,
59    stdout = subprocess.PIPE,
60    stderr = subprocess.STDOUT).communicate(input = stdin_content)[0]
61
62tests = 0
63failures = 0
64
65# for each test_case, run dexter over the specified input (ex. *.dex)
66#
67# the expected ('golden') output has the same base name as the input .dex,
68# for example (test_name = 'map') :
69#
70#    'hello.dex' -> 'expected/hello.map'
71#
72for test_name, test_config in sorted(test_cases.iteritems()):
73  for input_pattern in test_config['input']:
74    input_files = glob.glob(os.path.join(data_root, input_pattern))
75
76    for input in input_files:
77      tests = tests + 1
78
79      # run dexter with the test arguments
80      cmd = '%s %s %s' % (args.cmd, test_config['args'], input)
81      actual_output = Run(cmd)
82
83      # build the expected filename
84      expected_filename = re.sub(r'\.dex', ('.%s' % test_name), os.path.basename(input))
85      expected_filename = os.path.join(data_root, 'expected', expected_filename)
86
87      if args.update:
88        # update expected output file
89        with open(expected_filename, "w") as f:
90          f.write(actual_output)
91      else:
92        # compare the actual output with the expected output
93        cmp_output = Run('diff "%s" -' % expected_filename, actual_output)
94        if cmp_output:
95          print('\nFAILED: expected output mismatch (%s)' % os.path.basename(expected_filename))
96          print(cmp_output)
97          failures = failures + 1
98        else:
99          print('ok: output matching (%s)' % os.path.basename(expected_filename))
100
101if args.update:
102  print('\nSUMMARY: updated expected output for %d tests\n' % tests)
103else:
104  print('\nSUMMARY: %d failure(s), %d test cases\n' % (failures, tests))
105
106if failures != 0:
107  exit(1)
108