#!/usr/bin/env python2
#
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# pylint: disable=cros-logging-import

# This script runs the specified benchmarks with different toolchain
# settings. It covers building each benchmark locally and running it on
# the DUT.

"""Main script to run the benchmark suite, from building to testing."""
from __future__ import print_function

import argparse
import ConfigParser
import logging
import os
import subprocess
import sys

import config

logging.basicConfig(level=logging.INFO)


def _parse_arguments(argv):
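    """Parse command line arguments into an argparse.Namespace.

    Args:
        argv: List of command line arguments (without the program name).

    Returns:
        The parsed argparse.Namespace.
    """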
    parser = argparse.ArgumentParser(description='Build and run specific '
                                     'benchmark')
    parser.add_argument(
        '-b',
        '--bench',
        action='append',
        default=[],
        help='Select which benchmark to run')

    # Only one of compiler directory and LLVM prebuilts version can be
    # specified, so put -c and -l into a mutually exclusive group.
    group = parser.add_mutually_exclusive_group()

    # The toolchain setting arguments use action='append', so that users
    # can compare performance across several toolchain settings at once.
    group.add_argument(
        '-c',
        '--compiler_dir',
        metavar='DIR',
        action='append',
        default=[],
        help='Specify the path to the compiler\'s bin directory. '
        'You may give several paths, each with a -c, to '
        'compare performance differences between '
        'compilers.')

    parser.add_argument(
        '-o',
        '--build_os',
        action='append',
        default=[],
        help='Specify the host OS to build the benchmark on.')

    group.add_argument(
        '-l',
        '--llvm_prebuilts_version',
        action='append',
        default=[],
        help='Specify the version of prebuilt LLVM. When a '
        'specific prebuilt version of LLVM already '
        'exists, there is no need to pass the compiler '
        'directory.')

    parser.add_argument(
        '-f',
        '--cflags',
        action='append',
        default=[],
        help='Specify the cflags for the toolchain. '
        'Be sure to quote all the cflags with quotation '
        'marks ("") or use an equals sign (=).')
    parser.add_argument(
        '--ldflags',
        action='append',
        default=[],
        help='Specify linker flags for the toolchain.')

    parser.add_argument(
        '-i',
        '--iterations',
        type=int,
        default=1,
        help='Specify how many iterations the test runs.')

    # Arguments -s and -r are for connecting to the DUT.
    parser.add_argument(
        '-s',
        '--serials',
        help='Comma-separated list of device serials under '
        'test.')

    parser.add_argument(
        '-r',
        '--remote',
        default='localhost',
        help='hostname[:port] if the ADB device is connected '
        'to a remote machine. Ensure this workstation '
        'is configured for passwordless ssh access as '
        'user "root" or "adb".')

    # Arguments --frequency and -m are for device settings.
    parser.add_argument(
        '--frequency',
        type=int,
        default=979200,
        help='Specify the CPU frequency of the device. The '
        'unit is kHz. The available values are listed in '
        'the cpufreq/scaling_available_frequencies file in '
        'each core directory of the device. '
        'The default value is 979200, which gives a good '
        'balance between noise and performance. A lower '
        'frequency slows down the performance but '
        'reduces noise.')

    parser.add_argument(
        '-m',
        '--mode',
        default='little',
        help='Specify whether to use \'little\' or \'big\' '
        'mode. The default is little mode. '
        'Little mode runs on a single Cortex-A53 core, '
        'while big mode runs on a single Cortex-A57 '
        'core.')

    # Configuration file for the benchmark test.
    parser.add_argument(
        '-t',
        '--test',
        help='Specify the test settings with a configuration '
        'file.')

    # Whether to keep old json results or not.
    parser.add_argument(
        '-k',
        '--keep',
        default='False',
        help='Specify whether to keep the old JSON results '
        'from the last run. This can be useful if you '
        'want to compare performance differences between '
        'two or more different runs. Default is False (off).')

    return parser.parse_args(argv)


# Clear old log files in the bench suite directory.
def clear_logs():
    logging.info('Removing old logfiles...')
    for f in ['build_log', 'device_log', 'test_log']:
        logfile = os.path.join(config.bench_suite_dir, f)
        try:
            os.remove(logfile)
        except OSError:
            logging.info('No logfile %s needs to be removed. Ignored.', f)
    logging.info('Old logfiles have been removed.')


# Clear old JSON result files in the bench suite directory.
def clear_results():
    logging.info('Clearing old json results...')
    for bench in config.bench_list:
        result = os.path.join(config.bench_suite_dir, bench + '.json')
        try:
            os.remove(result)
        except OSError:
            logging.info('No %s JSON file needs to be removed. Ignored.', bench)
    logging.info('Old json results have been removed.')


# Use subprocess.check_call to run another script, and append its logs to a
# file under the bench suite directory.
def check_call_with_log(cmd, log_file):
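    """Run a command and append its stdout to a log file.

    Args:
        cmd: Command list to pass to subprocess.check_call.
        log_file: Log file name, relative to config.bench_suite_dir.

    Raises:
        subprocess.CalledProcessError: If the command exits non-zero.
    """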
    log_file = os.path.join(config.bench_suite_dir, log_file)
    with open(log_file, 'a') as logfile:
        log_header = 'Log for command: %s\n' % (cmd)
        logfile.write(log_header)
        try:
            subprocess.check_call(cmd, stdout=logfile)
        except subprocess.CalledProcessError:
            logging.error('Error running %s, please check %s for more info.',
                          cmd, log_file)
            raise
    logging.info('Logs for %s are written to %s.', cmd, log_file)


def set_device(serials, remote, frequency):
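    """Configure the DUT(s) by calling autotest's site_utils/set_device.py.

    Args:
        serials: Comma-separated device serials, or None to use the only
            connected device.
        remote: hostname[:port] of the machine the device is attached to.
        frequency: CPU frequency (in kHz) to set on the device.
    """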
    setting_cmd = [
        os.path.join(
            os.path.join(config.android_home, config.autotest_dir),
            'site_utils/set_device.py')
    ]
    setting_cmd.append('-r=' + remote)
    setting_cmd.append('-q=' + str(frequency))

    # Deal with serials.
    # If no serials are specified, try to run the test on the only connected
    # device. If specified, split the serials into a list and set up each
    # device in turn.
    if serials:
        for serial in serials.split(','):
            setting_cmd.append('-s=' + serial)
            check_call_with_log(setting_cmd, 'device_log')
            setting_cmd.pop()
    else:
        check_call_with_log(setting_cmd, 'device_log')

    logging.info('CPU mode and frequency set successfully!')


def log_ambiguous_args():
    logging.error('The count of arguments does not match!')
    raise ValueError('The count of arguments does not match.')


# Check whether the counts of the build arguments are ambiguous. The number
# of -c/-l, -f, --ldflags, and -o arguments should be either 0 or all the
# same.
def check_count(compiler, llvm_version, build_os, cflags, ldflags):
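    """Return the number of toolchain settings to test.

    Each of the append-type arguments must be given either 0 times or the
    same number of times as the others; otherwise a ValueError is raised.
    For example, two -c flags combined with two -f flags give a count of 2,
    but two -c flags with one -f flag are rejected.

    Returns:
        The number of settings, or 1 if no setting arguments were passed.
    """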
    # Count will be 0 if neither compiler nor llvm_version is specified.
    # Otherwise, since -c and -l are mutually exclusive, one of the two lists
    # is empty and count is the length of the other.
    count = max(len(compiler), len(llvm_version))

    # Check whether the number of cflags is 0 or matches the count so far.
    if len(cflags) != 0:
        if count != 0 and len(cflags) != count:
            log_ambiguous_args()
        count = len(cflags)

    if len(ldflags) != 0:
        if count != 0 and len(ldflags) != count:
            log_ambiguous_args()
        count = len(ldflags)

    if len(build_os) != 0:
        if count != 0 and len(build_os) != count:
            log_ambiguous_args()
        count = len(build_os)

    # If no settings are passed, only run the default once.
    return max(1, count)


# Build the benchmark binary with the given toolchain settings.
def build_bench(setting_no, bench, compiler, llvm_version, build_os, cflags,
                ldflags):
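    """Build the benchmark locally by invoking build_bench.py.

    Args:
        setting_no: Index of the toolchain setting to use.
        bench: Name of the benchmark to build.
        compiler: List of compiler bin directories (may be empty).
        llvm_version: List of prebuilt LLVM versions (may be empty).
        build_os: List of host OS settings (may be empty).
        cflags: List of cflags settings (may be empty).
        ldflags: List of ldflags settings (may be empty).
    """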
    # Build the benchmark locally.
    build_cmd = ['./build_bench.py', '-b=' + bench]
    if compiler:
        build_cmd.append('-c=' + compiler[setting_no])
    if llvm_version:
        build_cmd.append('-l=' + llvm_version[setting_no])
    if build_os:
        build_cmd.append('-o=' + build_os[setting_no])
    if cflags:
        build_cmd.append('-f=' + cflags[setting_no])
    if ldflags:
        build_cmd.append('--ldflags=' + ldflags[setting_no])

    logging.info('Building benchmark for toolchain setting No.%d...',
                 setting_no)
    logging.info('Command: %s', build_cmd)

    try:
        subprocess.check_call(build_cmd)
    except (subprocess.CalledProcessError, OSError):
        logging.error('Error while building benchmark!')
        raise


def run_and_collect_result(test_cmd, setting_no, i, bench, serial='default'):
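    """Run the autotest benchmark command and collect the raw result.

    The raw 'bench_result' file produced by autotest is renamed to
    bench_result_{bench}_{serial}_{setting_no}_{iteration} under the bench
    suite directory.

    Args:
        test_cmd: Command list that runs the benchmark on the DUT.
        setting_no: Index of the toolchain setting being tested.
        i: Iteration number.
        bench: Benchmark name.
        serial: Device serial, or 'default' when only one device is used.
    """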

    # Run the autotest script for the benchmark on the DUT.
    check_call_with_log(test_cmd, 'test_log')

    logging.info('Benchmark with setting No.%d, iter.%d finished testing on '
                 'device %s.', setting_no, i, serial)

    # Rename the raw bench_result file generated by autotest.
    bench_result = os.path.join(config.bench_suite_dir, 'bench_result')
    if not os.path.exists(bench_result):
        logging.error('No result found at %s, '
                      'please check test_log for details.', bench_result)
        raise OSError('Result file %s not found.' % bench_result)

    new_bench_result = 'bench_result_%s_%s_%d_%d' % (bench, serial,
                                                     setting_no, i)
    new_bench_result_path = os.path.join(config.bench_suite_dir,
                                         new_bench_result)
    try:
        os.rename(bench_result, new_bench_result_path)
    except OSError:
        logging.error('Error while renaming raw result %s to %s',
                      bench_result, new_bench_result_path)
        raise

    logging.info('Benchmark result saved at %s.', new_bench_result_path)


def test_bench(bench, setting_no, iterations, serials, remote, mode):
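    """Run the benchmark on the DUT(s) for the given number of iterations.

    Args:
        bench: Benchmark name.
        setting_no: Index of the toolchain setting being tested.
        iterations: Number of iterations to run.
        serials: Comma-separated device serials, or None for the only device.
        remote: hostname[:port] of the machine the device is attached to.
        mode: 'little' or 'big' CPU mode.
    """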
    logging.info('Start running benchmark on device...')

    # Run the benchmark and tests on the DUT.
    for i in xrange(iterations):
        logging.info('Iteration No.%d:', i)
        test_cmd = [
            os.path.join(
                os.path.join(config.android_home, config.autotest_dir),
                'site_utils/test_bench.py')
        ]
        test_cmd.append('-b=' + bench)
        test_cmd.append('-r=' + remote)
        test_cmd.append('-m=' + mode)

        # Deal with serials. If no serials are specified, try to run the test
        # on the only connected device. If specified, split the serials into a
        # list and run the test on each device.
        if serials:
            for serial in serials.split(','):
                test_cmd.append('-s=' + serial)

                run_and_collect_result(test_cmd, setting_no, i, bench, serial)
                test_cmd.pop()
        else:
            run_and_collect_result(test_cmd, setting_no, i, bench)


def gen_json(bench, setting_no, iterations, serials):
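    """Generate JSON results for Crosperf from the raw benchmark results.

    Args:
        bench: Benchmark name.
        setting_no: Index of the toolchain setting being tested.
        iterations: Number of iterations that were run.
        serials: Comma-separated device serials, or None for the only device.
    """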
    bench_result = os.path.join(config.bench_suite_dir, 'bench_result')

    logging.info('Generating JSON file for Crosperf...')

    if not serials:
        serials = 'default'

    for serial in serials.split(','):

        # Use the device lunch combo as the platform name instead of the
        # serial and setting number.
        #experiment = '_'.join([serial, str(setting_no)])
        experiment = config.product_combo

        # Input format: bench_result_{bench}_{serial}_{setting_no}_
        input_file = '_'.join([bench_result, bench,
                               serial, str(setting_no), ''])
        gen_json_cmd = [
            './gen_json.py', '--input=' + input_file,
            '--output=%s.json' % os.path.join(config.bench_suite_dir, bench),
            '--bench=' + bench, '--platform=' + experiment,
            '--iterations=' + str(iterations)
        ]

        logging.info('Command: %s', gen_json_cmd)
        if subprocess.call(gen_json_cmd):
            logging.error('Error while generating JSON file, please check raw'
                          ' data of the results at %s.', input_file)


def gen_crosperf(infile, outfile):
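    """Generate an HTML report from the JSON results using Crosperf.

    Args:
        infile: Path to the input JSON file.
        outfile: Output path prefix; the report is written to outfile.html.
    """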
    # Set the environment variable for crosperf.
    os.environ['PYTHONPATH'] = os.path.dirname(config.toolchain_utils)

    logging.info('Generating Crosperf report...')
    crosperf_cmd = [
        os.path.join(config.toolchain_utils, 'generate_report.py'),
        '-i=' + infile, '-o=' + outfile, '-f'
    ]

    # Run crosperf generate_report.py.
    logging.info('Command: %s', crosperf_cmd)
    subprocess.call(crosperf_cmd)

    logging.info('Report generated successfully!')
    logging.info('Report location: %s.html in the bench suite directory.',
                 outfile)


def main(argv):
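    """Entry point: set up the environment, then build, test, and report.

    Args:
        argv: Command line arguments (without the program name).
    """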
    # Set the environment variable for the local location of the benchmark
    # suite. This is for collecting test results into the benchmark suite
    # directory.
    os.environ['BENCH_SUITE_DIR'] = config.bench_suite_dir

    # Set the Android type, used to distinguish AOSP from internal builds.
    os.environ['ANDROID_TYPE'] = config.android_type

    # Set ANDROID_HOME for both building and testing.
    os.environ['ANDROID_HOME'] = config.android_home

    # Set the environment variable for the architecture; this will be used in
    # autotest.
    os.environ['PRODUCT'] = config.product

    arguments = _parse_arguments(argv)

    bench_list = arguments.bench
    if not bench_list:
        bench_list = config.bench_list

    compiler = arguments.compiler_dir
    build_os = arguments.build_os
    llvm_version = arguments.llvm_prebuilts_version
    cflags = arguments.cflags
    ldflags = arguments.ldflags
    iterations = arguments.iterations
    serials = arguments.serials
    remote = arguments.remote
    frequency = arguments.frequency
    mode = arguments.mode
    keep = arguments.keep

    # Clear old logs every time before running the script.
    clear_logs()

    if keep == 'False':
        clear_results()

    # Set the test mode and CPU frequency of the DUT.
    set_device(serials, remote, frequency)

    test = arguments.test
    # If a test configuration file has been given, use the build settings
    # in the configuration file and run the test.
    if test:
        test_config = ConfigParser.ConfigParser(allow_no_value=True)
        if not test_config.read(test):
            logging.error('Error while reading the build '
                          'configuration file %s.', test)
            raise RuntimeError('Error while reading configuration file %s.'
                               % test)

        for setting_no, section in enumerate(test_config.sections()):
            bench = test_config.get(section, 'bench')
            compiler = [test_config.get(section, 'compiler')]
            build_os = [test_config.get(section, 'build_os')]
            llvm_version = [test_config.get(section, 'llvm_version')]
            cflags = [test_config.get(section, 'cflags')]
            ldflags = [test_config.get(section, 'ldflags')]

            # Get iterations from the test_config file; if not set, use the
            # value from the command line.
            it = test_config.get(section, 'iterations')
            if not it:
                it = iterations
            it = int(it)

            # Build the benchmark for each single test configuration.
            build_bench(0, bench, compiler, llvm_version,
                        build_os, cflags, ldflags)

            test_bench(bench, setting_no, it, serials, remote, mode)

            gen_json(bench, setting_no, it, serials)

        for bench in config.bench_list:
            infile = os.path.join(config.bench_suite_dir, bench + '.json')
            if os.path.exists(infile):
                outfile = os.path.join(config.bench_suite_dir,
                                       bench + '_report')
                gen_crosperf(infile, outfile)

        # Stop the script if only a config file was provided.
        return 0

    # If no configuration file is specified, continue with the command line
    # settings. Check whether the counts of the setting arguments are
    # ambiguous.
    setting_count = check_count(compiler, llvm_version, build_os,
                                cflags, ldflags)

    for bench in bench_list:
        logging.info('Start building and running benchmark: [%s]', bench)
        # Run the steps for each toolchain setting.
        for setting_no in xrange(setting_count):
            build_bench(setting_no, bench, compiler, llvm_version,
                        build_os, cflags, ldflags)

            # Run the autotest script for the benchmark test on the device.
            test_bench(bench, setting_no, iterations, serials, remote, mode)

            gen_json(bench, setting_no, iterations, serials)

        infile = os.path.join(config.bench_suite_dir, bench + '.json')
        outfile = os.path.join(config.bench_suite_dir, bench + '_report')
        gen_crosperf(infile, outfile)


if __name__ == '__main__':
    main(sys.argv[1:])