"""End-to-end test driver for dexter.

Runs dexter over a set of .dex test inputs and diffs the output against
golden ('expected') files; -update regenerates the golden files instead.
"""

import argparse
import glob
import os
import re
import subprocess
import sys

# List of test cases
# ( <test_name> : { <test_case_config> } )
#
#   'args'  : extra dexter command line arguments for this test
#   'input' : glob patterns (relative to the data root) selecting the .dex inputs
#   'skip'  : optional list of input files to exclude from the glob results
test_cases = {
    'map' : { 'args' : '-m', 'input' : ['*.dex'] },
    'stats' : { 'args' : '-s', 'input' : ['*.dex'] },
    'asm' : { 'args' : '-d', 'input' : ['*.dex'] },
    'hello_stats' : { 'args' : '-s -e Hello', 'input' : ['hello.dex'] },
    'am_stats' : { 'args' : '-s -e android.app.ActivityManager', 'input' : ['large.dex'] },
    'rewrite' : { 'args' : '-d -x full_rewrite', 'input' : ['*.dex'], 'skip' : ['method_handles.dex'] },
    'entry_hook' : { 'args' : '-d -x stress_entry_hook', 'input' : [
        'entry_hooks.dex', 'hello.dex', 'medium.dex', 'min.dex' ] },
    'exit_hook' : { 'args' : '-d -x stress_exit_hook', 'input' : [
        'exit_hooks.dex', 'medium.dex', 'try_catch.dex' ] },
    'wrap_invoke' : { 'args' : '-d -x stress_wrap_invoke', 'input' : [
        'hello.dex', 'hello_nodebug.dex', 'medium.dex' ] },
    'mi' : { 'args' : '-d -x test_method_instrumenter', 'input' : ['mi.dex'] },
    'find_method' : { 'args' : '-x stress_find_method', 'input' : [
        'hello.dex', 'entry_hooks.dex', 'medium.dex', 'large.dex', 'try_catch.dex' ] },
    'verbose_cfg' : { 'args' : '-d --cfg=verbose', 'input' : ['*.dex'] },
    'compact_cfg' : { 'args' : '-d --cfg=compact', 'input' : ['*.dex'] },
    'scratch_regs' : { 'args' : '-d -x stress_scratch_regs', 'input' : ['*.dex'], 'skip' : ['method_handles.dex'] },
    'regs_usage' : { 'args' : '-x regs_histogram', 'input' : ['*.dex'], 'skip' : ['method_handles.dex'] },
    'code_coverage' : { 'args' : '-d -x code_coverage', 'input' : ['*.dex'], 'skip' : ['method_handles.dex'] },
    'array_entry_hook' : { 'args' : '-d -x array_param_entry_hook', 'input' : ['mi.dex'] },
    'object_exit_hook' : { 'args' : '-d -x return_obj_exit_hook', 'input' : ['mi.dex'] },
    'sign_exit_hook' : { 'args' : '-d -x pass_sign_exit_hook', 'input' : ['mi.dex'] },
    'method_handle_extract_one_asm'
        : { 'args' : '-d -x ExampleJavaJniFuzzer', 'input' : ['method_handles.dex'] },
    'method_handle_extract_two_asm'
        : { 'args' : '-d -x com/example/ExampleJavaHelper', 'input': ['method_handles.dex'] }
}


def Run(cmd, stdin_content=None):
    """Run a shell command and return its combined stdout+stderr output.

    Args:
        cmd: the shell command line to execute.
        stdin_content: optional text to pipe to the command's stdin.

    Returns:
        The command's output (stderr merged into stdout) as a str.
    """
    # universal_newlines=True makes the pipes text-mode: under Python 3 the
    # original returned bytes, which broke the text-mode golden-file writes
    # and the diff comparison below.
    return subprocess.Popen(
        args = cmd,
        shell = True,
        stdin = subprocess.PIPE,
        stdout = subprocess.PIPE,
        stderr = subprocess.STDOUT,
        universal_newlines = True).communicate(input = stdin_content)[0]


def main():
    """Parse the command line, run every test case, report a summary.

    Exits with status 1 on any expected-output mismatch, or when -update
    is requested without an explicit -root.
    """
    # test command line arguments
    argParser = argparse.ArgumentParser(description = 'dexter end-to-end test driver')
    argParser.add_argument('-cmd', default = 'dexter')
    argParser.add_argument('-root', help = 'Root location of the test data files')
    argParser.add_argument('-update', action = 'store_true', help = 'Update the expected files')
    args = argParser.parse_args()

    # the bazel sandbox test data root
    data_root = args.root or 'tools/dexter/testdata'

    # update expected (golden) output?
    if args.update:
        if args.root is None:
            print('ERROR: -update requires -root value')
            sys.exit(1)
        print('\nUpdating expected output (test data root: %s)' % data_root)

    tests = 0
    failures = 0

    # for each test_case, run dexter over the specified input (ex. *.dex)
    #
    # the expected ('golden') output has the same base name as the input .dex,
    # for example (test_name = 'map') :
    #
    #   'hello.dex' -> 'expected/hello.map'
    #
    for test_name, test_config in sorted(test_cases.items()):
        for input_pattern in test_config['input']:
            input_files = glob.glob(os.path.join(data_root, input_pattern))

            # resolve the optional skip list to absolute-ish paths so it can
            # be matched against the glob results
            skip_set = set()
            if 'skip' in test_config:
                for skip_file in test_config['skip']:
                    skip_set.add(os.path.join(data_root, skip_file))

            for input_file in input_files:
                if input_file in skip_set:
                    continue

                tests = tests + 1

                # run dexter with the test arguments
                cmd = '%s %s %s' % (args.cmd, test_config['args'], input_file)
                actual_output = Run(cmd)

                # build the expected filename: <basename>.dex -> expected/<basename>.<test_name>
                expected_filename = re.sub(r'\.dex', ('.%s' % test_name), os.path.basename(input_file))
                expected_filename = os.path.join(data_root, 'expected', expected_filename)

                if args.update:
                    # update expected output file
                    with open(expected_filename, "w") as f:
                        f.write(actual_output)
                else:
                    # compare the actual output with the expected output
                    cmp_output = Run('diff "%s" -' % expected_filename, actual_output)
                    if cmp_output:
                        print('\nFAILED: expected output mismatch (%s)' % os.path.basename(expected_filename))
                        print(cmp_output)
                        failures = failures + 1
                    else:
                        print('ok: output matching (%s)' % os.path.basename(expected_filename))

    if args.update:
        print('\nSUMMARY: updated expected output for %d tests\n' % tests)
    else:
        print('\nSUMMARY: %d failure(s), %d test cases\n' % (failures, tests))

    if failures != 0:
        sys.exit(1)


if __name__ == "__main__":
    main()