author     Scott Wakeling <scott.wakeling@linaro.org>   2015-08-18 13:00:09 +0100
committer  Scott Wakeling <scott.wakeling@linaro.org>   2015-08-26 11:38:49 +0100
commit     aa381fb57bbdd3e39a02ca29e676cd9bde0e6f43 (patch)
tree       c9a590a09f00eb5ad2c26df1452529924d012cbc
parent     381a856f2728aa48fd8dec9038aba9da69d5fa06 (diff)
Fixes and improvements.
- Use float("inf") to avoid errors when running under Python 2.
- Optionally verify benchmark results with boolean verify().
- Pass --debug to java when run.py is verbose.
- Fix the 'mode' argument being ignored by run_adb.
- Add -f/--filter to select benchmarks by an optional glob.
Change-Id: I233503b82445eed71b32a42989f1684c72e73509
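A quick usage sketch of the new options (the APK path below is illustrative, and the on-device line mirrors what run_adb builds rather than a documented command):

    # Host side: run only benchmarks whose .java file name matches the glob.
    ./run.py --filter Sort

    # Device side: RunBench accepts --noverify to skip result verification.
    dalvikvm -cp /data/local/tmp/bench.apk com.arm.microbench.RunBench --noverify BubbleSort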
-rw-r--r--  benchmarking/java-ubenchs/benchmarks/BubbleSort.java                   |  6
-rw-r--r--  benchmarking/java-ubenchs/benchmarks/com/arm/microbench/RunBench.java  | 53
-rwxr-xr-x  benchmarking/java-ubenchs/run.py                                       | 40
-rw-r--r--  benchmarking/java-ubenchs/tools/perf/PERF.README                       |  6
-rw-r--r--  benchmarking/java-ubenchs/utils.py                                     |  8
5 files changed, 72 insertions, 41 deletions
diff --git a/benchmarking/java-ubenchs/benchmarks/BubbleSort.java b/benchmarking/java-ubenchs/benchmarks/BubbleSort.java
index 705d759..2e01458 100644
--- a/benchmarking/java-ubenchs/benchmarks/BubbleSort.java
+++ b/benchmarking/java-ubenchs/benchmarks/BubbleSort.java
@@ -41,17 +41,19 @@ public class BubbleSort {
         }
     }
 
-    public void verify() {
+    public boolean verify() {
         // Verify sorted output.
         for(int i = 0; i < this.inputArr.length; ++i) {
             int expected = this.inputArr.length - i - 1;
             int actual = this.inputArr[i];
             if(expected != actual) {
-                System.out.println(" Mismatch at position " + i +
+                System.out.println("ERROR: Mismatch at position " + i +
                         " Expected " + expected +
                         " Actual " + actual);
+                return false;
             }
         }
+        return true;
     }
 
     public static void main(String[] args) {
diff --git a/benchmarking/java-ubenchs/benchmarks/com/arm/microbench/RunBench.java b/benchmarking/java-ubenchs/benchmarks/com/arm/microbench/RunBench.java
index 5c4070c..d642083 100644
--- a/benchmarking/java-ubenchs/benchmarks/com/arm/microbench/RunBench.java
+++ b/benchmarking/java-ubenchs/benchmarks/com/arm/microbench/RunBench.java
@@ -22,25 +22,26 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.concurrent.TimeUnit;
 
 public class RunBench {
-    // Minimum valid calibration time: 300ms.
-    public final static long DEFAULT_CALIBRATION_MIN_TIME = 30000000L;
+    // Minimum valid calibration time: 400ms.
+    public final static long DEFAULT_CALIBRATION_MIN_TIME_NS = TimeUnit.NANOSECONDS.convert(400, TimeUnit.MILLISECONDS);
     // The target benchmark running time: 2s.
-    public final static long DEFAULT_BENCHMARK_TARGET_RUN_TIME = 2000000000L;
+    public final static long DEFAULT_BENCHMARK_TARGET_RUN_TIME_NS = TimeUnit.NANOSECONDS.convert(2, TimeUnit.SECONDS);
     public final static int ITERATIONS_LIMIT = 0x40000000;
     // A method with this name will be executed as a microbenchmark.
     public static final String TESTNAME_PREFIX = "time";
 
     private SimpleLogger log;
-    private long calibrationMinTime;
-    private long benchmarkTargetRunTime;
+    private long calibrationMinTimeNS;
+    private long benchmarkTargetRunTimeNS;
 
     public RunBench() {
         this.log = SimpleLogger.getInstance();
-        calibrationMinTime = DEFAULT_CALIBRATION_MIN_TIME;
-        benchmarkTargetRunTime = DEFAULT_BENCHMARK_TARGET_RUN_TIME;
+        calibrationMinTimeNS = DEFAULT_CALIBRATION_MIN_TIME_NS;
+        benchmarkTargetRunTimeNS = DEFAULT_BENCHMARK_TARGET_RUN_TIME_NS;
     }
 
     public void setLogLevel(SimpleLogger.LogLevel level) {
@@ -87,14 +88,14 @@ public class RunBench {
         } else {
             // Estimate how long it takes to run one iteration.
             iterations = 1;
-            while ((duration < calibrationMinTime) && (iterations < ITERATIONS_LIMIT)) {
+            while ((duration < calibrationMinTimeNS) && (iterations < ITERATIONS_LIMIT)) {
                 iterations *= 2;
                 duration = timeIterations(instance, method, (int) iterations);
             }
             // Estimate the number of iterations to run based on the calibration
             // phase, and benchmark the function.
             double iter_time = duration / (double) iterations;
-            iterations = (int) Math.max(1.0, benchmarkTargetRunTime / iter_time);
+            iterations = (int) Math.max(1.0, benchmarkTargetRunTimeNS / iter_time);
             duration = timeIterations(instance, method, (int) iterations);
         }
 
@@ -106,37 +107,47 @@ public class RunBench {
                 + "."
                 + method.getName().substring(4) + ":", iteration_time);
     }
 
-    public int runBenchSet(String test, String subtest) {
+    public int runBenchSet(String test, String subtest, boolean verify) {
         if (test == null) {
             return 1;
         }
-        List<Method> methods = new ArrayList<Method>(5);
+        List<Method> benchMethods = new ArrayList<Method>(5);
+        Method verifyMethod = null;
         try {
            Class<?> clazz = Class.forName(test);
            Object instance = clazz.newInstance();
            if (subtest != null) {
                Method m = clazz.getMethod(TESTNAME_PREFIX + subtest, int.class);
-                methods.add(m);
+                benchMethods.add(m);
            } else {
                for (Method m : clazz.getDeclaredMethods()) {
                    if (m.getName().startsWith(TESTNAME_PREFIX)) {
-                        methods.add(m);
+                        benchMethods.add(m);
+                    } else if (m.getName().equals("verify") && m.getReturnType() == boolean.class) {
+                        verifyMethod = m;
                    }
                }
            }
-            // Sort methods by name.
-            Collections.sort(methods, new Comparator<Method>() {
+            // Sort benchMethods by name.
+            Collections.sort(benchMethods, new Comparator<Method>() {
                @Override
                public int compare(Method m1, Method m2) {
                    return m1.getName().compareTo(m2.getName());
                }
            });
-            for (Method m : methods) {
+            for (Method m : benchMethods) {
                // Run each method as a benchmark.
                runOneBench(instance, m);
            }
+            // Optionally verify benchmark results.
+            if (verify && verifyMethod != null) {
+                if (!(Boolean)verifyMethod.invoke(instance)) {
+                    log.error(clazz.getName() + " failed verification.");
+                    return 1;
+                }
+            }
        } catch (Exception e) {
            // TODO: filter exceptions.
            e.printStackTrace();
@@ -148,6 +159,8 @@ public class RunBench {
     public void parseCmdlineAndRun(String[] args) {
         String test = null;
         String subtest = null;
+        boolean verify = true; // Verify all benchmark results by default.
+
         // TODO: help message
         for (int i = 0; i < args.length; i++) {
             if (args[i].startsWith("--")) {
@@ -166,23 +179,25 @@ public class RunBench {
                 } else if (option.equals("benchmark_run_time")) {
                     i++;
                     if (i < args.length) {
-                        this.benchmarkTargetRunTime = Long.valueOf(args[i]) * 1000000; // milliseconds
+                        this.benchmarkTargetRunTimeNS = TimeUnit.NANOSECONDS.convert(Long.valueOf(args[i]), TimeUnit.MILLISECONDS);
                     } else {
                         log.fatal("Require time.");
                     }
                 } else if (option.equals("calibration_min_time")) {
                     i++;
                     if (i < args.length) {
-                        this.calibrationMinTime = Long.valueOf(args[i]) * 1000000; // milliseconds
+                        this.calibrationMinTimeNS = TimeUnit.NANOSECONDS.convert(Long.valueOf(args[i]), TimeUnit.MILLISECONDS);
                     } else {
                         log.fatal("Require time.");
                     }
+                } else if (option.equals("noverify")) {
+                    verify = false;
                 }
             } else {
                 test = args[i];
             }
         }
-        if (runBenchSet(test, subtest) != 0) {
+        if (runBenchSet(test, subtest, verify) != 0) {
             log.error("Test failed.");
         }
     }
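The calibration scheme in RunBench above is easier to follow outside diff context: double the iteration count until one timing run lasts at least the minimum calibration time, then scale the count so the measured run hits the target duration. A minimal Python sketch of the same idea (function and parameter names here are illustrative; RunBench.java is the authoritative version):

    ITERATIONS_LIMIT = 0x40000000

    def calibrated_iterations(time_iterations_ns, calibration_min_time_ns, target_run_time_ns):
        # Phase 1: double iterations until a run lasts at least the minimum
        # calibration time (or the iteration cap is reached).
        iterations = 1
        duration = time_iterations_ns(iterations)
        while duration < calibration_min_time_ns and iterations < ITERATIONS_LIMIT:
            iterations *= 2
            duration = time_iterations_ns(iterations)
        # Phase 2: scale the count so the real run lasts about
        # target_run_time_ns in total.
        iter_time = duration / float(iterations)
        return max(1, int(target_run_time_ns / iter_time))

    # Example with a fake workload costing 1ms per iteration:
    print(calibrated_iterations(lambda n: n * 1000000,
                                400 * 1000000,     # 400ms, the new default
                                2 * 1000000000))   # 2s target -> 2000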
diff --git a/benchmarking/java-ubenchs/run.py b/benchmarking/java-ubenchs/run.py
index 06b5f67..28bdb1f 100755
--- a/benchmarking/java-ubenchs/run.py
+++ b/benchmarking/java-ubenchs/run.py
@@ -79,6 +79,8 @@ def BuildOptions():
                         default = default_remote_copy_path,
                         help = '''Path where objects should be copied on the target.''')
+    parser.add_argument('-f', '--filter', action = 'store', default = '*',
+                        help='Quoted (benchmark name) filter pattern.')
     return parser.parse_args()
@@ -92,9 +94,8 @@ def ensure_dir(path):
     if not os.path.exists(path):
         os.makedirs(path)
 
-def get_current_path_files_by_extension(ext, path):
-    return glob.glob(os.path.join(path, '*.' + ext))
-
+def get_files(name_glob, ext, path):
+    return glob.glob(os.path.join(path, '*' + name_glob + '*.' + ext))
 
 # ADB helpers
@@ -139,11 +140,16 @@ def BuildBenchmarks(args):
 
 def run_adb(mode, apk, classname):
     dalvikvm = 'dalvikvm%s' % mode
-    command = "cd %s && ANDROID_DATA=`pwd` DEX_LOCATION=`pwd` dalvikvm -cp %s" % (os.path.dirname(apk), apk)
+    command = ("cd %s && ANDROID_DATA=`pwd` DEX_LOCATION=`pwd` %s -cp %s"
+               % (os.path.dirname(apk), dalvikvm, apk))
     if args.calibrate:
+        # Run the benchmark's time* method(s) via bench_runner_main
         command += " %s %s" % (bench_runner_main, classname)
     else:
-        command = " %s" % (classname)
+        # Run the benchmark as a main class directly
+        command += " %s" % (classname)
+    if verbose:
+        command += " --debug"
     out, err = adb_shell(command)
     return out.decode('UTF-8')
@@ -176,19 +182,25 @@ def RunBench(apk, classname,
             sys.stderr.write(" \-> FAILED, continuing anyway\n")
             continue
 
-        for line in out.rstrip().split("\n"):
-            name = line.split(":")[0].rstrip()
-            score = float(line.split(":")[1].strip().split(" ")[0].strip())
-
-            if name not in result:
-                result[name] = list()
+        try:
+            for line in out.rstrip().split("\n"):
+                name = line.split(":")[0].rstrip()
+                # Ignore any java logging from --debug
+                if name not in ['INFO', 'DEBUG', 'ERROR']:
+                    score = float(line.split(":")[1].strip().split(" ")[0].strip())
+                    if name not in result:
+                        result[name] = list()
+                    result[name].append(score)
+        except Exception as e:
+            print(e)
+            print(" \-> Error parsing output from %s" % classname)
+            break
-
-        result[name].append(score)
 
 
 def RunBenchs(apk, bench_names,
               iterations = default_n_iterations,
               mode = default_mode):
-    VerbosePrint('\n# Running benchmarks')
+    VerbosePrint('\n# Running benchmarks: ' + ' '.join(bench_names))
     for bench in bench_names:
         RunBench(apk, bench, iterations = iterations, mode = mode)
@@ -210,7 +222,7 @@ if __name__ == "__main__":
     if args.norun:
         sys.exit(0)
 
-    bench_files = get_current_path_files_by_extension('java', dir_benchmarks)
+    bench_files = get_files(args.filter, 'java', dir_benchmarks)
     bench_names = [os.path.basename(f).replace('.java', '') for f in bench_files]
     bench_names.sort()
diff --git a/benchmarking/java-ubenchs/tools/perf/PERF.README b/benchmarking/java-ubenchs/tools/perf/PERF.README
index 15944de..ff7e379 100644
--- a/benchmarking/java-ubenchs/tools/perf/PERF.README
+++ b/benchmarking/java-ubenchs/tools/perf/PERF.README
@@ -33,7 +33,7 @@ Steps to use the automation script:
     . build/envsetup.sh
     lunch
 
-1.3 Build and install perf.
+1.3 Build and install perf (it's part of <AOSP>)
     m -j 64 perf
     adb root
     adb remount
@@ -58,7 +58,9 @@ For example,
 
 3.2. Review output via browser.
 
-    3.2.1. Copy this folder entirely to webserver's html folder. Or make a symbol link in the html
+    Try viewing html/index.html locally. If that doesn't work for you:
+
+    3.2.1. Copy tools/perf/html folder entirely to webserver's html folder. Or make a symbol link in the html
            folder to this folder.
           For example, ln -s $(pwd) /var/www/html/java-test
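The new get_files() brackets the filter with wildcards, so a bare name fragment is enough to select benchmarks. A small illustration (the second benchmark file name is hypothetical):

    import glob
    import os

    def get_files(name_glob, ext, path):
        # As in run.py: wrap the pattern so 'Sort' matches '*Sort*.java',
        # and the default '*' still matches every '.java' file.
        return glob.glob(os.path.join(path, '*' + name_glob + '*.' + ext))

    # With benchmarks/BubbleSort.java and benchmarks/AccessArray.java on disk:
    #   get_files('Sort', 'java', 'benchmarks') -> ['benchmarks/BubbleSort.java']
    #   get_files('*', 'java', 'benchmarks')    -> both files (the default)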
diff --git a/benchmarking/java-ubenchs/utils.py b/benchmarking/java-ubenchs/utils.py
index 81f78de..8c6d99a 100644
--- a/benchmarking/java-ubenchs/utils.py
+++ b/benchmarking/java-ubenchs/utils.py
@@ -24,7 +24,7 @@ def PrintStats(dict_results, iterations = None):
         M = max(nums)
         ave = statistics.mean(nums)
         d = statistics.pstdev(nums, ave)
-        dp = d / ave * 100 if ave != 0 else infinity
+        dp = d / ave * 100 if ave != 0 else float("inf")
         results.append([benchmark, m, M, ave, d, dp])
 
     PrintTable(headers, results)
@@ -39,11 +39,11 @@ def PrintDiff(res_1, res_2):
     for bench in sorted(benchmarks):
         ave1 = statistics.mean(res_1[bench])
         d1 = statistics.pstdev(res_1[bench], ave1)
-        dp1 = d1 / ave1 * 100 if ave1 != 0 else infinity
+        dp1 = d1 / ave1 * 100 if ave1 != 0 else float("inf")
         ave2 = statistics.mean(res_2[bench])
         d2 = statistics.pstdev(res_2[bench], ave2)
-        dp2 = d2 / ave2 * 100 if ave2 != 0 else infinity
-        diff = (ave2 - ave1) / ave1 * 100 if ave1 != 0 else infinity
+        dp2 = d2 / ave2 * 100 if ave2 != 0 else float("inf")
+        diff = (ave2 - ave1) / ave1 * 100 if ave1 != 0 else float("inf")
         results.append([bench, ave1, dp1, ave2, dp2, diff])
 
     PrintTable(headers, results)
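On the utils.py change: float("inf") is the spelling of infinity that works on both Python 2 and 3 (math.inf only appeared in Python 3.5, and the bare infinity name it replaces presumably relied on a definition that was not available under Python 2). A minimal, runnable check of the guarded pattern used in PrintStats() and PrintDiff(), with a hypothetical helper name:

    def percent_stdev(stdev, mean):
        # Relative standard deviation in percent; report infinity for a
        # zero mean instead of raising ZeroDivisionError.
        return stdev / mean * 100 if mean != 0 else float("inf")

    print(percent_stdev(2.0, 10.0))  # 20.0
    print(percent_stdev(2.0, 0.0))   # inf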