author     Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>    2023-02-26 15:50:14 +0530
committer  Prathamesh Kulkarni <prathamesh.kulkarni@linaro.org>    2023-02-26 15:58:20 +0530
commit     b9236aa4effec65fdfd873f0440d96120c995d8d (patch)
tree       d298f24e0ee05ffc78e80646e37b9b1a8990eddf /size-data-to-csv.py
parent     cbe6a59814e0de8066f943f03b40315f0f96d878 (diff)
Refactor metric_utils to provide a Benchmarks hierarchy.
The patch mainly refactors metric_utils.py to provide an inheritance hierarchy for benchmarks. It starts with an abstract base class called Benchmark with a single abstract method "exe", which must be implemented by subclasses. Currently, we have two classes, Benchmark_coremark and Benchmark_spec, that subclass Benchmark.

The other part of the patch modifies the metric scripts to use the modified API from metric_utils.py. In particular, this removes the need to special-case coremark in size-data-to-csv.py, which is now abstracted under Benchmark. The intent is to similarly reuse the script for other benchmarks like AOSP by subclassing Benchmark and providing the relevant benchmark details. Changes to the other metric scripts are mechanical. The patch also adds proper command-line option processing to size-data-to-csv.py using argparse.

I have tested the patch by running tcwg-benchmark-results.sh with and without this patch and verifying that there is no difference in the final results csv generated by csvs2table.py (and compare-results.py).

Change-Id: I903929132bc8109103a2aa0e3d5076b7361419e6
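For reference, a minimal sketch of the Benchmark hierarchy described above. The actual implementation lives in metric_utils.py and is not part of this diff; everything beyond the names Benchmark, Benchmark_coremark, Benchmark_spec and the abstract "exe" member is an illustrative assumption, modelled on how size-data-to-csv.py uses bmk.exe below.

    from abc import ABC, abstractmethod

    class Benchmark(ABC):
        """Abstract base; subclasses describe where a benchmark's executable lives."""

        def __init__(self, name):
            self.name = name

        @property
        @abstractmethod
        def exe(self):
            """Return an object exposing .name, .path and .libs for the main
            executable, matching how size-data-to-csv.py consumes bmk.exe."""

    class Benchmark_coremark(Benchmark):
        @property
        def exe(self):
            # Hypothetical: locate the single coremark binary under the results dir.
            ...

    class Benchmark_spec(Benchmark):
        @property
        def exe(self):
            # Hypothetical: locate the per-benchmark SPEC binary and its shared libraries.
            ...

With the argparse changes below, the script is invoked with named options (-b/--benchmark, -r/--results_dir, -o/--out, and optionally -p/--profile) instead of positional arguments, and the profile csv is only mandatory for SPEC.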
Diffstat (limited to 'size-data-to-csv.py')
-rwxr-xr-x  size-data-to-csv.py  83
1 file changed, 38 insertions(+), 45 deletions(-)
diff --git a/size-data-to-csv.py b/size-data-to-csv.py
index 12a3b3f..f6637cf 100755
--- a/size-data-to-csv.py
+++ b/size-data-to-csv.py
@@ -5,6 +5,7 @@ import csv
import subprocess
import os
import metric_utils
+import argparse
"""
Parse output of nm -S -td."""
@@ -44,68 +45,60 @@ def get_symbol_size(symbol_to_size, perf_bmk_symbols, exe_path):
return symbol_to_size
-class Executable:
- def __init__(self, name, path):
- self.name = name
- self.path = path
+def calculate_size(exe_path):
+ return int(subprocess.check_output ("size {0} | tail -n1 | cut -f1".format(exe_path), shell=True).decode('utf-8').strip())
- def calculate_size(self):
- return int(subprocess.check_output ("size {0} | tail -n1 | cut -f1".format(self.path), shell=True).decode('utf-8').strip())
+"""
+Parse command line args. """
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-b", "--benchmark", dest="benchmark", type=str, required=True,
+ help="Benchmark name (spec, coremark)")
+ parser.add_argument("-r", "--results_dir", dest="results_dir", type=str, required=True,
+ help="Path to results dir.")
+ parser.add_argument("-o", "--out", dest="output_csv", type=str, required=True,
+ help="Output csv file")
+ parser.add_argument("-p", "--profile", dest="profile_csv", type=str, default="",
+ help="Performance profile")
+
+ args = parser.parse_args()
+ if args.benchmark == "spec" and args.profile_csv == "":
+ print("Error: SPEC needs to be provided with profile csv.")
+ exit(1)
+ return args
def main():
- bmk = sys.argv[1]
- results_dir = sys.argv[2]
- out_csv_file = sys.argv[3]
+ args = parse_args()
+ bmk_name = args.benchmark
+ results_dir = args.results_dir
+ out_csv_file = args.output_csv
+ perf_csv = args.profile_csv
outf = open(out_csv_file, "w")
csvwriter = csv.writer(outf)
csvwriter.writerow(("benchmark", "symbol", "size"))
- if bmk == "coremark":
- sizes = glob.glob("{0}/*/size.txt".format(results_dir), recursive=True)
- assert len(sizes) == 1
- size_txt = sizes[0]
- textsize = -1
- with open(size_txt, "r") as fp:
- lines = fp.readlines()
- for line in lines:
- if line.startswith(".text"):
- textsize = int(line.split()[1])
- break
- csvwriter.writerow(("coremark", "coremark", textsize))
- return 0
-
- perf_csv = sys.argv[4]
- benchmarks = metric_utils.get_benchmarks_from_results_dir(results_dir)
+ benchmarks = metric_utils.get_benchmarks_from_results_dir(bmk_name, results_dir)
for bmk in benchmarks:
- exe_path = bmk.exe_path
- # Skip calculating size, if exe is not found.
- if exe_path is None:
- csvwriter.writerow((bmk.name, bmk.exe_name, 0))
- continue
-
- # exes holds the main exe and libs used by benchmark.
- exes = []
- exes.append(Executable(bmk.exe_name, exe_path))
-
- libs = bmk.libs
- for libname in libs.keys():
- exes.append(Executable(libname, libs[libname]))
+ exe = bmk.exe
# Get symbols from perf.csv. We ensure that size for all symbols from
     # perf.csv is calculated.
perf_bmk_symbols = metric_utils.get_bmk_symbols_from_perf(perf_csv, bmk.name)
-
+
# FIXME: For now, accumulate size for symbols with same name across main exe and libraries.
# So sym_to_size now contains symbols across the main exe and libs, with
# one entry for common symbols.
- sym_to_size = {}
- for exe in exes:
- sym_to_size = get_symbol_size(sym_to_size, perf_bmk_symbols, exe.path)
+ sym_to_size = get_symbol_size({}, perf_bmk_symbols, exe.path)
+ libs = exe.libs
+ for libname in libs:
+ sym_to_size = get_symbol_size(sym_to_size, perf_bmk_symbols, libs[libname])
# Write entry for libs: bmk, libname, total lib size
- for exe in exes:
- csvwriter.writerow((bmk.name, exe.name, exe.calculate_size()))
+ csvwriter.writerow((bmk.name, exe.name, calculate_size(exe.path)))
+ libs = exe.libs
+ for libname in libs:
+ csvwriter.writerow((bmk.name, libname, calculate_size(libs[libname])))
     # Write entry for individual symbols
for symbol in sym_to_size.keys():
@@ -116,4 +109,4 @@ def main():
return 0
if __name__ == "__main__":
- exit(main())
+    exit(main())
\ No newline at end of file