author    Arthur She <arthur.she@linaro.org>    2015-11-02 23:08:10 -0800
committer Arthur She <arthur.she@linaro.org>    2015-11-02 23:08:10 -0800
commit    35aaadf85b773be7d208c8d7dec253152caa13cf (patch)
tree      e583ea8229e2e19fbbc0f77e9d1b90d8980308db
parent    58e98933c73c4bf98e738b239721fd8f58b919a5 (diff)
Complete LAVA:get_test_results()
The main program has not been updated to use it yet.
-rwxr-xr-x  art_post_script.py  49
1 file changed, 37 insertions(+), 12 deletions(-)
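
For orientation before the diff: get_test_results() now returns a list with one entry per benchmark instead of a single dict. A minimal sketch of the shape, where the field names come from the diff below but the board and measurement values are invented for illustration:

    test_result_list = [
        { "board": "panda",                 # hypothetical example values
          "board_config": "panda-default",
          "benchmark_name": "BinaryTrees",
          "subscore": [
              { "name": "mean_arm64",   "measurement": 101.2 },
              { "name": "stddev_arm64", "measurement": 1.3 } ] } ]

Each subscore dict is later merged into the RESULTDATA_ENDPOINT params via params.update(t), so the "name" and "measurement" keys must match what that endpoint expects.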
diff --git a/art_post_script.py b/art_post_script.py
index ffc3fba..65e6160 100755
--- a/art_post_script.py
+++ b/art_post_script.py
@@ -81,7 +81,7 @@ class ArtDb(object):
}
manifest = self.push_object(MANIFEST_ENDPOINT, params)
params = {
- "name": data['test_result']['test_name']
+ "name": data['test_result']['benchmark_name']
}
benchmark = self.push_object(BENCHMARK_ENDPOINT, params)
params = {
@@ -113,7 +113,7 @@ class ArtDb(object):
"benchmark": benchmark['id'],
"result": result['id']
}
- for t in data['test_result']['test_result']:
+ for t in data['test_result']['subscore']:
params.update(t)
# logger.info("Submit: %s" % str(params))
self.push_object(RESULTDATA_ENDPOINT, params)
@@ -201,10 +201,11 @@ class LAVA(object):
raise
def get_test_results(self, job_no):
+ test_result_list = []
test_result = { "board": "",
"board_config": "",
- "test_name": "",
- "test_result": [] }
+ "benchmark_name": "",
+ "subscore": [] }
try:
job_status = self.server.scheduler.job_status(job_no)
except:
@@ -230,25 +231,49 @@ class LAVA(object):
# Get test name and test result
if host['test_id'] == 'lava-android-benchmark-host':
# This is a third-party benchmark test
- test_result['test_name'] = ast.literal_eval(src['test_params'])['TEST_NAME']
+ test_result['benchmark_name'] = ast.literal_eval(src['test_params'])['TEST_NAME']
# Get test results
for t in host['test_results']:
if 'measurement' in t:
test_case = { "name": t['test_case_id'],
"measurement" : t['measurement'] }
- test_result['test_result'].append(test_case)
+ test_result['subscore'].append(test_case)
+ test_result_list.append(test_result)
else:
# This is an art-microbenchmarks test
# The test name and test results are in the attached pkl file
# Get the test results from the attachment
+ test_mode = ast.literal_eval(src['test_params'])['MODE']
pkl_content = (a['content'] for a in host['attachments'] if a['pathname'].endswith('pkl')).next()
test_result_dict = pickle.loads(base64.b64decode(pkl_content))
- test_mode = ast.literal_eval(src['test_params'])['MODE']
-
- if not test_result['test_result']:
- raise InfoRequestError("No test cases in job #%s" % job_no)
-
- return test_result
+ # Key Format: benchmarks/micro/<BENCHMARK_NAME>.<SUBSCORE>
+            # Extract and de-duplicate them to build a benchmark name list
+ test_result_keys = list(bn.split('/')[-1].split('.')[0] for bn in test_result_dict.keys())
+ benchmark_list = list(set(test_result_keys))
+ for benchmark in benchmark_list:
+# print("Benchmark: %s" % benchmark)
+ test_result = {}
+ # benchmark iteration
+ test_result['benchmark_name'] = benchmark
+ test_result['subscore'] = []
+ key_word = "/%s." % benchmark
+ tests = ((k, test_result_dict[k]) for k in test_result_dict.keys() if k.find(key_word) > 0)
+ for test in tests:
+ # subscore iteration
+ subscore = "%s_%s" % (test[0].split('.')[-1], test_mode)
+# print("\tSubscore: %s" % subscore)
+# print("\tValues: %s" % str(test[1]))
+ for i in test[1]:
+ test_case = { "name": subscore,
+ "measurement": i }
+ test_result['subscore'].append(test_case)
+
+ test_result_list.append(test_result)
+
+# if not test_result['test_result']:
+# raise InfoRequestError("No test cases in job #%s" % job_no)
+
+ return test_result_list
if __name__ == '__main__':
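
The grouping logic in the art-microbenchmarks branch can be exercised on its own. Below is a self-contained sketch of the same technique, assuming (as the in-diff comment states) that the unpickled attachment maps keys of the form benchmarks/micro/<BENCHMARK_NAME>.<SUBSCORE> to lists of measurements; the function name parse_microbenchmark_results and the sample data are invented for illustration:

    # In the script itself the dict comes from a base64-encoded pickle attachment:
    #     test_result_dict = pickle.loads(base64.b64decode(pkl_content))

    def parse_microbenchmark_results(test_result_dict, test_mode):
        # Key format: benchmarks/micro/<BENCHMARK_NAME>.<SUBSCORE>
        # De-duplicate the <BENCHMARK_NAME> parts: one entry per benchmark.
        benchmark_list = set(k.split('/')[-1].split('.')[0] for k in test_result_dict)
        test_result_list = []
        for benchmark in benchmark_list:
            test_result = {"benchmark_name": benchmark, "subscore": []}
            key_word = "/%s." % benchmark
            for key, values in test_result_dict.items():
                if key_word not in key:
                    continue
                # Subscore name = part after the last '.', suffixed with the mode.
                subscore = "%s_%s" % (key.split('.')[-1], test_mode)
                for measurement in values:
                    test_result["subscore"].append(
                        {"name": subscore, "measurement": measurement})
            test_result_list.append(test_result)
        return test_result_list

    # Invented sample shaped like a decoded pkl attachment:
    sample = {
        "benchmarks/micro/BinaryTrees.mean": [101.2, 99.8],
        "benchmarks/micro/BinaryTrees.stddev": [1.3],
        "benchmarks/micro/Fibonacci.mean": [42.0],
    }
    print(parse_microbenchmark_results(sample, "arm64"))

The membership test (key_word in key) plays the role of the diff's key.find(key_word) > 0: since every key starts with "benchmarks/", a match can never sit at position 0, so the two checks agree.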