path: root/notify/output-bmk-results.log
Diffstat (limited to 'notify/output-bmk-results.log')
-rw-r--r--  notify/output-bmk-results.log | 226
1 file changed, 165 insertions(+), 61 deletions(-)
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index eea9990..8ba5bd6 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -107,21 +107,51 @@ output-bmk-results.py(258): print(results_df)
1 502.gcc_r cpugcc_r_base.default ... failed-to-run failed-to-run
2 505.mcf_r mcf_r_base.default ... success success
4 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
-5 519.lbm_r lbm_r_base.default ... -1 failed-to-run
-6 519.lbm_r lbm_r_base.default ... -1 failed-to-run
-7 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-8 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-9 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-10 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-11 531.deepsjeng_r deepsjeng_r_base.default ... success success
-13 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-14 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-15 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-16 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-18 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-19 557.xz_r xz_r_base.default ... success success
+5 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
+6 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
+7 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+8 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+9 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
+10 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+16 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+17 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+18 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[19 rows x 20 columns]
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(266): continue;
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(266): continue;
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
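
The hunk above only records the branch conditions of get_status_diag (output-bmk-results.py lines 113-134), not the branch bodies. A minimal Python sketch of the logic those conditions imply; the classif/short_diag values assigned inside each branch are assumptions, not taken from the log:

def get_status_diag(row):
    # Reconstructed from the traced conditions (output-bmk-results.py:113-134).
    bmk = row["benchmark"]
    short_diag = ""
    classif = ""
    if row["status_x"] != "failed-to-build" and row["status_y"] == "failed-to-build":
        classif, short_diag = "regression", bmk + " failed to build"          # assumed body
    elif row["status_x"] == "success" and row["status_y"] == "failed-to-run":
        classif, short_diag = "regression", bmk + " failed to run"            # assumed body
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "failed-to-run":
        classif, short_diag = "improvement", bmk + " now builds"              # assumed body
    elif row["status_x"] == "failed-to-run" and row["status_y"] == "success":
        classif, short_diag = "improvement", bmk + " now runs successfully"   # assumed body
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "success":
        classif, short_diag = "improvement", bmk + " now builds and runs"     # assumed body
    return classif, short_diag

In this log every row falls through all five conditions (both statuses are either "failed-to-run" or "success"), so classif stays empty and the caller's "if classif != change_kind: continue" skips each row.
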
@@ -382,21 +412,51 @@ output-bmk-results.py(258): print(results_df)
1 502.gcc_r cpugcc_r_base.default ... failed-to-run failed-to-run
2 505.mcf_r mcf_r_base.default ... success success
4 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
-5 519.lbm_r lbm_r_base.default ... -1 failed-to-run
-6 519.lbm_r lbm_r_base.default ... -1 failed-to-run
-7 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-8 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-9 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-10 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-11 531.deepsjeng_r deepsjeng_r_base.default ... success success
-13 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-14 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-15 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-16 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-18 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-19 557.xz_r xz_r_base.default ... success success
+5 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
+6 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
+7 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+8 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+9 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
+10 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+16 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+17 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+18 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[19 rows x 20 columns]
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(266): continue;
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(266): continue;
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
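
The loop driving those calls (trace lines 261-266) is a plain pandas iteration filtered by classification kind. A sketch; the function wrapper and the print placeholder are assumptions, and the script runs this loop inline with change_kind set to "regression" or "improvement":

def report_status_changes(results_df, change_kind):
    # Hypothetical wrapper around the inline loop at output-bmk-results.py:261-266.
    for index, row in results_df.iterrows():
        classif, short_diag = get_status_diag(row)
        # Keep only rows whose build/run classification matches the kind being
        # reported; in this log the filter rejects every row.
        if classif != change_kind:
            continue
        print(short_diag)  # placeholder for the real reporting code (not traced)
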
@@ -735,7 +795,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -757,7 +817,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=1% (threshold=3%)
+DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -801,6 +861,28 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 538.imagick_r,imagick_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(184): return (result - 100 > threshold)
+output-bmk-results.py(233): continue
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 541.leela_r,leela_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
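
The threshold lookup traced in this hunk (get_threshold, lines 98-110, calling get_specific_thresholds, lines 57-62) falls back to a default table when no per-benchmark/per-symbol variability entry exists. A hedged sketch; the parameter names, the untraced branches, and the placeholder data structures are assumptions based on how the trace uses them:

import numpy as np

# Illustrative stand-ins for state the script loads elsewhere (assumptions):
specific_variability = None     # pandas DataFrame of per-symbol noise levels, or None
default_threshold = {("exe", "sample", "default"): 3,
                     ("symbol", "sample", "default"): 15}

def get_specific_thresholds(metric, mode, bmk, symb):
    if specific_variability is None:
        return np.nan                              # untraced branch, assumed
    var = specific_variability[(specific_variability['benchmark'] == bmk)
                               & (specific_variability['symbol'].str.strip() == symb)]
    if var.empty:
        # The path taken throughout this log: no specific entry found.
        return np.nan
    return float(var.iloc[0][mode])                # untraced branch, assumed layout

def get_threshold(change_kind, metric, mode, bmk, symb):
    # The call at trace line 226 passes (sym_type, metric, mode, benchmark, symbol).
    if metric == "sample":
        spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
        if not np.isnan(spec_thr):
            return spec_thr                        # untraced branch, assumed
    if metric == "num_vect_loops" or metric == "num_sve_loops":
        return 0                                   # untraced branch, assumed value
    return default_threshold[(change_kind, metric, mode)]

Throughout this log get_specific_thresholds returns NaN (the var.empty path), so the defaults apply: the DEBUG lines show 3% for exe-level checks and 15% for symbol-level checks.
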
@@ -951,7 +1033,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -973,7 +1055,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=1% (threshold=3%)
+DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1017,6 +1099,28 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 538.imagick_r,imagick_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(193): return (100 - result > threshold)
+output-bmk-results.py(233): continue
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 541.leela_r,leela_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
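
The regression/improvement predicates traced at lines 183-184 and 192-193 compare the relative metric (rel_sample, where 100 means unchanged) against the threshold. A minimal sketch; the metric_utils stand-in, the dispatch-table contents, and the branches for metrics outside higher_regress_metrics are assumptions:

class metric_utils:
    # Stand-in for the project's metric_utils module; the trace shows "sample"
    # taking the higher-regress branch, so it must be in this set.
    higher_regress_metrics = {"sample"}

def is_entry_regression(metric, result, threshold):
    # 'result' is rel_<metric>: 100 means unchanged, >100 means the metric grew.
    if metric in metric_utils.higher_regress_metrics:
        return (result - 100 > threshold)
    return (100 - result > threshold)       # untraced branch, assumed mirror image

def is_entry_improvement(metric, result, threshold):
    if metric in metric_utils.higher_regress_metrics:
        return (100 - result > threshold)
    return (result - 100 > threshold)       # untraced branch, assumed mirror image

# Dispatch table indexed by change_kind at trace line 232 (contents assumed).
is_entry_xxx = {
    "regression": is_entry_regression,
    "improvement": is_entry_improvement,
}

Note that the DEBUG lines print 100 - rel_sample, so "sample=-1%" means rel_sample is 101 (1% more samples than before); with a 3% threshold neither predicate fires and the entry is skipped.
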
@@ -1123,12 +1227,37 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 538.imagick_r,libc.so.6 : sample=33% (threshold=15%)
+DEBUG: checking symbol.regression : 538.imagick_r,libc.so.6 : sample=-50% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(233): continue
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(168): item=bmk+":"+row["symbol"]
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(242): and row['symbol_md5sum_x'] != "d41d8cd98f00b204e9800998ecf8427e":
+output-bmk-results.py(243): f_skip.write_csv((row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(244): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
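
In the new log this hunk shows the 538.imagick_r,libc.so.6 entry passing the regression predicate (rel_sample is 150, printed as sample=-50%) and then being diverted to the skip CSV because the symbol's md5sum is unchanged. A hedged sketch of get_short_long_diag (lines 137-174) and of the skip condition at lines 239-242; the untraced lines and the helper name should_skip_unchanged_symbol are assumptions:

def get_short_long_diag(row, metric, sym_type, change_kind):
    # Reconstructed from traced lines 137-174; untraced branches are assumed.
    bmk = row["benchmark"]
    rel_value = row["rel_" + metric]
    prev_value = row[metric + "_x"]
    curr_value = row[metric + "_y"]
    suffix = ""
    if metric == "sample":
        prefix_regression = "slowed down by"
        prefix_improvement = "sped up by"
        suffix = "perf samples"
    item = bmk                                   # assumed default for sym_type == "exe"
    if sym_type == "symbol":
        item = bmk + ":" + row["symbol"]
    short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind],
                                         abs(rel_value - 100))
    long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value,
                                                   curr_value, suffix)
    return abs(rel_value - 100), short_diag, long_diag

EMPTY_MD5 = "d41d8cd98f00b204e9800998ecf8427e"   # md5sum of empty input

def should_skip_unchanged_symbol(row, metric):
    # Hypothetical helper: the script inlines this condition at lines 239-242.
    # Identical, valid md5sums mean the symbol's code did not change, so a
    # "sample" delta is treated as noise rather than a genuine change.
    return (metric == "sample"
            and row["symbol_md5sum_x"] == row["symbol_md5sum_y"]
            and row["symbol_md5sum_x"] != "-1"
            and row["symbol_md5sum_x"] != EMPTY_MD5)

When the condition holds, write_csv (lines 41-43) appends (benchmark, symbol, short_diag, long_diag) to the skip file and the loop continues without reporting the entry.
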
@@ -1207,37 +1336,12 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 538.imagick_r,libc.so.6 : sample=33% (threshold=15%)
+DEBUG: checking symbol.improvement : 538.imagick_r,libc.so.6 : sample=-50% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(168): item=bmk+":"+row["symbol"]
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(242): and row['symbol_md5sum_x'] != "d41d8cd98f00b204e9800998ecf8427e":
-output-bmk-results.py(243): f_skip.write_csv((row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(244): continue
+output-bmk-results.py(233): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold