Diffstat (limited to 'notify/output-bmk-results.log')
-rw-r--r--  notify/output-bmk-results.log  462
1 file changed, 264 insertions(+), 198 deletions(-)
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 53f8bbc..ba2aefa 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -109,19 +109,18 @@ output-bmk-results.py(258): print(results_df)
6 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
7 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
8 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-9 519.lbm_r lbm_r_base.default ... failed-to-run success
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+9 519.lbm_r lbm_r_base.default ... success failed-to-run
+10 520.omnetpp_r omnetpp_r_base.default ... success failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+18 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+19 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[16 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -214,27 +213,25 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
-output-bmk-results.py(129): classif="improvement"
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
+DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r failed to run
+output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(271): if change_kind == "regression":
+output-bmk-results.py(272): f_regr.write("# {0},{1}\n".format(row["symbol"], short_diag))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(273): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -243,12 +240,25 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
+DEBUG: *** 520.omnetpp_r,omnetpp_r_base.default : 520.omnetpp_r failed to run
+output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(271): if change_kind == "regression":
+output-bmk-results.py(272): f_regr.write("# {0},{1}\n".format(row["symbol"], short_diag))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(273): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
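
The hunks above change which branch of get_status_diag fires for 519.lbm_r and 520.omnetpp_r: both now go from "success" to "failed-to-run" and are classified as regressions. Based only on the line numbers and branch conditions visible in the trace, the classification helper can be reconstructed roughly as follows (untraced branch bodies are marked as assumptions):

def get_status_diag(row):
    # Sketch of output-bmk-results.py lines 113-134, reconstructed from the trace.
    # status_x is the status in the reference run, status_y in the current run.
    bmk = row["benchmark"]
    short_diag = ""
    classif = ""
    if row["status_x"] != "failed-to-build" and row["status_y"] == "failed-to-build":
        pass  # lines 119-120: not exercised in this trace
    elif row["status_x"] == "success" and row["status_y"] == "failed-to-run":
        short_diag = "{0} failed to run".format(bmk)   # line 122
        classif = "regression"                         # line 123
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "failed-to-run":
        pass  # lines 125-126: not exercised in this trace
    elif row["status_x"] == "failed-to-run" and row["status_y"] == "success":
        short_diag = "{0} run now OK".format(bmk)      # line 128 (old side of the diff)
        classif = "improvement"                        # line 129
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "success":
        pass  # lines 131-133: not exercised in this trace
    return classif, short_diag                         # line 134

For the new 519.lbm_r row (status_x "success", status_y "failed-to-run") this returns ("regression", "519.lbm_r failed to run"), matching the DEBUG line above; the caller then writes the CSV entry and, for regressions, the "# symbol,diag" line to f_regr and the "++benchmarks {benchmark} " fragment to f_ebp (lines 268-273), while rows whose classification does not match change_kind are skipped at line 266.
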
@@ -367,7 +377,6 @@ output-bmk-results.py(275): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(294): output_bmk_results_status(exe_df, "improvement", None, None, run_step_artifacts, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_status
output-bmk-results.py(256): f_out = Outfile("{0}/status.{1}".format(run_step_artifacts, change_kind), "w", predicate=(details=="verbose"))
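
The Outfile wrapper whose write, write_csv and close methods keep appearing in the trace gates all output on a predicate (details == "verbose") and deletes files that end up empty; the -/+ flips of line 33 in this diff are exactly that os.remove call running or not. A minimal sketch, assuming a constructor that is not visible in this trace:

import csv
import os

class Outfile(object):
    # Sketch of the traced helper (output-bmk-results.py lines 29-43).
    # The constructor body is an assumption; only the predicate attribute,
    # the csv writer and the filename are implied by the traced methods.
    def __init__(self, filename, mode, predicate=True):
        self.filename = filename
        self.predicate = predicate
        self.outf = open(filename, mode) if predicate else None
        self.csvwriter = csv.writer(self.outf) if self.outf else None

    def write(self, string):
        if not self.predicate or not self.outf:        # line 36
            return
        self.outf.write(string)                        # line 38

    def write_csv(self, arr):
        if not self.predicate or not self.csvwriter:   # line 41
            return
        self.csvwriter.writerow(arr)                   # line 43

    def close(self):
        if not self.outf:                              # line 29
            return
        self.outf.close()                              # line 31
        if os.stat(self.filename).st_size == 0:        # line 32
            os.remove(self.filename)                   # line 33
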
@@ -385,19 +394,18 @@ output-bmk-results.py(258): print(results_df)
6 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
7 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
8 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-9 519.lbm_r lbm_r_base.default ... failed-to-run success
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+9 519.lbm_r lbm_r_base.default ... success failed-to-run
+10 520.omnetpp_r omnetpp_r_base.default ... success failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+18 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+19 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[16 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -490,30 +498,8 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
-output-bmk-results.py(129): classif="improvement"
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
-DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r run now OK
-output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(271): if change_kind == "regression":
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
output-bmk-results.py(266): continue;
@@ -525,9 +511,8 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
output-bmk-results.py(266): continue;
@@ -649,6 +634,7 @@ output-bmk-results.py(275): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
+output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(297): output_bmk_results_1(exe_df, "exe", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
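
From here on the trace is inside output_bmk_results_1, which walks the per-benchmark ("exe") or per-symbol rows and filters them against a noise threshold. A sketch of that loop as exercised in the hunks below, assuming the helpers reconstructed elsewhere in this log (Outfile, get_threshold, is_entry_xxx, get_short_long_diag):

def output_bmk_results_1(out_df, sym_type, change_kind, f_regr, f_skip, f_ebp,
                         run_step_artifacts, metric, mode, details):
    # Sketch of output-bmk-results.py lines 218-253 as seen in this trace; the
    # md5sum special case and the regression-only bookkeeping are only partly
    # visible, so their bodies are left as comments.
    f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind),
                    "w", predicate=(details == "verbose"))              # line 218
    for index, row in out_df.iterrows():                                # line 224
        threshold = get_threshold(sym_type, metric, mode,
                                  row["benchmark"], row["symbol"])      # line 226
        print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"
              .format(sym_type, change_kind, row["benchmark"], row["symbol"],
                      metric, 100 - row["rel_" + metric], threshold))   # lines 228-229
        if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
            continue                                                    # line 233
        percent_change, short_diag, long_diag = \
            get_short_long_diag(row, metric, sym_type, change_kind)     # line 235
        # lines 239-242: entries whose symbol_md5sum did not change get special
        # handling; that branch is not fully visible in this trace
        print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"],
                                                row["symbol"], long_diag))  # line 246
        f_out.write_csv((percent_change, row["benchmark"], row["symbol"],
                         short_diag, long_diag))                        # line 248
        if change_kind == "regression":                                 # line 249
            pass  # regression-only output (f_regr/f_ebp); not exercised in these hunks
    f_out.close()                                                       # line 253
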
@@ -671,14 +657,39 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 505.mcf_r,mcf_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(184): return (result - 100 > threshold)
+output-bmk-results.py(233): continue
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
output-bmk-results.py(100): if not np.isnan(spec_thr):
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 505.mcf_r,mcf_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 519.lbm_r,lbm_r_base.default : sample=200% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
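
The biggest behavioural change in this diff is in the threshold computation: where the old run fell through to `return np.nan` at line 83 and used the default threshold, the new run finds a per-benchmark variability entry, scales it, and clamps it from below by the default (lines 69-105). Reconstructed from the traced lines, assuming specific_variability, default_threshold and change_kind come from the script's enclosing scope and that the placement of the scaling at line 74 is as written:

import numpy as np

def get_specific_thresholds(metric, mode, bmk, symb):
    # Sketch of output-bmk-results.py lines 57-83. specific_variability is a
    # DataFrame of measured per-benchmark sample variation (loaded elsewhere).
    if specific_variability is None:                                   # line 57
        return np.nan                                                  # assumption
    var = specific_variability[(specific_variability['benchmark'] == bmk)
                               & (specific_variability['symbol'].str.strip() == symb)]
    if var.empty:                                                      # line 61
        return np.nan                                                  # line 62
    elif len(var) > 1:                                                 # line 63
        pass                      # lines 64-67: not taken in this trace
    if var.iloc[0]['sample_variation_average'] > 0:                    # line 68
        threshold = var.iloc[0]['sample_variation_average']            # line 69
        if mode == "build":                                            # line 70
            pass                  # build-mode scaling, not taken in this trace
        else:
            threshold *= 3                                             # line 74
        return threshold                                               # line 81
    return np.nan                                                      # line 83

def get_threshold(sym_type, metric, mode, bmk, symb):
    # Sketch of output-bmk-results.py lines 98-110: the variability-specific
    # threshold, when available, is never allowed to drop below the default.
    if metric == "sample":                                             # line 98
        spec_thr = get_specific_thresholds(metric, mode, bmk, symb)    # line 99
        if not np.isnan(spec_thr):                                     # line 100
            spec_thr = max(spec_thr, default_threshold[(change_kind, metric, mode)])
            return spec_thr                                            # line 105
    if metric == "num_vect_loops" or metric == "num_sve_loops":        # line 107
        pass                      # lines 108-109: not taken in this trace
    return default_threshold[(change_kind, metric, mode)]              # line 110

Both sides of the diff still print threshold=3% for whole benchmarks and threshold=15% for symbols, which suggests that after the max() clamp the default values still dominate here.
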
@@ -695,14 +706,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
+DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
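
The threshold is then applied by a pair of predicates selected through a change_kind-indexed table (line 232). Their traced branches are easy to reconstruct; the branch for metrics where lower values regress is not visible here and is an assumption:

import metric_utils   # companion module of the script, referenced in the trace

def is_entry_regression(metric, result, threshold):
    # output-bmk-results.py lines 183-184: result is the relative value in
    # percent (100 == unchanged); for metrics where higher is worse, a
    # regression is an increase beyond the threshold.
    if metric in metric_utils.higher_regress_metrics:
        return (result - 100 > threshold)
    return (100 - result > threshold)      # assumption: mirror case, not traced

def is_entry_improvement(metric, result, threshold):
    # output-bmk-results.py lines 192-193: the mirror image of the test above.
    if metric in metric_utils.higher_regress_metrics:
        return (100 - result > threshold)
    return (result - 100 > threshold)      # assumption: mirror case, not traced

# line 232 dispatches through a table keyed on change_kind:
is_entry_xxx = {"regression": is_entry_regression,
                "improvement": is_entry_improvement}

For the new 519.lbm_r entry the DEBUG line prints sample=200%, i.e. 100 - rel_sample = 200 and rel_sample = -100; is_entry_regression("sample", -100, 3) is False, so the exe.regression pass skips it, while is_entry_improvement("sample", -100, 3) is True, which is why it resurfaces below as "sped up by 200%".
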
@@ -761,10 +775,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -783,16 +800,67 @@ output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mod
output-bmk-results.py(57): if specific_variability is None:
output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(62): return np.nan
output-bmk-results.py(100): if not np.isnan(spec_thr):
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
+DEBUG: checking exe.improvement : 519.lbm_r,lbm_r_base.default : sample=200% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(193): return (100 - result > threshold)
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(170): item=bmk
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
+DEBUG: *** 519.lbm_r,lbm_r_base.default : sped up by 200% - 519.lbm_r - from 1 to -1 perf samples
+output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(249): if change_kind == "regression":
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(63): elif len(var)>1:
+output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
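
The "sped up by 200% - 519.lbm_r - from 1 to -1 perf samples" message comes from get_short_long_diag, whose traced path for the sample metric looks roughly like this (the num_vect_loops/num_sve_loops branch and the symbol-level naming at lines 168-169 are not visible here):

def get_short_long_diag(row, metric, sym_type, change_kind):
    # Sketch of output-bmk-results.py lines 137-174 as exercised above for
    # metric == "sample" and sym_type == "exe".
    bmk = row["benchmark"]                              # line 137
    rel_value = row["rel_" + metric]                    # line 139
    prev_value = row[metric + "_x"]                     # line 140
    curr_value = row[metric + "_y"]                     # line 141
    suffix = ""                                         # line 152
    if metric == "sample":                              # line 153
        prefix_regression = "slowed down by"            # line 154
        prefix_improvement = "sped up by"               # line 155
        suffix = "perf samples"                         # line 156
    if sym_type == "symbol":                            # line 167
        pass            # lines 168-169: symbol naming, not traced here
    item = bmk          # line 170 (exe path; placement relative to the if is an assumption)
    short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind],
                                         abs(rel_value - 100))          # line 172
    long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value,
                                                   curr_value, suffix)  # line 173
    return abs(rel_value - 100), short_diag, long_diag                  # line 174

With prev_value 1, curr_value -1 and rel_value -100 this yields exactly the long_diag printed in the DEBUG line for 519.lbm_r above.
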
@@ -828,7 +896,6 @@ output-bmk-results.py(253): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(300): output_bmk_results_1(sym_df, "symbol", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -851,10 +918,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -875,14 +945,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -899,14 +972,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -923,14 +999,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-5% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -947,14 +1026,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-3% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -971,14 +1053,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -995,34 +1080,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=3% (threshold=15%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
-output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1050,7 +1114,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1067,10 +1131,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1109,10 +1176,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1133,38 +1203,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=-2% (threshold=15%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
-output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1181,14 +1230,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-5% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1205,14 +1257,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1229,14 +1284,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1253,14 +1311,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1277,10 +1338,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1308,7 +1372,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1325,10 +1389,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1360,7 +1427,6 @@ output-bmk-results.py(306): f_regr.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(307): f_ebp.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf: