author     TCWG BuildSlave <tcwg-buildslave@linaro.org>  2023-10-05 14:44:45 +0000
committer  TCWG BuildSlave <tcwg-buildslave@linaro.org>  2023-10-05 14:45:32 +0000
commit     81a4cde9cc65392841de715e4cc622475175d498 (patch)
tree       dee6cadad0d8f4f23383ab3a8134385c710b6358 /notify
parent     20bdf641b2c7ee9e50781202282c73b2ada7294b (diff)
onsuccess: #30: 1: [TCWG CI] https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/30/
Results : | # reset_artifacts: | -10 | # build_bmk_llvm: | -3 | # benchmark -- -O2_marm: | 1 check_regression status : 0
Diffstat (limited to 'notify')
-rw-r--r--  notify/exe.regression               2
-rw-r--r--  notify/extra-bisect-params          2
-rw-r--r--  notify/jira/comment-template.txt    4
-rw-r--r--  notify/mail-body.txt               16
-rw-r--r--  notify/mail-recipients.txt          2
-rw-r--r--  notify/mail-subject.txt             2
-rw-r--r--  notify/output-bmk-results.log     294
-rw-r--r--  notify/results.regressions          2
8 files changed, 223 insertions, 101 deletions
diff --git a/notify/exe.regression b/notify/exe.regression
deleted file mode 100644
index e2fb4f0..0000000
--- a/notify/exe.regression
+++ /dev/null
@@ -1,2 +0,0 @@
--1,523.xalancbmk_r,cpuxalan_r_base.default,523.xalancbmk_r failed to run,523.xalancbmk_r failed to run
--1,519.lbm_r,lbm_r_base.default,519.lbm_r failed to run,519.lbm_r failed to run
diff --git a/notify/extra-bisect-params b/notify/extra-bisect-params
index dc96b7f..fa6c7c9 100644
--- a/notify/extra-bisect-params
+++ b/notify/extra-bisect-params
@@ -1 +1 @@
-extra_build_params=++benchmarks 519.lbm_r ++benchmarks 523.xalancbmk_r
+extra_build_params=
diff --git a/notify/jira/comment-template.txt b/notify/jira/comment-template.txt
index 135467a..7484411 100644
--- a/notify/jira/comment-template.txt
+++ b/notify/jira/comment-template.txt
@@ -1,3 +1,3 @@
[LLVM-651]
-519.lbm_r failed to run
-Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/29/artifact/artifacts/notify/mail-body.txt/*view*/
+No change
+Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/30/artifact/artifacts/notify/mail-body.txt/*view*/
diff --git a/notify/mail-body.txt b/notify/mail-body.txt
index 3f157a2..27cbc73 100644
--- a/notify/mail-body.txt
+++ b/notify/mail-body.txt
@@ -2,11 +2,15 @@ Dear contributor, our automatic CI has detected problems related to your patch(e
In CI config tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O2 after:
- | baseline build
+ | 964 commits in llvm
+ | 3db7d0dffb98 [MachineFunction][DebugInfo][nfc] Introduce EntryValue variable kind
+ | 4d9c936a3e1a [libc++] Adjust tests using ext/* headers that undefine __DEPRECATED
+ | 7cc57c07e36f [AArch64] Handle vector with two different values with efficient vector mask
+ | eea5d9cc4188 [libc][benchmark] Do not force static linking
+ | b51e6bfcb674 [libc] Allows cross compilation of membenchmarks
+ | ... and 959 more commits in llvm
-the following benchmarks slowed down by more than 3%:
-- 523.xalancbmk_r failed to run
-- 519.lbm_r failed to run
+No change
The reproducer instructions below can be used to re-build both "first_bad" and "last_good" cross-toolchains used in this bisection. Naturally, the scripts will fail when triggering benchmarking jobs if you don't have access to Linaro TCWG CI.
@@ -23,6 +27,6 @@ This benchmarking CI is work-in-progress, and we welcome feedback and suggestion
-----------------8<--------------------------8<--------------------------8<--------------------------
The information below can be used to reproduce a debug environment:
-Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/29/artifact/artifacts
-Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/27/artifact/artifacts
+Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/30/artifact/artifacts
+Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/29/artifact/artifacts
diff --git a/notify/mail-recipients.txt b/notify/mail-recipients.txt
index aa219ef..56b65bb 100644
--- a/notify/mail-recipients.txt
+++ b/notify/mail-recipients.txt
@@ -1 +1 @@
-bcc:tcwg-validation@linaro.org
+bcc:tcwg-validation@linaro.org,maxim.kuvyrkov@linaro.org
diff --git a/notify/mail-subject.txt b/notify/mail-subject.txt
index 481d6d8..a2c0392 100644
--- a/notify/mail-subject.txt
+++ b/notify/mail-subject.txt
@@ -1 +1 @@
-[Linaro-TCWG-CI] baseline build: 519.lbm_r failed to run
+[Linaro-TCWG-CI] 964 commits in llvm: No change
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 0099602..8966a7a 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -179,6 +179,72 @@ output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mod
output-bmk-results.py(57): if specific_variability is None:
output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 508.namd_r,namd_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 510.parest_r,parest_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 511.povray_r,povray_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
output-bmk-results.py(83): return np.nan
@@ -188,40 +254,34 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 519.lbm_r,lbm_r_base.default : sample=-2147483547% (threshold=3%)
+DEBUG: checking exe.regression : 519.lbm_r,lbm_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(176): return (result - 100 > threshold)
-output-bmk-results.py(227): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(114): rel_value = row["rel_" + metric]
-output-bmk-results.py(115): prev_value = row[metric + "_x"]
-output-bmk-results.py(116): curr_value = row[metric + "_y"]
-output-bmk-results.py(118): if metric == "sample":
-output-bmk-results.py(119): if curr_value == 999999999:
-output-bmk-results.py(122): elif curr_value == 888888888:
-output-bmk-results.py(123): short_diag = "{0} failed to run".format(bmk)
-output-bmk-results.py(124): return -1, short_diag, short_diag
-output-bmk-results.py(231): if metric == "sample" \
-output-bmk-results.py(232): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(233): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(238): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r failed to run
-output-bmk-results.py(240): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(241): if change_kind == "regression":
-output-bmk-results.py(242): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(243): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 520.omnetpp_r,omnetpp_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
output-bmk-results.py(216): for index, row in out_df.iterrows():
output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -240,40 +300,12 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-34885649% (threshold=3%)
+DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(176): return (result - 100 > threshold)
-output-bmk-results.py(227): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(114): rel_value = row["rel_" + metric]
-output-bmk-results.py(115): prev_value = row[metric + "_x"]
-output-bmk-results.py(116): curr_value = row[metric + "_y"]
-output-bmk-results.py(118): if metric == "sample":
-output-bmk-results.py(119): if curr_value == 999999999:
-output-bmk-results.py(122): elif curr_value == 888888888:
-output-bmk-results.py(123): short_diag = "{0} failed to run".format(bmk)
-output-bmk-results.py(124): return -1, short_diag, short_diag
-output-bmk-results.py(231): if metric == "sample" \
-output-bmk-results.py(232): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(233): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(238): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 523.xalancbmk_r,cpuxalan_r_base.default : 523.xalancbmk_r failed to run
-output-bmk-results.py(240): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(241): if change_kind == "regression":
-output-bmk-results.py(242): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(243): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(225): continue
output-bmk-results.py(216): for index, row in out_df.iterrows():
output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -339,7 +371,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -444,6 +476,7 @@ output-bmk-results.py(245): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
+output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(263): output_bmk_results_1(exe_df, "exe", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(210): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -535,6 +568,72 @@ output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mod
output-bmk-results.py(57): if specific_variability is None:
output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 508.namd_r,namd_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 510.parest_r,parest_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 511.povray_r,povray_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
output-bmk-results.py(83): return np.nan
@@ -544,7 +643,29 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 519.lbm_r,lbm_r_base.default : sample=-2147483547% (threshold=3%)
+DEBUG: checking exe.improvement : 519.lbm_r,lbm_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 520.omnetpp_r,omnetpp_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -568,7 +689,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-34885649% (threshold=3%)
+DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -639,7 +760,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -777,7 +898,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=0% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -804,7 +925,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] price_out_impl : sample=-1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] price_out_impl : sample=3% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -831,7 +952,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=0% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -858,7 +979,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] replace_weaker_arc : sample=1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] replace_weaker_arc : sample=-2% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -885,7 +1006,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=5% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-3% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -912,7 +1033,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=3% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-5% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -939,7 +1060,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=-3% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=-10% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -966,7 +1087,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-3% (threshold=15.72%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=5% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -993,7 +1114,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=0% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -1020,7 +1141,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -1047,7 +1168,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-1% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
@@ -1092,7 +1213,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=0% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1119,7 +1240,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] price_out_impl : sample=-1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] price_out_impl : sample=3% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1146,7 +1267,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=0% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1173,7 +1294,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] replace_weaker_arc : sample=1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] replace_weaker_arc : sample=-2% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1200,7 +1321,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=5% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-3% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1227,7 +1348,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-5% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1254,7 +1375,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=-3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=-10% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1281,7 +1402,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-3% (threshold=15.72%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=5% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1308,7 +1429,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=0% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=-1% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1335,7 +1456,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1362,7 +1483,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-1% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
@@ -1390,6 +1511,7 @@ output-bmk-results.py(271): f_regr.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
+output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(272): f_ebp.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
diff --git a/notify/results.regressions b/notify/results.regressions
deleted file mode 100644
index 131e104..0000000
--- a/notify/results.regressions
+++ /dev/null
@@ -1,2 +0,0 @@
-# lbm_r_base.default,519.lbm_r failed to run
-# cpuxalan_r_base.default,523.xalancbmk_r failed to run
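
Note on the notify/output-bmk-results.log trace above: every DEBUG line is the same check repeated per benchmark and per symbol -- the relative result (100 == unchanged) is compared against a threshold of 3% for whole-benchmark exe.* entries and 15% for per-symbol ones. Below is a minimal sketch of that predicate logic, assuming the names shown; only the two comparisons are taken from the trace, everything else (the metric set and the fallback branches) is illustrative.

# Minimal sketch, NOT the actual output-bmk-results.py: it only mirrors the two
# comparisons visible in the trace above.  The function/set names and the
# branches for metrics where a higher value is better are assumptions.

HIGHER_REGRESS_METRICS = {"sample"}   # assumed: run time, where higher == worse

def is_entry_regression(metric, rel_result, threshold):
    # rel_result is the new value as a percentage of the old one (100 == unchanged);
    # the DEBUG lines print 100 - rel_result as the "sample=N%" change.
    if metric in HIGHER_REGRESS_METRICS:
        return rel_result - 100 > threshold
    return 100 - rel_result > threshold   # assumed fallback for "higher is better" metrics

def is_entry_improvement(metric, rel_result, threshold):
    if metric in HIGHER_REGRESS_METRICS:
        return 100 - rel_result > threshold
    return rel_result - 100 > threshold   # assumed fallback

# A 0% change against the 3% whole-benchmark threshold (or the 15% per-symbol one)
# is neither a regression nor an improvement -- hence the "No change" verdict.
print(is_entry_regression("sample", 100.0, 3.0))    # False
print(is_entry_improvement("sample", 100.0, 3.0))   # False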