author     TCWG BuildSlave <tcwg-buildslave@linaro.org>    2023-11-22 23:46:37 +0000
committer  TCWG BuildSlave <tcwg-buildslave@linaro.org>    2023-11-22 23:47:13 +0000
commit     fce55b2581bb52653f54880fff5f4aaa346be0d3 (patch)
tree       7602258a3dca03076f31e492562a0bf161ce7373 /notify
parent     7167e8887de4b148e09752c76d68b053626d4bd7 (diff)
onsuccess: #25: 1: [TCWG CI] https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/25/
Results : | # reset_artifacts: | -10 | # build_bmk_llvm: | -3 | # benchmark -- -O2_marm: | 1 check_regression status : 0
Diffstat (limited to 'notify')
-rw-r--r--  notify/jira/comment-template.txt      2
-rw-r--r--  notify/lnt_report.json               46
-rw-r--r--  notify/mail-body.txt                 18
-rw-r--r--  notify/mail-subject.txt               2
-rw-r--r--  notify/output-bmk-results.log       127
-rw-r--r--  notify/status.improvement             1
6 files changed, 113 insertions, 83 deletions
diff --git a/notify/jira/comment-template.txt b/notify/jira/comment-template.txt
index f450703..2ba8bd0 100644
--- a/notify/jira/comment-template.txt
+++ b/notify/jira/comment-template.txt
@@ -1,3 +1,3 @@
[LLVM-651]
No change
-Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/24/artifact/artifacts/notify/mail-body.txt/*view*/
+Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/25/artifact/artifacts/notify/mail-body.txt/*view*/
diff --git a/notify/lnt_report.json b/notify/lnt_report.json
index 2433925..c889262 100644
--- a/notify/lnt_report.json
+++ b/notify/lnt_report.json
@@ -6,47 +6,47 @@
"Run": {
"Info": {
"__report_version__": "1",
- "run_order": "llvmorg-17-init-09356-g815eab2d3cbb",
+ "run_order": "llvmorg-17-init-09924-g52882de0e641",
"tag": "tcwg_bmk-code_speed-cpu2017rate"
},
- "Start Time": "2023-11-22 23:38:36"
+ "Start Time": "2023-11-22 23:46:31"
},
"Tests": [
{
"Data": [
- 192787
+ 91568
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.544.nab_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.code_size"
}
,
{
"Data": [
- 29232
+ 3879428
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.code_size"
}
,
{
"Data": [
- 1719248
+ 29232
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
}
,
{
"Data": [
- 135865
+ 13056
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
}
,
{
"Data": [
- 177927
+ 177991
],
"Info": {},
"Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.code_size"
@@ -54,31 +54,31 @@
,
{
"Data": [
- 91600
+ 135917
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
}
,
{
"Data": [
- 3879556
+ 1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.exec"
}
,
{
"Data": [
- 13056
+ 10156
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
}
,
{
"Data": [
- 10286
+ 10303
],
"Info": {},
"Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.exec"
@@ -86,18 +86,10 @@
,
{
"Data": [
- 14010
+ 14047
],
"Info": {},
"Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.exec"
}
- ,
- {
- "Data": [
- 10148
- ],
- "Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
- }
]
}
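
The lnt_report.json hunks above reorder and update the per-benchmark code_size and exec samples for the new run (run_order llvmorg-17-init-09924-g52882de0e641). Below is a minimal sketch of how a report with this "Run"/"Tests" shape could be assembled; it is not the CI's actual generator, it covers only the fields visible in the hunks, and make_lnt_report / samples are illustrative names. The example value is the 519.lbm_r code_size sample taken from the diff.

# Minimal sketch (not the CI's actual generator): builds a report with the
# same "Run"/"Tests" shape as notify/lnt_report.json above. Only fields
# visible in the hunks are used; anything else is omitted.
import json

def make_lnt_report(run_order, start_time, samples):
    # samples: mapping of test name -> list of measured values
    return {
        "Run": {
            "Info": {
                "__report_version__": "1",
                "run_order": run_order,
                "tag": "tcwg_bmk-code_speed-cpu2017rate",
            },
            "Start Time": start_time,
        },
        "Tests": [
            {"Data": values, "Info": {}, "Name": name}
            for name, values in samples.items()
        ],
    }

report = make_lnt_report(
    "llvmorg-17-init-09924-g52882de0e641",
    "2023-11-22 23:46:31",
    {"tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size": [13056]},
)
print(json.dumps(report, indent=2))
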
diff --git a/notify/mail-body.txt b/notify/mail-body.txt
index 32ba936..06c2405 100644
--- a/notify/mail-body.txt
+++ b/notify/mail-body.txt
@@ -2,13 +2,13 @@ Dear contributor, our automatic CI has detected problems related to your patch(e
In CI config tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O2 after:
- | 466 commits in llvm
- | 815eab2d3cbb [DebugLocEntry][nfc] Remove redundant cast
- | fea8c073561f [Support][Parallel] Add sequential mode to TaskGroup::spawn().
- | 329bfcc8df47 [LangRef] Improve wording DW_OP_LLVM_entry_value
- | e23891a3823e [AMDGPU][Disassembler] Fix a spurious error message in an instruction comment.
- | c08dc8b5fbd0 [AMDGPU][Disassembler] Pre-commit a test on an error message generated in an instruction comment.
- | ... and 461 more commits in llvm
+ | 568 commits in llvm
+ | 52882de0e641 [lldb][test] TestCPP20Standard.py: make it a libc++ test
+ | e15d6b520e1e [lldb][DWARFExpression] Fix DW_OP_div to use signed division
+ | 2cdb6b84c157 [clang][dataflow] Expose DataflowAnalysisContext from DataflowEnvironment.
+ | 084ca632ac81 [EarlyCSE] Only combine metadata for load CSE
+ | a67a21bf41da [EarlyCSE] Add additional metadata preservation test (NFC)
+ | ... and 563 more commits in llvm
No change
@@ -28,6 +28,6 @@ This benchmarking CI is work-in-progress, and we welcome feedback and suggestion
-----------------8<--------------------------8<--------------------------8<--------------------------
The information below can be used to reproduce a debug environment:
-Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/24/artifact/artifacts
-Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/23/artifact/artifacts
+Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/25/artifact/artifacts
+Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O2-build/24/artifact/artifacts
diff --git a/notify/mail-subject.txt b/notify/mail-subject.txt
index dacb113..e006f2a 100644
--- a/notify/mail-subject.txt
+++ b/notify/mail-subject.txt
@@ -1 +1 @@
-[Linaro-TCWG-CI] 466 commits in llvm: No change on arm O2
+[Linaro-TCWG-CI] 568 commits in llvm: No change on arm O2
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 5a78195..2312872 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -109,18 +109,19 @@ output-bmk-results.py(258): print(results_df)
7 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
8 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
9 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+10 519.lbm_r lbm_r_base.default ... failed-to-run success
+12 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+13 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
+14 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+15 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+16 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+17 531.deepsjeng_r deepsjeng_r_base.default ... success success
+22 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+23 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+24 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+25 557.xz_r xz_r_base.default ... success success
-[16 rows x 20 columns]
+[17 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -215,6 +216,21 @@ output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["s
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
+output-bmk-results.py(129): classif="improvement"
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(266): continue;
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
@@ -369,18 +385,19 @@ output-bmk-results.py(258): print(results_df)
7 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
8 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
9 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+10 519.lbm_r lbm_r_base.default ... failed-to-run success
+12 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
+13 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
+14 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+15 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+16 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+17 531.deepsjeng_r deepsjeng_r_base.default ... success success
+22 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+23 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+24 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+25 557.xz_r xz_r_base.default ... success success
-[16 rows x 20 columns]
+[17 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -475,6 +492,27 @@ output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["s
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
+output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
+output-bmk-results.py(129): classif="improvement"
+output-bmk-results.py(134): return classif, short_diag
+output-bmk-results.py(265): if classif != change_kind:
+output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
+DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r run now OK
+output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(271): if change_kind == "regression":
+output-bmk-results.py(261): for index, row in results_df.iterrows():
+output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
+ --- modulename: output-bmk-results, funcname: get_status_diag
+output-bmk-results.py(113): bmk = row["benchmark"]
+output-bmk-results.py(115): short_diag=""
+output-bmk-results.py(116): classif=""
+output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
+output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
+output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
@@ -611,7 +649,6 @@ output-bmk-results.py(275): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(297): output_bmk_results_1(exe_df, "exe", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -671,7 +708,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
+DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -770,7 +807,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
+DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -842,7 +879,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -869,7 +906,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] price_out_impl : sample=3% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] price_out_impl : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -923,7 +960,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] replace_weaker_arc : sample=-4% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] replace_weaker_arc : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -950,7 +987,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -977,7 +1014,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-4% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1004,7 +1041,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=20.79%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=-5% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1031,7 +1068,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=9% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-3% (threshold=18.93%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1058,7 +1095,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1085,7 +1122,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=1% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1112,7 +1149,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1157,7 +1194,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1184,7 +1221,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] price_out_impl : sample=3% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] price_out_impl : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1238,7 +1275,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] replace_weaker_arc : sample=-4% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] replace_weaker_arc : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1265,7 +1302,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1292,7 +1329,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-4% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1319,7 +1356,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=20.79%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=-5% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1346,7 +1383,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z4makeP7state_ti : sample=9% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-3% (threshold=18.93%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1373,7 +1410,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=-2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1400,7 +1437,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=1% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1427,7 +1464,7 @@ output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
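
The trace in output-bmk-results.log above repeatedly steps through get_status_diag (output-bmk-results.py lines 113-134) and then the per-metric threshold check at lines 228-232, where the printed percentage is 100 - row["rel_" + metric]. A hedged reconstruction of the classification function, pieced together from the traced source lines, follows; only the failed-to-run -> success branch (lines 127-129) has its body visible in the trace, so the other branch bodies are left as placeholders rather than guessed.

# Hedged reconstruction of get_status_diag() as implied by the traced lines
# (output-bmk-results.py 113-134). Only the failed-to-run -> success branch,
# classified as "improvement", is grounded in the trace; the other branch
# bodies are not visible and are deliberately left empty.
def get_status_diag(row):
    bmk = row["benchmark"]
    short_diag = ""
    classif = ""
    if row["status_x"] != "failed-to-build" and row["status_y"] == "failed-to-build":
        pass  # branch body not visible in the trace
    elif row["status_x"] == "success" and row["status_y"] == "failed-to-run":
        pass  # branch body not visible in the trace
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "failed-to-run":
        pass  # branch body not visible in the trace
    elif row["status_x"] == "failed-to-run" and row["status_y"] == "success":
        short_diag = "{0} run now OK".format(bmk)  # traced line 128
        classif = "improvement"                    # traced line 129
    elif row["status_x"] == "failed-to-build" and row["status_y"] == "success":
        pass  # branch body not visible in the trace
    return classif, short_diag

row = {"benchmark": "519.lbm_r", "status_x": "failed-to-run", "status_y": "success"}
print(get_status_diag(row))  # ('improvement', '519.lbm_r run now OK')

In this run that branch fires for 519.lbm_r, which flipped from failed-to-run to success in the results table, and the matching row is then written out via write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag)), producing the new notify/status.improvement file in the last hunk below.
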
diff --git a/notify/status.improvement b/notify/status.improvement
new file mode 100644
index 0000000..12aa99d
--- /dev/null
+++ b/notify/status.improvement
@@ -0,0 +1 @@
+100,519.lbm_r,lbm_r_base.default,519.lbm_r run now OK,519.lbm_r run now OK
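
The new notify/status.improvement file holds that single CSV row. A minimal sketch of reading it back, assuming the column order implied by the traced write_csv call above (score, benchmark, symbol, diagnostic, diagnostic repeated):

# Minimal sketch: parse notify/status.improvement, assuming the column
# order implied by write_csv((100, benchmark, symbol, short_diag, short_diag)).
import csv

with open("notify/status.improvement") as f:
    for score, benchmark, symbol, diag, _diag_again in csv.reader(f):
        print(f"{benchmark} ({symbol}): {diag} [score={score}]")
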