author     TCWG BuildSlave <tcwg-buildslave@linaro.org>  2023-11-22 22:12:43 +0000
committer  TCWG BuildSlave <tcwg-buildslave@linaro.org>  2023-11-22 22:13:29 +0000
commit     2fdfe7b4f1aee7dc8a7386f2f33f7f8a3b6bedf7 (patch)
tree       aaa92283d6f4bedd2f2a7efc2de114d33e3167d7 /notify
parent     98f8fe919dc4e94628e23000368441422e1d1e0a (diff)
force: #21: 1: [TCWG CI] https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/21/
Results :
| # reset_artifacts:
| -10
| # build_bmk_llvm:
| -3
| # benchmark -- -O3_marm:
| 1
| # lbm_r_base.default,519.lbm_r failed to run
| # omnetpp_r_base.default,520.omnetpp_r failed to run
check_regression status : 1
Diffstat (limited to 'notify')
-rw-r--r--  notify/exe.improvement            |   1
-rw-r--r--  notify/extra-bisect-params        |   2
-rw-r--r--  notify/jira/comment-template.txt  |   4
-rw-r--r--  notify/lnt_report.json            |  48
-rw-r--r--  notify/mail-body.txt              |  15
-rw-r--r--  notify/mail-recipients.txt        |   2
-rw-r--r--  notify/mail-subject.txt           |   2
-rw-r--r--  notify/output-bmk-results.log     | 462
-rw-r--r--  notify/results.regressions        |   2
-rw-r--r--  notify/status.improvement         |   1
-rw-r--r--  notify/status.regression          |   2
11 files changed, 295 insertions, 246 deletions
diff --git a/notify/exe.improvement b/notify/exe.improvement
new file mode 100644
index 0000000..f0e5a5a
--- /dev/null
+++ b/notify/exe.improvement
@@ -0,0 +1 @@
+200,519.lbm_r,lbm_r_base.default,sped up by 200% - 519.lbm_r,sped up by 200% - 519.lbm_r - from 1 to -1 perf samples
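
The single line added to notify/exe.improvement is a CSV record: percent change, benchmark, symbol, short diagnostic, long diagnostic. The 200% figure looks like an artifact of the bogus sample counts (1 and -1) rather than a real speed-up. Below is a minimal sketch of how such a record could be derived, assuming the relative value is computed as 100 * current / previous; the trace later in this commit only shows how the value is formatted, not how it is computed, so this function is illustrative, not the CI's code.

  # Hypothetical sketch; rel = 100 * current / previous is an assumption.
  def improvement_record(bmk, symbol, prev_samples, curr_samples):
      rel = 100.0 * curr_samples / prev_samples      # -100 for 1 -> -1 samples
      percent = abs(rel - 100)                       # 200
      prefix = "sped up by" if 100 - rel > 0 else "slowed down by"
      short = "{0} {1:.0f}% - {2}".format(prefix, percent, bmk)
      long = "{0} - from {1} to {2} perf samples".format(short, prev_samples, curr_samples)
      return "{0:.0f},{1},{2},{3},{4}".format(percent, bmk, symbol, short, long)

  print(improvement_record("519.lbm_r", "lbm_r_base.default", 1, -1))
  # 200,519.lbm_r,lbm_r_base.default,sped up by 200% - 519.lbm_r,sped up by 200% - 519.lbm_r - from 1 to -1 perf samples
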
diff --git a/notify/extra-bisect-params b/notify/extra-bisect-params
index fa6c7c9..b13b8ee 100644
--- a/notify/extra-bisect-params
+++ b/notify/extra-bisect-params
@@ -1 +1 @@
-extra_build_params=
+extra_build_params=++benchmarks 519.lbm_r ++benchmarks 520.omnetpp_r
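
notify/extra-bisect-params now requests re-running only the two benchmarks whose run status changed. A hedged sketch of how this line could be assembled from the regressed benchmarks follows; the trace later in this commit shows one "++benchmarks <name> " write per regression, but the variable names here are illustrative.

  regressed = ["519.lbm_r", "520.omnetpp_r"]
  extra = "extra_build_params=" + "".join("++benchmarks {0} ".format(b) for b in regressed)
  print(extra.rstrip())
  # extra_build_params=++benchmarks 519.lbm_r ++benchmarks 520.omnetpp_r
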
diff --git a/notify/jira/comment-template.txt b/notify/jira/comment-template.txt
index e5be918..6489e60 100644
--- a/notify/jira/comment-template.txt
+++ b/notify/jira/comment-template.txt
@@ -1,3 +1,3 @@
[LLVM-651]
-No change
-Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/19/artifact/artifacts/notify/mail-body.txt/*view*/
+sped up by 200% - 519.lbm_r
+Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/21/artifact/artifacts/notify/mail-body.txt/*view*/
diff --git a/notify/lnt_report.json b/notify/lnt_report.json
index 49551f1..99c5655 100644
--- a/notify/lnt_report.json
+++ b/notify/lnt_report.json
@@ -9,31 +9,15 @@
"run_order": "llvmorg-17-init-08842-g6bad76c7ae93",
"tag": "tcwg_bmk-code_speed-cpu2017rate"
},
- "Start Time": "2023-11-22 22:09:06"
+ "Start Time": "2023-11-22 22:12:41"
},
"Tests": [
{
"Data": [
- 1789888
- ],
- "Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.code_size"
- }
- ,
- {
- "Data": [
- 146737
- ],
- "Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
- }
- ,
- {
- "Data": [
- 13236
+ 3957824
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.code_size"
}
,
{
@@ -46,47 +30,47 @@
,
{
"Data": [
- 100020
+ 30264
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
}
,
{
"Data": [
- 3957824
+ 13236
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
}
,
{
"Data": [
- 30264
+ 100020
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.code_size"
}
,
{
"Data": [
- 1
+ 146737
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
}
,
{
"Data": [
- 10332
+ 10831
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.exec"
}
,
{
"Data": [
- 13890
+ 13870
],
"Info": {},
"Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.exec"
@@ -94,10 +78,10 @@
,
{
"Data": [
- 10940
+ 10341
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
}
]
}
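
The entries in notify/lnt_report.json all follow the same shape: a "Tests" list whose items carry a "Data" array, an "Info" dict, and a dotted "Name" of the form <tag>.<benchmark>.<metric> (code_size or exec). A minimal sketch of one such entry is shown below; this is not the CI's generator, and the keys above "Tests" are only partially visible in this hunk, so they are omitted.

  import json

  test_entry = {
      "Data": [13870],   # measured value for this metric
      "Info": {},
      "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.exec",
  }
  print(json.dumps({"Tests": [test_entry]}, indent=2))
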
diff --git a/notify/mail-body.txt b/notify/mail-body.txt
index a796ab1..cd2c143 100644
--- a/notify/mail-body.txt
+++ b/notify/mail-body.txt
@@ -2,15 +2,10 @@ Dear contributor, our automatic CI has detected problems related to your patch(e
In CI config tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3 after:
- | 848 commits in llvm
- | 6bad76c7ae93 [Demangle] fix windows tests
- | ba38640b9901 [libc] Added checks to src and dest types in bit_cast
- | 8704c3a31f60 [libc] Set minimum CUDA PTX feature to +ptx60
- | 3c9083f6757c Fix i1 vector reduction miscompilation
- | ed35b584daed [mlir][openacc] Restrict types for acc.bounds operands to index or integer type
- | ... and 843 more commits in llvm
+ | baseline build
-No change
+the following benchmarks speeds up by more than 3%:
+- sped up by 200% - 519.lbm_r - from 1 to -1 perf samples
The configuration of this build is:
Below reproducer instructions can be used to re-build both "first_bad" and "last_good" cross-toolchains used in this bisection. Naturally, the scripts will fail when triggerring benchmarking jobs if you don\'t have access to Linaro TCWG CI.
@@ -28,6 +23,6 @@ This benchmarking CI is work-in-progress, and we welcome feedback and suggestion
-----------------8<--------------------------8<--------------------------8<--------------------------
The information below can be used to reproduce a debug environment:
-Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/19/artifact/artifacts
-Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/17/artifact/artifacts
+Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/21/artifact/artifacts
+Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/19/artifact/artifacts
diff --git a/notify/mail-recipients.txt b/notify/mail-recipients.txt
index 56b65bb..aa219ef 100644
--- a/notify/mail-recipients.txt
+++ b/notify/mail-recipients.txt
@@ -1 +1 @@
-bcc:tcwg-validation@linaro.org,maxim.kuvyrkov@linaro.org
+bcc:tcwg-validation@linaro.org
diff --git a/notify/mail-subject.txt b/notify/mail-subject.txt
index 7c2072f..1429d29 100644
--- a/notify/mail-subject.txt
+++ b/notify/mail-subject.txt
@@ -1 +1 @@
-[Linaro-TCWG-CI] 848 commits in llvm: No change on arm O3
+[Linaro-TCWG-CI] baseline build: sped up by 200% - 519.lbm_r on arm O3
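
The rest of the commit updates notify/output-bmk-results.log, a Python execution trace of output-bmk-results.py. The trace shows each benchmark being classified (a success -> failed-to-run transition becomes a regression, failed-to-run -> success an improvement), a per-benchmark threshold being derived from measured variability, and the relative sample change being compared against that threshold. Below is a hedged reconstruction of that logic; the function names follow the trace, but the bodies, signatures, and default values are assumptions, not the CI's code.

  import numpy as np

  DEFAULT_THRESHOLD = {("regression", "sample", "build"): 3,
                       ("improvement", "sample", "build"): 3}   # assumed defaults

  def get_status_diag(status_before, status_after, bmk):
      # success -> failed-to-run is a regression; the reverse is an improvement.
      if status_before == "success" and status_after == "failed-to-run":
          return "regression", "{0} failed to run".format(bmk)
      if status_before == "failed-to-run" and status_after == "success":
          return "improvement", "{0} run now OK".format(bmk)
      return "", ""

  def get_threshold(change_kind, metric, mode, variation_avg=np.nan):
      # Per-benchmark variability widens the threshold: the trace shows it
      # multiplied by 3 in "build" mode and then floored at the default.
      if metric == "sample" and not np.isnan(variation_avg) and variation_avg > 0:
          spec = variation_avg * (3 if mode == "build" else 1)
          return max(spec, DEFAULT_THRESHOLD[(change_kind, metric, mode)])
      return DEFAULT_THRESHOLD[(change_kind, metric, mode)]

  def is_entry_regression(rel, threshold):
      return rel - 100 > threshold      # rel is the new/old ratio in percent

  def is_entry_improvement(rel, threshold):
      return 100 - rel > threshold

  # 519.lbm_r went from 1 to -1 perf samples, i.e. rel == -100, so it clears
  # the 3% improvement threshold and is reported as "sped up by 200%".
  print(is_entry_improvement(-100, get_threshold("improvement", "sample", "build")))  # True
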
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 53f8bbc..ba2aefa 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -109,19 +109,18 @@ output-bmk-results.py(258): print(results_df)
6 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
7 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
8 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-9 519.lbm_r lbm_r_base.default ... failed-to-run success
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+9 519.lbm_r lbm_r_base.default ... success failed-to-run
+10 520.omnetpp_r omnetpp_r_base.default ... success failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+18 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+19 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[16 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -214,27 +213,25 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
-output-bmk-results.py(129): classif="improvement"
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
+DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r failed to run
+output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(271): if change_kind == "regression":
+output-bmk-results.py(272): f_regr.write("# {0},{1}\n".format(row["symbol"], short_diag))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(273): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -243,12 +240,25 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
+DEBUG: *** 520.omnetpp_r,omnetpp_r_base.default : 520.omnetpp_r failed to run
+output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(271): if change_kind == "regression":
+output-bmk-results.py(272): f_regr.write("# {0},{1}\n".format(row["symbol"], short_diag))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(273): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -367,7 +377,6 @@ output-bmk-results.py(275): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(294): output_bmk_results_status(exe_df, "improvement", None, None, run_step_artifacts, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_status
output-bmk-results.py(256): f_out = Outfile("{0}/status.{1}".format(run_step_artifacts, change_kind), "w", predicate=(details=="verbose"))
@@ -385,19 +394,18 @@ output-bmk-results.py(258): print(results_df)
6 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
7 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
8 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-9 519.lbm_r lbm_r_base.default ... failed-to-run success
-10 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-11 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-12 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-13 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-14 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-15 531.deepsjeng_r deepsjeng_r_base.default ... success success
-20 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-21 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-22 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-23 557.xz_r xz_r_base.default ... success success
+9 519.lbm_r lbm_r_base.default ... success failed-to-run
+10 520.omnetpp_r omnetpp_r_base.default ... success failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+12 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+13 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+14 531.deepsjeng_r deepsjeng_r_base.default ... success success
+18 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+19 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+20 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+21 557.xz_r xz_r_base.default ... success success
-[17 rows x 20 columns]
+[16 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -490,30 +498,8 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(128): short_diag = "{0} run now OK".format(bmk)
-output-bmk-results.py(129): classif="improvement"
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(268): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], short_diag))
-DEBUG: *** 519.lbm_r,lbm_r_base.default : 519.lbm_r run now OK
-output-bmk-results.py(270): f_out.write_csv((100, row["benchmark"], row["symbol"], short_diag, short_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(271): if change_kind == "regression":
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
output-bmk-results.py(266): continue;
@@ -525,9 +511,8 @@ output-bmk-results.py(115): short_diag=""
output-bmk-results.py(116): classif=""
output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
+output-bmk-results.py(122): short_diag = "{0} failed to run".format(bmk)
+output-bmk-results.py(123): classif="regression"
output-bmk-results.py(134): return classif, short_diag
output-bmk-results.py(265): if classif != change_kind:
output-bmk-results.py(266): continue;
@@ -649,6 +634,7 @@ output-bmk-results.py(275): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
+output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(297): output_bmk_results_1(exe_df, "exe", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -671,14 +657,39 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.regression : 505.mcf_r,mcf_r_base.default : sample=0% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(184): return (result - 100 > threshold)
+output-bmk-results.py(233): continue
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(62): return np.nan
output-bmk-results.py(100): if not np.isnan(spec_thr):
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 505.mcf_r,mcf_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 519.lbm_r,lbm_r_base.default : sample=200% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -695,14 +706,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
+DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -761,10 +775,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -783,16 +800,67 @@ output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mod
output-bmk-results.py(57): if specific_variability is None:
output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(62): return np.nan
output-bmk-results.py(100): if not np.isnan(spec_thr):
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-1% (threshold=3%)
+DEBUG: checking exe.improvement : 519.lbm_r,lbm_r_base.default : sample=200% (threshold=3%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_improvement
+output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(193): return (100 - result > threshold)
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(170): item=bmk
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
+DEBUG: *** 519.lbm_r,lbm_r_base.default : sped up by 200% - 519.lbm_r - from 1 to -1 perf samples
+output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(249): if change_kind == "regression":
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
+output-bmk-results.py(61): if var.empty:
+output-bmk-results.py(63): elif len(var)>1:
+output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -828,7 +896,6 @@ output-bmk-results.py(253): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(300): output_bmk_results_1(sym_df, "symbol", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -851,10 +918,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -875,14 +945,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -899,14 +972,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -923,14 +999,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-5% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -947,14 +1026,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-3% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -971,14 +1053,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -995,34 +1080,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=3% (threshold=15%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
-output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1050,7 +1114,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1067,10 +1131,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1109,10 +1176,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1133,38 +1203,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=-2% (threshold=15%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(60): var = specific_variability[ (specific_variability['benchmark'] == bmk) & (specific_variability['symbol'].str.strip() == symb)]
-output-bmk-results.py(61): if var.empty:
-output-bmk-results.py(63): elif len(var)>1:
-output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1181,14 +1230,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-5% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1205,14 +1257,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1229,14 +1284,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=6% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1253,14 +1311,17 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=3% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1277,10 +1338,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1308,7 +1372,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1325,10 +1389,13 @@ output-bmk-results.py(60): var = specific_variability[ (specific_variability
output-bmk-results.py(61): if var.empty:
output-bmk-results.py(63): elif len(var)>1:
output-bmk-results.py(68): if var.iloc[0]['sample_variation_average']>0 :
-output-bmk-results.py(83): return np.nan
+output-bmk-results.py(69): threshold = ( var.iloc[0]['sample_variation_average'] )
+output-bmk-results.py(70): if mode == "build":
+output-bmk-results.py(74): threshold *= 3
+output-bmk-results.py(81): return threshold
output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(104): spec_thr=max(spec_thr, default_threshold[(change_kind,metric,mode)])
+output-bmk-results.py(105): return spec_thr
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
@@ -1360,7 +1427,6 @@ output-bmk-results.py(306): f_regr.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(307): f_ebp.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
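
Note on the traced change above: the new lines exercised in output-bmk-results.py replace the old "return np.nan / return default" paths with a benchmark-specific variability threshold that can only raise the default. A minimal sketch of that logic, reconstructed solely from the traced line contents; the function names, the DataFrame filter columns, and the return values on the untraced branches are assumptions, not the script's actual API:

    import numpy as np

    def variability_threshold(specific_variability, benchmark, symbol, mode):
        # Reconstructed from traced lines 60-81: look up the recorded
        # sample variation for this benchmark/symbol. The filter columns
        # used here are assumed; only the iloc[0] access and the
        # 'sample_variation_average' column appear in the trace.
        var = specific_variability[
            (specific_variability["benchmark"] == benchmark)
            & (specific_variability["symbol"] == symbol)
        ]
        if var.empty or len(var) > 1:
            # Untraced branches; returning NaN here is an assumption.
            return np.nan
        if var.iloc[0]["sample_variation_average"] > 0:
            threshold = var.iloc[0]["sample_variation_average"]
            if mode == "build":
                # Traced line 74: widen the threshold for build-mode runs.
                threshold *= 3
            return threshold
        return np.nan

    def effective_threshold(spec_thr, change_kind, metric, mode, default_threshold):
        # Reconstructed from traced lines 100-105: a benchmark-specific
        # threshold can raise, but never lower, the default threshold
        # keyed by (change_kind, metric, mode).
        if not np.isnan(spec_thr):
            return max(spec_thr, default_threshold[(change_kind, metric, mode)])
        return default_threshold[(change_kind, metric, mode)]

The DEBUG lines in the log then compare 100-rel_<metric> for each symbol against the threshold produced this way (15% in every case shown here).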
diff --git a/notify/results.regressions b/notify/results.regressions
new file mode 100644
index 0000000..c1eab88
--- /dev/null
+++ b/notify/results.regressions
@@ -0,0 +1,2 @@
+# lbm_r_base.default,519.lbm_r failed to run
+# omnetpp_r_base.default,520.omnetpp_r failed to run
diff --git a/notify/status.improvement b/notify/status.improvement
deleted file mode 100644
index 12aa99d..0000000
--- a/notify/status.improvement
+++ /dev/null
@@ -1 +0,0 @@
-100,519.lbm_r,lbm_r_base.default,519.lbm_r run now OK,519.lbm_r run now OK
diff --git a/notify/status.regression b/notify/status.regression
new file mode 100644
index 0000000..8bdb17f
--- /dev/null
+++ b/notify/status.regression
@@ -0,0 +1,2 @@
+100,519.lbm_r,lbm_r_base.default,519.lbm_r failed to run,519.lbm_r failed to run
+100,520.omnetpp_r,omnetpp_r_base.default,520.omnetpp_r failed to run,520.omnetpp_r failed to run