Diffstat (limited to 'notify')
-rw-r--r--  notify/any.skipped                  2
-rw-r--r--  notify/exe.improvement              1
-rw-r--r--  notify/exe.regression               1
-rw-r--r--  notify/extra-bisect-params          2
-rw-r--r--  notify/jira/comment-card.txt        3
-rw-r--r--  notify/jira/comment-template.txt    5
-rw-r--r--  notify/jira/components              1
-rw-r--r--  notify/jira/description            25
-rw-r--r--  notify/jira/key                     1
-rw-r--r--  notify/jira/startdate               1
-rw-r--r--  notify/jira/summary                 1
-rw-r--r--  notify/jira/yaml                   33
-rw-r--r--  notify/lnt_report.json            126
-rw-r--r--  notify/mail-body.txt               46
-rw-r--r--  notify/mail-subject.txt             2
-rw-r--r--  notify/output-bmk-results.log     726
-rw-r--r--  notify/results.regressions          5
-rw-r--r--  notify/symbol.improvement           3
-rw-r--r--  notify/symbol.regression            4
19 files changed, 407 insertions, 581 deletions
diff --git a/notify/any.skipped b/notify/any.skipped
new file mode 100644
index 0000000..b7b7db0
--- /dev/null
+++ b/notify/any.skipped
@@ -0,0 +1,2 @@
+541.leela_r,[.] _ZN7MatcherC2Ev,slowed down by 100% - 541.leela_r:[.] _ZN7MatcherC2Ev,slowed down by 100% - 541.leela_r:[.] _ZN7MatcherC2Ev - from 1 to 2 perf samples
+541.leela_r,[.] _ZN9FastBoard25get_pattern3_augment_specEiib,sped up by 50% - 541.leela_r:[.] _ZN9FastBoard25get_pattern3_augment_specEiib,sped up by 50% - 541.leela_r:[.] _ZN9FastBoard25get_pattern3_augment_specEiib - from 2 to 1 perf samples
diff --git a/notify/exe.improvement b/notify/exe.improvement
new file mode 100644
index 0000000..2ef9a3d
--- /dev/null
+++ b/notify/exe.improvement
@@ -0,0 +1 @@
+200,508.namd_r,namd_r_base.default,sped up by 200% - 508.namd_r,sped up by 200% - 508.namd_r - from 1 to -1 perf samples
diff --git a/notify/exe.regression b/notify/exe.regression
deleted file mode 100644
index 1942582..0000000
--- a/notify/exe.regression
+++ /dev/null
@@ -1 +0,0 @@
-4,531.deepsjeng_r,deepsjeng_r_base.default,slowed down by 4% - 531.deepsjeng_r,slowed down by 4% - 531.deepsjeng_r - from 10630 to 11005 perf samples
diff --git a/notify/extra-bisect-params b/notify/extra-bisect-params
index 8877bb0..c892f41 100644
--- a/notify/extra-bisect-params
+++ b/notify/extra-bisect-params
@@ -1 +1 @@
-extra_build_params=++benchmarks 531.deepsjeng_r ++benchmarks 538.imagick_r ++benchmarks 538.imagick_r ++benchmarks 544.nab_r
+extra_build_params=++benchmarks 508.namd_r
diff --git a/notify/jira/comment-card.txt b/notify/jira/comment-card.txt
new file mode 100644
index 0000000..398c181
--- /dev/null
+++ b/notify/jira/comment-card.txt
@@ -0,0 +1,3 @@
+[LLVM-1027]
+slowed down by 100% - 508.namd_r:libc.so.6
+Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/artifact/artifacts/notify/mail-body.txt/*view*/
diff --git a/notify/jira/comment-template.txt b/notify/jira/comment-template.txt
index 4ed232b..90c5b65 100644
--- a/notify/jira/comment-template.txt
+++ b/notify/jira/comment-template.txt
@@ -1,3 +1,4 @@
[LLVM-651]
-slowed down by 4% - 531.deepsjeng_r
-Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/103/artifact/artifacts/notify/mail-body.txt/*view*/
+https://linaro.atlassian.net/browse/LLVM-1027
+slowed down by 100% - 508.namd_r:libc.so.6
+Details: https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/artifact/artifacts/notify/mail-body.txt/*view*/
diff --git a/notify/jira/components b/notify/jira/components
new file mode 100644
index 0000000..8072713
--- /dev/null
+++ b/notify/jira/components
@@ -0,0 +1 @@
+LLVM
diff --git a/notify/jira/description b/notify/jira/description
new file mode 100644
index 0000000..c83b31a
--- /dev/null
+++ b/notify/jira/description
@@ -0,0 +1,25 @@
+Commit: https://github.com/llvm/llvm-project/commit/147c5d6686b935ecd93f8fa0e2dcf38deb593890
+commit llvmorg-18-init-12505-g147c5d6686b9
+Author: Zhaoxuan Jiang <jiangzhaoxuan94@gmail.com>
+Date: Thu Nov 23 16:21:27 2023 +0800
+
+ [AArch64] Allow LDR merge with same destination register by renaming (#71908)
+
+ The patch is based on a reverted patch:
+ https://reviews.llvm.org/D103597. It was trying to rename registers
+ before alias check, which is not safe and causes miscompiles. This patch
+ does 2 things:
+
+... 8 lines of the commit log omitted.
+
+* tcwg_bmk-code_speed-cpu2017rate
+** llvm-aarch64-master-O3
+*** slowed down by 7% - 549.fotonik3d_r
+*** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-aarch64-master-O3/details.txt
+*** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-aarch64-master-O3-build/145/
+** llvm-arm-master-O3
+*** slowed down by 100% - 508.namd_r:libc.so.6
+*** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3/details.txt
+*** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/
+
+Latest data: https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/jira/yaml
diff --git a/notify/jira/key b/notify/jira/key
new file mode 100644
index 0000000..ed74c1b
--- /dev/null
+++ b/notify/jira/key
@@ -0,0 +1 @@
+LLVM-1027
diff --git a/notify/jira/startdate b/notify/jira/startdate
new file mode 100644
index 0000000..04d7a1f
--- /dev/null
+++ b/notify/jira/startdate
@@ -0,0 +1 @@
+2023-11-23
diff --git a/notify/jira/summary b/notify/jira/summary
new file mode 100644
index 0000000..2aac293
--- /dev/null
+++ b/notify/jira/summary
@@ -0,0 +1 @@
+llvmorg-18-init-12505-g147c5d6686b9: slowed down by 100% - 508.namd_r:libc.so.6
diff --git a/notify/jira/yaml b/notify/jira/yaml
new file mode 100644
index 0000000..5c761c6
--- /dev/null
+++ b/notify/jira/yaml
@@ -0,0 +1,33 @@
+- Project: LLVM
+ IssueType: Sub-task
+ Key: LLVM-1027
+ Summary: |
+ llvmorg-18-init-12505-g147c5d6686b9: slowed down by 100% - 508.namd_r:libc.so.6
+ Components: LLVM
+ Start date: 2023-11-23
+ Description: |
+ Commit: https://github.com/llvm/llvm-project/commit/147c5d6686b935ecd93f8fa0e2dcf38deb593890
+ commit llvmorg-18-init-12505-g147c5d6686b9
+ Author: Zhaoxuan Jiang <jiangzhaoxuan94@gmail.com>
+ Date: Thu Nov 23 16:21:27 2023 +0800
+
+ [AArch64] Allow LDR merge with same destination register by renaming (#71908)
+
+ The patch is based on a reverted patch:
+ https://reviews.llvm.org/D103597. It was trying to rename registers
+ before alias check, which is not safe and causes miscompiles. This patch
+ does 2 things:
+
+ ... 8 lines of the commit log omitted.
+
+ * tcwg_bmk-code_speed-cpu2017rate
+ ** llvm-aarch64-master-O3
+ *** slowed down by 7% - 549.fotonik3d_r
+ *** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-aarch64-master-O3/details.txt
+ *** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-aarch64-master-O3-build/145/
+ ** llvm-arm-master-O3
+ *** slowed down by 100% - 508.namd_r:libc.so.6
+ *** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3/details.txt
+ *** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/
+
+ Latest data: https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/jira/yaml
diff --git a/notify/lnt_report.json b/notify/lnt_report.json
index 3026b05..f51e3d3 100644
--- a/notify/lnt_report.json
+++ b/notify/lnt_report.json
@@ -6,16 +6,32 @@
"Run": {
"Info": {
"tag": "tcwg_bmk-code_speed-cpu2017rate",
- "run_order": "llvmorg-18-init-12504-ga3cab1fa17eb",
- "test_url": "https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/103/",
- "git_llvm": "https://github.com/llvm/llvm-project/commit/a3cab1fa17ebf29b01fcb566648e2f3fb2955993",
+ "run_order": "llvmorg-18-init-12505-g147c5d6686b9",
+ "test_url": "https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/",
+ "git_llvm": "https://github.com/llvm/llvm-project/commit/147c5d6686b935ecd93f8fa0e2dcf38deb593890",
"__report_version__": "1"
},
- "Start Time": "2023-11-23 02:39:14"
+ "Start Time": "2023-11-23 05:30:20"
},
"Tests": [
{
"Data": [
+ 30192
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
+ }
+ ,
+ {
+ "Data": [
+ 13260
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
+ }
+ ,
+ {
+ "Data": [
3971452
],
"Info": {},
@@ -32,42 +48,50 @@
,
{
"Data": [
- 13260
+ 185083
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.code_size"
}
,
{
"Data": [
- 625300
+ 146837
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.508.namd_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
}
,
{
"Data": [
- 30192
+ 14124
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.exec"
}
,
{
"Data": [
- 185083
+ 3
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.exec"
}
,
{
"Data": [
- 146837
+ 10155
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.code_size"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
+ }
+ ,
+ {
+ "Data": [
+ 1
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.exec"
}
,
{
@@ -80,7 +104,7 @@
,
{
"Data": [
- 11005
+ 10900
],
"Info": {},
"Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.exec"
@@ -88,10 +112,26 @@
,
{
"Data": [
- 14116
+ 0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.compile_status"
+ }
+ ,
+ {
+ "Data": [
+ 0
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.505.mcf_r.execution_status"
+ }
+ ,
+ {
+ "Data": [
+ 0
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.500.perlbench_r.compile_status"
}
,
{
@@ -99,7 +139,15 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.508.namd_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.500.perlbench_r.execution_status"
+ }
+ ,
+ {
+ "Data": [
+ 0
+ ],
+ "Info": {},
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.502.gcc_r.compile_status"
}
,
{
@@ -107,23 +155,23 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.519.lbm_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.502.gcc_r.execution_status"
}
,
{
"Data": [
- 3
+ 0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.508.namd_r.compile_status"
}
,
{
"Data": [
- 10161
+ 1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.exec"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.508.namd_r.execution_status"
}
,
{
@@ -131,7 +179,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.compile_status"
}
,
{
@@ -139,7 +187,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.execution_status"
}
,
{
@@ -147,7 +195,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.compile_status"
}
,
{
@@ -155,7 +203,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.execution_status"
}
,
{
@@ -163,7 +211,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.525.x264_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.544.nab_r.compile_status"
}
,
{
@@ -171,7 +219,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.525.x264_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.544.nab_r.execution_status"
}
,
{
@@ -179,7 +227,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.526.blender_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.compile_status"
}
,
{
@@ -187,7 +235,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.526.blender_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.execution_status"
}
,
{
@@ -195,7 +243,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.compile_status"
}
,
{
@@ -203,7 +251,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.557.xz_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.execution_status"
}
,
{
@@ -211,7 +259,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.compile_status"
}
,
{
@@ -219,7 +267,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.538.imagick_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.523.xalancbmk_r.execution_status"
}
,
{
@@ -227,7 +275,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.544.nab_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.525.x264_r.compile_status"
}
,
{
@@ -235,7 +283,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.544.nab_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.525.x264_r.execution_status"
}
,
{
@@ -243,7 +291,7 @@
0
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.compile_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.526.blender_r.compile_status"
}
,
{
@@ -251,7 +299,7 @@
1
],
"Info": {},
- "Name": "tcwg_bmk-code_speed-cpu2017rate.541.leela_r.execution_status"
+ "Name": "tcwg_bmk-code_speed-cpu2017rate.526.blender_r.execution_status"
}
]
}
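
Note on the lnt_report.json entries above: each test pairs a Name of the form "<tag>.<benchmark>.<metric>" (e.g. "tcwg_bmk-code_speed-cpu2017rate.531.deepsjeng_r.exec") with a one-element Data array. A minimal sketch of consuming that shape follows; it is a hypothetical helper, not part of the CI scripts, and it assumes "Run" and "Tests" are top-level keys as the hunk context suggests.

# Hypothetical helper (not in this repository): summarize notify/lnt_report.json.
# Assumes "Run" and "Tests" are top-level keys; the file path is an assumption.
import json
from collections import defaultdict

def summarize_lnt_report(path="notify/lnt_report.json"):
    with open(path) as f:
        report = json.load(f)

    print("run_order:", report["Run"]["Info"]["run_order"])

    # Split "<tag>.<benchmark>.<metric>" into benchmark and metric;
    # the benchmark itself contains a dot, e.g. "505.mcf_r".
    per_benchmark = defaultdict(dict)
    for test in report["Tests"]:
        prefix, metric = test["Name"].rsplit(".", 1)
        benchmark = prefix.split(".", 1)[1]
        per_benchmark[benchmark][metric] = test["Data"][0]

    for benchmark, metrics in sorted(per_benchmark.items()):
        print(benchmark, metrics)

if __name__ == "__main__":
    summarize_lnt_report()
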
diff --git a/notify/mail-body.txt b/notify/mail-body.txt
index c27494d..66adb01 100644
--- a/notify/mail-body.txt
+++ b/notify/mail-body.txt
@@ -1,21 +1,24 @@
Dear contributor, our automatic CI has detected problems related to your patch(es). Please find some details below. If you have any questions, please follow up on linaro-toolchain@lists.linaro.org mailing list, Libera's #linaro-tcwg channel, or ping your favourite Linaro toolchain developer on the usual project channel.
+We track this report status in https://linaro.atlassian.net/browse/LLVM-1027 , please let us know if you are looking at the problem and/or when you have a fix.
+
In CI config tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3 after:
- | 31 commits in llvm
- | a3cab1fa17eb [X86][MC] Support encoding/decoding for PUSHP/POPP (#73092)
- | d76d8e541dab [AMDGPU][NFC] Update GISel memory-legalizer-atomic-fence test (#72829)
- | 083a53971758 clang/CodeGen/RISCV: test lowering of math builtins (#71399)
- | 49f55d107548 Revert "[Flang] Add partial support for lowering procedure pointer assignment. (#70461)"
- | 7414c0db962f [LoongArch] Precommit a test for smul with overflow (NFC) (#73212)
- | ... and 26 more commits in llvm
+ | commit llvmorg-18-init-12505-g147c5d6686b9
+ | Author: Zhaoxuan Jiang <jiangzhaoxuan94@gmail.com>
+ | Date: Thu Nov 23 16:21:27 2023 +0800
+ |
+ | [AArch64] Allow LDR merge with same destination register by renaming (#71908)
+ |
+ | The patch is based on a reverted patch:
+ | https://reviews.llvm.org/D103597. It was trying to rename registers
+ | before alias check, which is not safe and causes miscompiles. This patch
+ | does 2 things:
+ |
+ | ... 8 lines of the commit log omitted.
-the following benchmarks slowed down by more than 3%:
-- slowed down by 4% - 531.deepsjeng_r - from 10630 to 11005 perf samples
the following hot functions slowed down by more than 15% (but their benchmarks slowed down by less than 3%):
-- slowed down by 50% - 538.imagick_r:libc.so.6 - from 2 to 3 perf samples
-- slowed down by 50% - 538.imagick_r:[.] _IO_fread - from 2 to 3 perf samples
-- slowed down by 100% - 544.nab_r:[.] __vfscanf_internal - from 1 to 2 perf samples
+- slowed down by 100% - 508.namd_r:libc.so.6 - from 1 to 2 perf samples
The configuration of this build is:
Below reproducer instructions can be used to re-build both "first_bad" and "last_good" cross-toolchains used in this bisection. Naturally, the scripts will fail when triggering benchmarking jobs if you don't have access to Linaro TCWG CI.
@@ -33,6 +36,21 @@ This benchmarking CI is work-in-progress, and we welcome feedback and suggestion
-----------------8<--------------------------8<--------------------------8<--------------------------
The information below can be used to reproduce a debug environment:
-Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/103/artifact/artifacts
-Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/101/artifact/artifacts
+Current build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/artifact/artifacts
+Reference build : https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/103/artifact/artifacts
+
+Reproduce last good and first bad builds: https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3/reproduction_instructions.txt
+
+Full commit : https://github.com/llvm/llvm-project/commit/147c5d6686b935ecd93f8fa0e2dcf38deb593890
+
+List of configurations that regressed due to this commit :
+* tcwg_bmk-code_speed-cpu2017rate
+** llvm-aarch64-master-O3
+*** slowed down by 7% - 549.fotonik3d_r
+*** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-aarch64-master-O3/details.txt
+*** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-aarch64-master-O3-build/145/
+** llvm-arm-master-O3
+*** slowed down by 100% - 508.namd_r:libc.so.6
+*** https://git-us.linaro.org/toolchain/ci/interesting-commits.git/plain/llvm/sha1/147c5d6686b935ecd93f8fa0e2dcf38deb593890/tcwg_bmk-code_speed-cpu2017rate/llvm-arm-master-O3/details.txt
+*** https://ci.linaro.org/job/tcwg_bmk-code_speed-cpu2017rate--llvm-arm-master-O3-build/104/
diff --git a/notify/mail-subject.txt b/notify/mail-subject.txt
index 07c8e21..1dc4192 100644
--- a/notify/mail-subject.txt
+++ b/notify/mail-subject.txt
@@ -1 +1 @@
-[Linaro-TCWG-CI] 31 commits in llvm: slowed down by 4% - 531.deepsjeng_r on arm O3
+[Linaro-TCWG-CI] llvmorg-18-init-12505-g147c5d6686b9: slowed down by 100% - 508.namd_r:libc.so.6 on arm O3
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 62a732f..e89da20 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -103,126 +103,21 @@ output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
output-bmk-results.py(258): print(results_df)
benchmark symbol ... status_x status_y
-0 500.perlbench_r perlbench_r_base.default ... failed-to-run failed-to-run
-1 502.gcc_r cpugcc_r_base.default ... failed-to-run failed-to-run
-2 505.mcf_r mcf_r_base.default ... success -1
-8 508.namd_r namd_r_base.default ... failed-to-run -1
-9 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
-10 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
-11 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-12 519.lbm_r lbm_r_base.default ... failed-to-run -1
-13 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-15 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-16 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-17 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-20 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-21 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-22 531.deepsjeng_r deepsjeng_r_base.default ... success success
-29 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-30 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-31 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-36 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-37 557.xz_r xz_r_base.default ... success success
+0 505.mcf_r mcf_r_base.default ... -1 success
+7 508.namd_r namd_r_base.default ... -1 failed-to-run
+8 519.lbm_r lbm_r_base.default ... -1 -1
+10 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+14 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+15 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+16 531.deepsjeng_r deepsjeng_r_base.default ... success success
+23 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+24 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+25 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+30 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+31 557.xz_r xz_r_base.default ... success success
-[20 rows x 20 columns]
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+[13 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -423,126 +318,21 @@ output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
output-bmk-results.py(258): print(results_df)
benchmark symbol ... status_x status_y
-0 500.perlbench_r perlbench_r_base.default ... failed-to-run failed-to-run
-1 502.gcc_r cpugcc_r_base.default ... failed-to-run failed-to-run
-2 505.mcf_r mcf_r_base.default ... success -1
-8 508.namd_r namd_r_base.default ... failed-to-run -1
-9 508.namd_r namd_r_base.default ... failed-to-run failed-to-run
-10 510.parest_r parest_r_base.default ... failed-to-run failed-to-run
-11 511.povray_r povray_r_base.default ... failed-to-run failed-to-run
-12 519.lbm_r lbm_r_base.default ... failed-to-run -1
-13 519.lbm_r lbm_r_base.default ... failed-to-run failed-to-run
-15 520.omnetpp_r omnetpp_r_base.default ... failed-to-run failed-to-run
-16 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-17 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
-20 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
-21 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
-22 531.deepsjeng_r deepsjeng_r_base.default ... success success
-29 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
-30 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-31 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
-36 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
-37 557.xz_r xz_r_base.default ... success success
+0 505.mcf_r mcf_r_base.default ... -1 success
+7 508.namd_r namd_r_base.default ... -1 failed-to-run
+8 519.lbm_r lbm_r_base.default ... -1 -1
+10 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+11 523.xalancbmk_r cpuxalan_r_base.default ... failed-to-run failed-to-run
+14 525.x264_r x264_r_base.default ... failed-to-run failed-to-run
+15 526.blender_r blender_r_base.default ... failed-to-run failed-to-run
+16 531.deepsjeng_r deepsjeng_r_base.default ... success success
+23 538.imagick_r imagick_r_base.default ... failed-to-run failed-to-run
+24 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+25 541.leela_r leela_r_base.default ... failed-to-run failed-to-run
+30 544.nab_r nab_r_base.default ... failed-to-run failed-to-run
+31 557.xz_r xz_r_base.default ... success success
-[20 rows x 20 columns]
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
-output-bmk-results.py(261): for index, row in results_df.iterrows():
-output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
- --- modulename: output-bmk-results, funcname: get_status_diag
-output-bmk-results.py(113): bmk = row["benchmark"]
-output-bmk-results.py(115): short_diag=""
-output-bmk-results.py(116): classif=""
-output-bmk-results.py(118): if row["status_x"]!="failed-to-build" and row["status_y"]=="failed-to-build":
-output-bmk-results.py(121): elif row["status_x"]=="success" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(124): elif row["status_x"]=="failed-to-build" and row["status_y"]=="failed-to-run":
-output-bmk-results.py(127): elif row["status_x"]=="failed-to-run" and row["status_y"]=="success":
-output-bmk-results.py(130): elif row["status_x"]=="failed-to-build" and row["status_y"]=="success":
-output-bmk-results.py(134): return classif, short_diag
-output-bmk-results.py(265): if classif != change_kind:
-output-bmk-results.py(266): continue;
+[13 rows x 20 columns]
output-bmk-results.py(261): for index, row in results_df.iterrows():
output-bmk-results.py(263): classif, short_diag = get_status_diag(row)
--- modulename: output-bmk-results, funcname: get_status_diag
@@ -777,7 +567,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 519.lbm_r,lbm_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 508.namd_r,namd_r_base.default : sample=200% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -817,7 +607,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-1% (threshold=3%)
+DEBUG: checking exe.regression : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -857,60 +647,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-4% (threshold=3%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(170): item=bmk
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 531.deepsjeng_r,deepsjeng_r_base.default : slowed down by 4% - 531.deepsjeng_r - from 10630 to 11005 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
-output-bmk-results.py(250): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(251): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(58): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.regression : 538.imagick_r,imagick_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.regression : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -982,6 +719,7 @@ output-bmk-results.py(253): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
+output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(298): output_bmk_results_1(exe_df, "exe", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -1027,12 +765,38 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 519.lbm_r,lbm_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.improvement : 508.namd_r,namd_r_base.default : sample=200% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(170): item=bmk
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
+DEBUG: *** 508.namd_r,namd_r_base.default : sped up by 200% - 508.namd_r - from 1 to -1 perf samples
+output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(249): if change_kind == "regression":
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1067,26 +831,6 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=-1% (threshold=3%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(58): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
@@ -1107,7 +851,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=-4% (threshold=3%)
+DEBUG: checking exe.improvement : 523.xalancbmk_r,cpuxalan_r_base.default : sample=0% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1127,7 +871,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking exe.improvement : 538.imagick_r,imagick_r_base.default : sample=0% (threshold=3%)
+DEBUG: checking exe.improvement : 531.deepsjeng_r,deepsjeng_r_base.default : sample=1% (threshold=3%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1199,7 +943,6 @@ output-bmk-results.py(253): f_out.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(300): output_bmk_results_1(sym_df, "symbol", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
output-bmk-results.py(218): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
@@ -1245,7 +988,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] primal_bea_mpp : sample=5% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1265,7 +1008,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=2% (threshold=15%)
+DEBUG: checking symbol.regression : 505.mcf_r,[.] cost_compare : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1285,12 +1028,46 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 508.namd_r,[.] __vfscanf_internal : sample=50% (threshold=15%)
+DEBUG: checking symbol.regression : 508.namd_r,libc.so.6 : sample=-100% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(233): continue
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(168): item=bmk+":"+row["symbol"]
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
+DEBUG: *** 508.namd_r,libc.so.6 : slowed down by 100% - 508.namd_r:libc.so.6 - from 1 to 2 perf samples
+output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(249): if change_kind == "regression":
+output-bmk-results.py(250): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(251): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
+ --- modulename: output-bmk-results, funcname: write
+output-bmk-results.py(36): if not self.predicate or not self.outf:
+output-bmk-results.py(38): self.outf.write(string)
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1305,7 +1082,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 508.namd_r,libc.so.6 : sample=50% (threshold=15%)
+DEBUG: checking symbol.regression : 508.namd_r,[.] __vfscanf_internal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1345,7 +1122,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore8containsEPKNS_13FieldValueMapE : sample=3% (threshold=15%)
+DEBUG: checking symbol.regression : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore8containsEPKNS_13FieldValueMapE : sample=-1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1365,7 +1142,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore13isDuplicateOfEPNS_17DatatypeValidatorEPKtS2_S4_ : sample=-7% (threshold=15%)
+DEBUG: checking symbol.regression : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore13isDuplicateOfEPNS_17DatatypeValidatorEPKtS2_S4_ : sample=-1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1385,7 +1162,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-8% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=3% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1405,7 +1182,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-1% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1425,7 +1202,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-2% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1445,7 +1222,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=-7% (threshold=15%)
+DEBUG: checking symbol.regression : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1465,46 +1242,32 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 538.imagick_r,libc.so.6 : sample=-50% (threshold=15%)
+DEBUG: checking symbol.regression : 538.imagick_r,libc.so.6 : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(168): item=bmk+":"+row["symbol"]
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 538.imagick_r,libc.so.6 : slowed down by 50% - 538.imagick_r:libc.so.6 - from 2 to 3 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
-output-bmk-results.py(250): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(251): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(233): continue
+output-bmk-results.py(224): for index, row in out_df.iterrows():
+output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+ --- modulename: output-bmk-results, funcname: get_threshold
+output-bmk-results.py(98): if metric == "sample":
+output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
+ --- modulename: output-bmk-results, funcname: get_specific_thresholds
+output-bmk-results.py(57): if specific_variability is None:
+output-bmk-results.py(58): return np.nan
+output-bmk-results.py(100): if not np.isnan(spec_thr):
+output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+DEBUG: checking symbol.regression : 538.imagick_r,[.] _IO_fread : sample=33% (threshold=15%)
+output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+ --- modulename: output-bmk-results, funcname: is_entry_regression
+output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(184): return (result - 100 > threshold)
+output-bmk-results.py(233): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1519,7 +1282,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 538.imagick_r,[.] _IO_fread : sample=-50% (threshold=15%)
+DEBUG: checking symbol.regression : 541.leela_r,[.] _ZN7MatcherC2Ev : sample=-100% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1544,21 +1307,12 @@ output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_di
output-bmk-results.py(239): if metric == "sample" \
output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 538.imagick_r,[.] _IO_fread : slowed down by 50% - 538.imagick_r:[.] _IO_fread - from 2 to 3 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+output-bmk-results.py(242): and row['symbol_md5sum_x'] != "d41d8cd98f00b204e9800998ecf8427e":
+output-bmk-results.py(243): f_skip.write_csv((row["benchmark"], row["symbol"], short_diag, long_diag))
--- modulename: output-bmk-results, funcname: write_csv
output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
-output-bmk-results.py(250): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(251): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(244): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1573,27 +1327,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 541.leela_r,[.] _ZN9FastBoard25get_pattern3_augment_specEiib : sample=0% (threshold=15%)
-output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
- --- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(233): continue
-output-bmk-results.py(224): for index, row in out_df.iterrows():
-output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
- --- modulename: output-bmk-results, funcname: get_threshold
-output-bmk-results.py(98): if metric == "sample":
-output-bmk-results.py(99): spec_thr = get_specific_thresholds(metric, mode, bmk, symb)
- --- modulename: output-bmk-results, funcname: get_specific_thresholds
-output-bmk-results.py(57): if specific_variability is None:
-output-bmk-results.py(58): return np.nan
-output-bmk-results.py(100): if not np.isnan(spec_thr):
-output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 541.leela_r,[.] _ZN7MatcherC2Ev : sample=0% (threshold=15%)
+DEBUG: checking symbol.regression : 541.leela_r,[.] _ZN9FastBoard25get_pattern3_augment_specEiib : sample=50% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1633,46 +1367,12 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 544.nab_r,[.] __vfscanf_internal : sample=-100% (threshold=15%)
+DEBUG: checking symbol.regression : 544.nab_r,[.] __vfscanf_internal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(184): return (result - 100 > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(168): item=bmk+":"+row["symbol"]
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 544.nab_r,[.] __vfscanf_internal : slowed down by 100% - 544.nab_r:[.] __vfscanf_internal - from 1 to 2 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
-output-bmk-results.py(250): f_regr.write("# {0},{1}\n".format(row["symbol"], long_diag))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(251): f_ebp.write("++benchmarks {0} ".format(row["benchmark"]))
- --- modulename: output-bmk-results, funcname: write
-output-bmk-results.py(36): if not self.predicate or not self.outf:
-output-bmk-results.py(38): self.outf.write(string)
+output-bmk-results.py(233): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1687,7 +1387,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=2% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_find : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1707,7 +1407,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-3% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1727,7 +1427,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-3% (threshold=15%)
+DEBUG: checking symbol.regression : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
output-bmk-results.py(183): if metric in metric_utils.higher_regress_metrics:
@@ -1784,7 +1484,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=-1% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] primal_bea_mpp : sample=5% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1804,7 +1504,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=2% (threshold=15%)
+DEBUG: checking symbol.improvement : 505.mcf_r,[.] cost_compare : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1824,38 +1524,12 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 508.namd_r,[.] __vfscanf_internal : sample=50% (threshold=15%)
+DEBUG: checking symbol.improvement : 508.namd_r,libc.so.6 : sample=-100% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(168): item=bmk+":"+row["symbol"]
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 508.namd_r,[.] __vfscanf_internal : sped up by 50% - 508.namd_r:[.] __vfscanf_internal - from 2 to 1 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
+output-bmk-results.py(233): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1870,38 +1544,12 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 508.namd_r,libc.so.6 : sample=50% (threshold=15%)
+DEBUG: checking symbol.improvement : 508.namd_r,[.] __vfscanf_internal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
- --- modulename: output-bmk-results, funcname: get_short_long_diag
-output-bmk-results.py(137): bmk = row["benchmark"]
-output-bmk-results.py(139): rel_value = row["rel_" + metric]
-output-bmk-results.py(140): prev_value = row[metric + "_x"]
-output-bmk-results.py(141): curr_value = row[metric + "_y"]
-output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
-output-bmk-results.py(152): suffix = ""
-output-bmk-results.py(153): if metric == "sample":
-output-bmk-results.py(154): prefix_regression = "slowed down by"
-output-bmk-results.py(155): prefix_improvement = "sped up by"
-output-bmk-results.py(156): suffix = "perf samples"
-output-bmk-results.py(167): if sym_type=="symbol":
-output-bmk-results.py(168): item=bmk+":"+row["symbol"]
-output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
-output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
-output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
-output-bmk-results.py(239): if metric == "sample" \
-output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
-output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
-output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
-DEBUG: *** 508.namd_r,libc.so.6 : sped up by 50% - 508.namd_r:libc.so.6 - from 2 to 1 perf samples
-output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
- --- modulename: output-bmk-results, funcname: write_csv
-output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
-output-bmk-results.py(43): self.csvwriter.writerow(arr)
-output-bmk-results.py(249): if change_kind == "regression":
+output-bmk-results.py(233): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -1936,7 +1584,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore8containsEPKNS_13FieldValueMapE : sample=3% (threshold=15%)
+DEBUG: checking symbol.improvement : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore8containsEPKNS_13FieldValueMapE : sample=-1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1956,7 +1604,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore13isDuplicateOfEPNS_17DatatypeValidatorEPKtS2_S4_ : sample=-7% (threshold=15%)
+DEBUG: checking symbol.improvement : 523.xalancbmk_r,[.] _ZN11xercesc_2_710ValueStore13isDuplicateOfEPNS_17DatatypeValidatorEPKtS2_S4_ : sample=-1% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1976,7 +1624,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=-8% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z5fevalP7state_tiP12t_eval_comps : sample=3% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -1996,7 +1644,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=-1% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z7ProbeTTP7state_tPiiiPjS1_S1_S1_S1_i : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2016,7 +1664,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-2% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z6searchP7state_tiiiii : sample=-4% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2036,7 +1684,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=-7% (threshold=15%)
+DEBUG: checking symbol.improvement : 531.deepsjeng_r,[.] _Z15FindFirstRemovePy : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2056,7 +1704,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 538.imagick_r,libc.so.6 : sample=-50% (threshold=15%)
+DEBUG: checking symbol.improvement : 538.imagick_r,libc.so.6 : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2076,12 +1724,38 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 538.imagick_r,[.] _IO_fread : sample=-50% (threshold=15%)
+DEBUG: checking symbol.improvement : 538.imagick_r,[.] _IO_fread : sample=33% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(168): item=bmk+":"+row["symbol"]
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(246): print("DEBUG: *** {0},{1} : {2}".format(row["benchmark"], row["symbol"], long_diag))
+DEBUG: *** 538.imagick_r,[.] _IO_fread : sped up by 33% - 538.imagick_r:[.] _IO_fread - from 3 to 2 perf samples
+output-bmk-results.py(248): f_out.write_csv((percent_change, row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(249): if change_kind == "regression":
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -2096,7 +1770,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 541.leela_r,[.] _ZN9FastBoard25get_pattern3_augment_specEiib : sample=0% (threshold=15%)
+DEBUG: checking symbol.improvement : 541.leela_r,[.] _ZN7MatcherC2Ev : sample=-100% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2116,12 +1790,37 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 541.leela_r,[.] _ZN7MatcherC2Ev : sample=0% (threshold=15%)
+DEBUG: checking symbol.improvement : 541.leela_r,[.] _ZN9FastBoard25get_pattern3_augment_specEiib : sample=50% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
output-bmk-results.py(193): return (100 - result > threshold)
-output-bmk-results.py(233): continue
+output-bmk-results.py(235): percent_change, short_diag, long_diag = get_short_long_diag(row, metric, sym_type, change_kind)
+ --- modulename: output-bmk-results, funcname: get_short_long_diag
+output-bmk-results.py(137): bmk = row["benchmark"]
+output-bmk-results.py(139): rel_value = row["rel_" + metric]
+output-bmk-results.py(140): prev_value = row[metric + "_x"]
+output-bmk-results.py(141): curr_value = row[metric + "_y"]
+output-bmk-results.py(142): if metric == "num_vect_loops" or metric == "num_sve_loops":
+output-bmk-results.py(152): suffix = ""
+output-bmk-results.py(153): if metric == "sample":
+output-bmk-results.py(154): prefix_regression = "slowed down by"
+output-bmk-results.py(155): prefix_improvement = "sped up by"
+output-bmk-results.py(156): suffix = "perf samples"
+output-bmk-results.py(167): if sym_type=="symbol":
+output-bmk-results.py(168): item=bmk+":"+row["symbol"]
+output-bmk-results.py(172): short_diag = "{1} {2}% - {0}".format(item, locals()["prefix_" + change_kind], abs(rel_value - 100))
+output-bmk-results.py(173): long_diag = "{0} - from {1} to {2} {3}".format(short_diag, prev_value, curr_value, suffix)
+output-bmk-results.py(174): return abs(rel_value - 100), short_diag, long_diag
+output-bmk-results.py(239): if metric == "sample" \
+output-bmk-results.py(240): and row['symbol_md5sum_x'] == row['symbol_md5sum_y'] \
+output-bmk-results.py(241): and row['symbol_md5sum_x'] != "-1" \
+output-bmk-results.py(242): and row['symbol_md5sum_x'] != "d41d8cd98f00b204e9800998ecf8427e":
+output-bmk-results.py(243): f_skip.write_csv((row["benchmark"], row["symbol"], short_diag, long_diag))
+ --- modulename: output-bmk-results, funcname: write_csv
+output-bmk-results.py(41): if not self.predicate or not self.csvwriter:
+output-bmk-results.py(43): self.csvwriter.writerow(arr)
+output-bmk-results.py(244): continue
output-bmk-results.py(224): for index, row in out_df.iterrows():
output-bmk-results.py(226): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
@@ -2156,7 +1855,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 544.nab_r,[.] __vfscanf_internal : sample=-100% (threshold=15%)
+DEBUG: checking symbol.improvement : 544.nab_r,[.] __vfscanf_internal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2176,7 +1875,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=2% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_find : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2196,7 +1895,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=-3% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_lzma_optimum_normal : sample=0% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2216,7 +1915,7 @@ output-bmk-results.py(110): return default_threshold[(change_kind,metric,mod
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
output-bmk-results.py(229): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
output-bmk-results.py(228): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=-3% (threshold=15%)
+DEBUG: checking symbol.improvement : 557.xz_r,[.] lzma_mf_bt4_skip : sample=2% (threshold=15%)
output-bmk-results.py(232): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
output-bmk-results.py(192): if metric in metric_utils.higher_regress_metrics:
@@ -2237,7 +1936,6 @@ output-bmk-results.py(305): f_skip.close()
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(33): os.remove(self.filename)
output-bmk-results.py(306): f_regr.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
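
Aside on the traced logic: the hunks above all exercise the same path in output-bmk-results.py. For every symbol row the script looks up a threshold (falling back to the 15% default whenever get_specific_thresholds returns NaN, as in every hunk here), applies the regression or improvement predicate to the relative sample count, and, when the predicate fires but the symbol's md5sums are identical across builds, routes the entry to f_skip instead of reporting it. Below is a minimal sketch of that flow, reconstructed from the traced lines 183-184, 192-193 and 228-244, assuming rel_sample is the new/old ratio in percent (100 means unchanged); classify() and its single-pass structure are illustrative, not the script's API.

# Sketch of the classification flow seen in the trace above.
# Assumption: rel_value = 100 * current / previous, so 100 == "no change".

def is_entry_regression(rel_value, threshold):
    # Traced at output-bmk-results.py(183-184): more samples means slower.
    return rel_value - 100 > threshold

def is_entry_improvement(rel_value, threshold):
    # Traced at output-bmk-results.py(192-193).
    return 100 - rel_value > threshold

def classify(row, threshold=15):
    """Return 'regression', 'improvement', 'skipped' or None for one symbol row."""
    rel = row["rel_sample"]
    unchanged_md5 = (
        row["symbol_md5sum_x"] == row["symbol_md5sum_y"]
        and row["symbol_md5sum_x"] != "-1"
        and row["symbol_md5sum_x"] != "d41d8cd98f00b204e9800998ecf8427e"  # md5("")
    )
    if is_entry_regression(rel, threshold) or is_entry_improvement(rel, threshold):
        # Traced at output-bmk-results.py(239-244): identical code but changed
        # sample counts is treated as noise and written to the skip file.
        if unchanged_md5:
            return "skipped"
        return "regression" if rel > 100 else "improvement"
    return None

row = {"rel_sample": 200, "symbol_md5sum_x": "abc", "symbol_md5sum_y": "abc"}
print(classify(row))  # -> skipped

This is why the 541.leela_r rows above are diverted to f_skip, while the 508.namd_r:libc.so.6 slowdown, whose md5sums differ, goes on to f_out, f_regr and f_ebp.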
diff --git a/notify/results.regressions b/notify/results.regressions
index ca0ba62..64d60d1 100644
--- a/notify/results.regressions
+++ b/notify/results.regressions
@@ -1,4 +1 @@
-# deepsjeng_r_base.default,slowed down by 4% - 531.deepsjeng_r - from 10630 to 11005 perf samples
-# libc.so.6,slowed down by 50% - 538.imagick_r:libc.so.6 - from 2 to 3 perf samples
-# [.] _IO_fread,slowed down by 50% - 538.imagick_r:[.] _IO_fread - from 2 to 3 perf samples
-# [.] __vfscanf_internal,slowed down by 100% - 544.nab_r:[.] __vfscanf_internal - from 1 to 2 perf samples
+# libc.so.6,slowed down by 100% - 508.namd_r:libc.so.6 - from 1 to 2 perf samples
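
For context, each line in results.regressions is the "# <symbol>,<long_diag>" record emitted by f_regr.write at output-bmk-results.py line 250 in the trace above. A trivial sketch reproducing the single surviving entry, with values copied from this diff:

# Sketch only: the "# <symbol>,<long_diag>" record written by f_regr.write
# (output-bmk-results.py line 250). Values taken from the entry above.
symbol = "libc.so.6"
long_diag = "slowed down by 100% - 508.namd_r:libc.so.6 - from 1 to 2 perf samples"
print("# {0},{1}".format(symbol, long_diag))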
diff --git a/notify/symbol.improvement b/notify/symbol.improvement
index 1283161..6a6604e 100644
--- a/notify/symbol.improvement
+++ b/notify/symbol.improvement
@@ -1,2 +1 @@
-50,508.namd_r,[.] __vfscanf_internal,sped up by 50% - 508.namd_r:[.] __vfscanf_internal,sped up by 50% - 508.namd_r:[.] __vfscanf_internal - from 2 to 1 perf samples
-50,508.namd_r,libc.so.6,sped up by 50% - 508.namd_r:libc.so.6,sped up by 50% - 508.namd_r:libc.so.6 - from 2 to 1 perf samples
+33,538.imagick_r,[.] _IO_fread,sped up by 33% - 538.imagick_r:[.] _IO_fread,sped up by 33% - 538.imagick_r:[.] _IO_fread - from 3 to 2 perf samples
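
The symbol.improvement and symbol.regression files hold the CSV rows written by f_out.write_csv, i.e. (percent_change, benchmark, symbol, short_diag, long_diag). A simplified sketch of how the two diagnostic strings are assembled, following the traced get_short_long_diag lines 137-174; only the "sample" metric and the symbol-level path seen in this log are covered, and the helper signature is illustrative:

# Simplified sketch of the diagnostic strings built in get_short_long_diag
# (traced at output-bmk-results.py lines 137-174).

def short_long_diag(bmk, symbol, rel_value, prev_value, curr_value, change_kind):
    prefix = {"regression": "slowed down by", "improvement": "sped up by"}[change_kind]
    item = bmk + ":" + symbol
    short_diag = "{1} {2}% - {0}".format(item, prefix, abs(rel_value - 100))
    long_diag = "{0} - from {1} to {2} perf samples".format(short_diag, prev_value, curr_value)
    return abs(rel_value - 100), short_diag, long_diag

# Matches the 538.imagick_r improvement row above: 3 -> 2 samples,
# rel_value of 67 ~ 100*2/3, rounded as in the log.
print(short_long_diag("538.imagick_r", "[.] _IO_fread", 67, 3, 2, "improvement")[2])
# -> sped up by 33% - 538.imagick_r:[.] _IO_fread - from 3 to 2 perf samples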
diff --git a/notify/symbol.regression b/notify/symbol.regression
index d95c2e1..5cce02f 100644
--- a/notify/symbol.regression
+++ b/notify/symbol.regression
@@ -1,3 +1 @@
-50,538.imagick_r,libc.so.6,slowed down by 50% - 538.imagick_r:libc.so.6,slowed down by 50% - 538.imagick_r:libc.so.6 - from 2 to 3 perf samples
-50,538.imagick_r,[.] _IO_fread,slowed down by 50% - 538.imagick_r:[.] _IO_fread,slowed down by 50% - 538.imagick_r:[.] _IO_fread - from 2 to 3 perf samples
-100,544.nab_r,[.] __vfscanf_internal,slowed down by 100% - 544.nab_r:[.] __vfscanf_internal,slowed down by 100% - 544.nab_r:[.] __vfscanf_internal - from 1 to 2 perf samples
+100,508.namd_r,libc.so.6,slowed down by 100% - 508.namd_r:libc.so.6,slowed down by 100% - 508.namd_r:libc.so.6 - from 1 to 2 perf samples
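
Finally, the write/write_csv/close traces scattered through output-bmk-results.log show the small Outfile wrapper the script uses for all of these notify files: writes are gated on a predicate, and close() deletes the file again when nothing was written, which is why a result file only exists for the categories that actually triggered. A reconstruction from the traced lines 29-43; the constructor is not traced, so its shape below is an assumption:

# Reconstruction of the Outfile behaviour visible in the trace
# (output-bmk-results.py lines 29-43). __init__ is assumed, not traced.
import csv
import os

class Outfile:
    def __init__(self, filename, mode, predicate=True):
        self.filename = filename
        self.predicate = predicate
        self.outf = open(filename, mode) if predicate else None
        self.csvwriter = csv.writer(self.outf) if predicate else None

    def write(self, string):
        if not self.predicate or not self.outf:
            return
        self.outf.write(string)

    def write_csv(self, arr):
        if not self.predicate or not self.csvwriter:
            return
        self.csvwriter.writerow(arr)

    def close(self):
        if not self.outf:
            return
        self.outf.close()
        if os.stat(self.filename).st_size == 0:
            os.remove(self.filename)  # empty result files are not published

The predicate gating matches the Outfile(..., predicate=(details=="verbose")) call at the top of this log, which keeps the per-symbol CSVs from being produced on non-verbose runs.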