Diffstat (limited to 'notify/output-bmk-results.log')
-rw-r--r--  notify/output-bmk-results.log  924
1 file changed, 462 insertions, 462 deletions
diff --git a/notify/output-bmk-results.log b/notify/output-bmk-results.log
index 49c79b5..fc05154 100644
--- a/notify/output-bmk-results.log
+++ b/notify/output-bmk-results.log
@@ -1,814 +1,814 @@
--- modulename: output-bmk-results, funcname: <module>
<string>(1): --- modulename: output-bmk-results, funcname: main
-output-bmk-results.py(278): results_csv = sys.argv[1]
-output-bmk-results.py(279): variability_file = sys.argv[2]
-output-bmk-results.py(280): run_step_artifacts_dir = sys.argv[3]
-output-bmk-results.py(281): metric = sys.argv[4]
-output-bmk-results.py(282): mode = sys.argv[5]
-output-bmk-results.py(283): details = sys.argv[6]
-output-bmk-results.py(285): merged_df = read_results_csv(results_csv)
+output-bmk-results.py(287): results_csv = sys.argv[1]
+output-bmk-results.py(288): variability_file = sys.argv[2]
+output-bmk-results.py(289): run_step_artifacts_dir = sys.argv[3]
+output-bmk-results.py(290): metric = sys.argv[4]
+output-bmk-results.py(291): mode = sys.argv[5]
+output-bmk-results.py(292): details = sys.argv[6]
+output-bmk-results.py(294): merged_df = read_results_csv(results_csv)
--- modulename: output-bmk-results, funcname: read_results_csv
-output-bmk-results.py(268): df = pd.read_csv(results_csv)
-output-bmk-results.py(269): df = df.fillna(-1)
-output-bmk-results.py(271): for metric in get_comparable_metrics(df):
+output-bmk-results.py(277): df = pd.read_csv(results_csv)
+output-bmk-results.py(278): df = df.fillna(-1)
+output-bmk-results.py(280): for metric in get_comparable_metrics(df):
--- modulename: output-bmk-results, funcname: get_comparable_metrics
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
--- modulename: output-bmk-results, funcname: <genexpr>
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(190): & metric_utils.comparable_metrics
-output-bmk-results.py(189): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
-output-bmk-results.py(272): df["rel_" + metric] = df["rel_" + metric].astype("int")
-output-bmk-results.py(273): df[metric + "_x"] = df[metric + "_x"].astype("int")
-output-bmk-results.py(274): df[metric + "_y"] = df[metric + "_y"].astype("int")
-output-bmk-results.py(271): for metric in get_comparable_metrics(df):
-output-bmk-results.py(272): df["rel_" + metric] = df["rel_" + metric].astype("int")
-output-bmk-results.py(273): df[metric + "_x"] = df[metric + "_x"].astype("int")
-output-bmk-results.py(274): df[metric + "_y"] = df[metric + "_y"].astype("int")
-output-bmk-results.py(271): for metric in get_comparable_metrics(df):
-output-bmk-results.py(272): df["rel_" + metric] = df["rel_" + metric].astype("int")
-output-bmk-results.py(273): df[metric + "_x"] = df[metric + "_x"].astype("int")
-output-bmk-results.py(274): df[metric + "_y"] = df[metric + "_y"].astype("int")
-output-bmk-results.py(271): for metric in get_comparable_metrics(df):
-output-bmk-results.py(272): df["rel_" + metric] = df["rel_" + metric].astype("int")
-output-bmk-results.py(273): df[metric + "_x"] = df[metric + "_x"].astype("int")
-output-bmk-results.py(274): df[metric + "_y"] = df[metric + "_y"].astype("int")
-output-bmk-results.py(271): for metric in get_comparable_metrics(df):
-output-bmk-results.py(275): return df
-output-bmk-results.py(286): read_specific_variability_file(variability_file)
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(199): & metric_utils.comparable_metrics
+output-bmk-results.py(198): return set(metric[len("rel_"):] for metric in df.columns[2:] if metric.startswith("rel_")) \
+output-bmk-results.py(281): df["rel_" + metric] = df["rel_" + metric].astype("int")
+output-bmk-results.py(282): df[metric + "_x"] = df[metric + "_x"].astype("int")
+output-bmk-results.py(283): df[metric + "_y"] = df[metric + "_y"].astype("int")
+output-bmk-results.py(280): for metric in get_comparable_metrics(df):
+output-bmk-results.py(281): df["rel_" + metric] = df["rel_" + metric].astype("int")
+output-bmk-results.py(282): df[metric + "_x"] = df[metric + "_x"].astype("int")
+output-bmk-results.py(283): df[metric + "_y"] = df[metric + "_y"].astype("int")
+output-bmk-results.py(280): for metric in get_comparable_metrics(df):
+output-bmk-results.py(281): df["rel_" + metric] = df["rel_" + metric].astype("int")
+output-bmk-results.py(282): df[metric + "_x"] = df[metric + "_x"].astype("int")
+output-bmk-results.py(283): df[metric + "_y"] = df[metric + "_y"].astype("int")
+output-bmk-results.py(280): for metric in get_comparable_metrics(df):
+output-bmk-results.py(281): df["rel_" + metric] = df["rel_" + metric].astype("int")
+output-bmk-results.py(282): df[metric + "_x"] = df[metric + "_x"].astype("int")
+output-bmk-results.py(283): df[metric + "_y"] = df[metric + "_y"].astype("int")
+output-bmk-results.py(280): for metric in get_comparable_metrics(df):
+output-bmk-results.py(284): return df
+output-bmk-results.py(295): read_specific_variability_file(variability_file)
--- modulename: output-bmk-results, funcname: read_specific_variability_file
output-bmk-results.py(51): if not os.path.exists(bmk_specific_filename):
output-bmk-results.py(53): specific_variability = pd.read_csv(bmk_specific_filename, index_col=False)
-output-bmk-results.py(287): output_bmk_results(merged_df, run_step_artifacts_dir, metric, mode, details)
+output-bmk-results.py(296): output_bmk_results(merged_df, run_step_artifacts_dir, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results
-output-bmk-results.py(239): f_regr = Outfile("{0}/results.regressions".format(run_step_artifacts), "w")
+output-bmk-results.py(248): f_regr = Outfile("{0}/results.regressions".format(run_step_artifacts), "w")
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(240): f_ebp = Outfile("{0}/extra-bisect-params".format(run_step_artifacts), "w")
+output-bmk-results.py(249): f_ebp = Outfile("{0}/extra-bisect-params".format(run_step_artifacts), "w")
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(241): f_skip = Outfile("{0}/any.skipped".format(run_step_artifacts), "w", predicate=(details=="verbose"))
+output-bmk-results.py(250): f_skip = Outfile("{0}/any.skipped".format(run_step_artifacts), "w", predicate=(details=="verbose"))
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(243): f_ebp.write("extra_build_params=")
+output-bmk-results.py(252): f_ebp.write("extra_build_params=")
--- modulename: output-bmk-results, funcname: write
output-bmk-results.py(36): if not self.predicate or not self.outf:
output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(247): df = merged_df[merged_df["benchmark"] != "Mean"]
-output-bmk-results.py(250): exe_df = df[df["symbol"].str.endswith("_base.default")]
-output-bmk-results.py(251): sym_df = df[~df["symbol"].str.endswith("_base.default")]
-output-bmk-results.py(253): output_bmk_results_1(exe_df, "exe", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
+output-bmk-results.py(256): df = merged_df[merged_df["benchmark"] != "Mean"]
+output-bmk-results.py(259): exe_df = df[df["symbol"].str.endswith("_base.default")]
+output-bmk-results.py(260): sym_df = df[~df["symbol"].str.endswith("_base.default")]
+output-bmk-results.py(262): output_bmk_results_1(exe_df, "exe", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
-output-bmk-results.py(201): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
+output-bmk-results.py(210): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(203): rel_metric = "rel_" + metric
-output-bmk-results.py(204): out_df = results_df[results_df[rel_metric] != -1]
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(212): rel_metric = "rel_" + metric
+output-bmk-results.py(213): out_df = results_df[results_df[rel_metric] != -1]
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 400.perlbench,perlbench_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 401.bzip2,bzip2_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 403.gcc,gcc_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 429.mcf,mcf_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 433.milc,milc_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 444.namd,namd_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 445.gobmk,gobmk_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 447.dealII,dealII_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 450.soplex,soplex_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 453.povray,povray_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 456.hmmer,hmmer_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 458.sjeng,sjeng_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 462.libquantum,libquantum_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 464.h264ref,h264ref_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 470.lbm,lbm_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 471.omnetpp,omnetpp_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 473.astar,astar_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 482.sphinx3,sphinx_livepretend_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.regression : 483.xalancbmk,Xalan_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(236): f_out.close()
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(245): f_out.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(254): output_bmk_results_1(exe_df, "exe", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
+output-bmk-results.py(263): output_bmk_results_1(exe_df, "exe", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
-output-bmk-results.py(201): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
+output-bmk-results.py(210): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(203): rel_metric = "rel_" + metric
-output-bmk-results.py(204): out_df = results_df[results_df[rel_metric] != -1]
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(212): rel_metric = "rel_" + metric
+output-bmk-results.py(213): out_df = results_df[results_df[rel_metric] != -1]
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 400.perlbench,perlbench_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 401.bzip2,bzip2_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 403.gcc,gcc_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 429.mcf,mcf_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 433.milc,milc_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 444.namd,namd_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 445.gobmk,gobmk_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 447.dealII,dealII_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 450.soplex,soplex_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 453.povray,povray_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 456.hmmer,hmmer_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 458.sjeng,sjeng_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 462.libquantum,libquantum_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 464.h264ref,h264ref_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 470.lbm,lbm_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 471.omnetpp,omnetpp_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 473.astar,astar_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 482.sphinx3,sphinx_livepretend_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking exe.improvement : 483.xalancbmk,Xalan_base.default : size=0% (threshold=1%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(236): f_out.close()
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(245): f_out.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(256): output_bmk_results_1(sym_df, "symbol", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
+output-bmk-results.py(265): output_bmk_results_1(sym_df, "symbol", "regression", f_regr, f_skip, f_ebp, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
-output-bmk-results.py(201): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
+output-bmk-results.py(210): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(203): rel_metric = "rel_" + metric
-output-bmk-results.py(204): out_df = results_df[results_df[rel_metric] != -1]
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(212): rel_metric = "rel_" + metric
+output-bmk-results.py(213): out_df = results_df[results_df[rel_metric] != -1]
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking symbol.regression : 447.dealII,libstdc++.so.6.0.30 : size=0% (threshold=10%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking symbol.regression : 471.omnetpp,libc.so.6 : size=0% (threshold=10%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_regression
-output-bmk-results.py(166): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(167): return (result - 100 > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(236): f_out.close()
+output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(176): return (result - 100 > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(245): f_out.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(257): output_bmk_results_1(sym_df, "symbol", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
+output-bmk-results.py(266): output_bmk_results_1(sym_df, "symbol", "improvement", None, f_skip, None, run_step_artifacts, metric, mode, details)
--- modulename: output-bmk-results, funcname: output_bmk_results_1
-output-bmk-results.py(201): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
+output-bmk-results.py(210): f_out = Outfile("{0}/{1}.{2}".format(run_step_artifacts, sym_type, change_kind), "w", predicate=(details=="verbose"))
--- modulename: output-bmk-results, funcname: __init__
output-bmk-results.py(19): self.filename=filename
output-bmk-results.py(20): self.predicate=predicate
output-bmk-results.py(21): if predicate:
output-bmk-results.py(22): self.outf = open(filename, mode)
output-bmk-results.py(23): self.csvwriter = csv.writer(self.outf)
-output-bmk-results.py(203): rel_metric = "rel_" + metric
-output-bmk-results.py(204): out_df = results_df[results_df[rel_metric] != -1]
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(212): rel_metric = "rel_" + metric
+output-bmk-results.py(213): out_df = results_df[results_df[rel_metric] != -1]
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking symbol.improvement : 447.dealII,libstdc++.so.6.0.30 : size=0% (threshold=10%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(209): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(218): threshold = get_threshold(sym_type, metric, mode, row["benchmark"], row["symbol"])
--- modulename: output-bmk-results, funcname: get_threshold
output-bmk-results.py(98): if metric == "sample":
output-bmk-results.py(107): if metric == "num_vect_loops" or metric == "num_sve_loops":
output-bmk-results.py(110): return default_threshold[(change_kind,metric,mode)]
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
-output-bmk-results.py(212): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
-output-bmk-results.py(211): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
+output-bmk-results.py(221): .format(sym_type, change_kind, row["benchmark"], row["symbol"], metric, 100-row["rel_" + metric], threshold))
+output-bmk-results.py(220): print("DEBUG: checking {0}.{1} : {2},{3} : {4}={5}% (threshold={6}%)"\
DEBUG: checking symbol.improvement : 471.omnetpp,libc.so.6 : size=0% (threshold=10%)
-output-bmk-results.py(215): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
+output-bmk-results.py(224): if not is_entry_xxx[change_kind](metric, row["rel_" + metric], threshold):
--- modulename: output-bmk-results, funcname: is_entry_improvement
-output-bmk-results.py(175): if metric in metric_utils.higher_regress_metrics:
-output-bmk-results.py(176): return (100 - result > threshold)
-output-bmk-results.py(216): continue
-output-bmk-results.py(207): for index, row in out_df.iterrows():
-output-bmk-results.py(236): f_out.close()
+output-bmk-results.py(184): if metric in metric_utils.higher_regress_metrics:
+output-bmk-results.py(185): return (100 - result > threshold)
+output-bmk-results.py(225): continue
+output-bmk-results.py(216): for index, row in out_df.iterrows():
+output-bmk-results.py(245): f_out.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(259): f_ebp.write("\n")
+output-bmk-results.py(268): f_ebp.write("\n")
--- modulename: output-bmk-results, funcname: write
output-bmk-results.py(36): if not self.predicate or not self.outf:
output-bmk-results.py(38): self.outf.write(string)
-output-bmk-results.py(261): f_skip.close()
+output-bmk-results.py(270): f_skip.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(262): f_regr.close()
+output-bmk-results.py(271): f_regr.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
output-bmk-results.py(33): os.remove(self.filename)
-output-bmk-results.py(263): f_ebp.close()
+output-bmk-results.py(272): f_ebp.close()
--- modulename: output-bmk-results, funcname: close
output-bmk-results.py(29): if not self.outf:
output-bmk-results.py(31): self.outf.close()
output-bmk-results.py(32): if os.stat(self.filename).st_size == 0:
-output-bmk-results.py(288): return 0
+output-bmk-results.py(297): return 0