author    Ingo Molnar <mingo@elte.hu>    2011-04-27 04:36:37 +0200
committer Ingo Molnar <mingo@elte.hu>    2011-04-26 20:04:56 +0200
commit    dcd9936a5a6d89512b5323c1145647f2dbe0236f (patch)
tree      5ad83bc4f2a61e5ff65d4d0950cee6a36b65696d /tools/perf/builtin-stat.c
parent    749141d926faf23ef811686a8050e7cf13dc223f (diff)
perf stat: Factor out shadow stats
Create update_shadow_stats(), which is then used in both read_counter_aggr() and read_counter().

This not only simplifies the code but also fixes a bug: HW_CACHE_REFERENCES was not updated in read_counter().

Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/n/tip-9uc55z3g88r47exde7zxjm6p@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
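For quick reference, here is a condensed sketch of the call structure after this patch, pieced together from the diff below. It is not the exact file contents: counter reading, scaling and error handling are elided, and the loop bound 'ncpus' is a placeholder standing in for the real per-CPU iteration in builtin-stat.c.

    static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
    {
            /* single shared place that feeds the "shadow" ratio/rate statistics */
            if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
                    update_stats(&runtime_nsecs_stats[0], count[0]);
            else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
                    update_stats(&runtime_cycles_stats[0], count[0]);
            else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
                    update_stats(&runtime_branches_stats[0], count[0]);
            else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
                    update_stats(&runtime_cacherefs_stats[0], count[0]);
    }

    static int read_counter_aggr(struct perf_evsel *counter)
    {
            u64 *count;

            /* ... read and scale the aggregated counter values into count ... */

            update_shadow_stats(counter, count);    /* shared path */
            return 0;
    }

    static int read_counter(struct perf_evsel *counter)
    {
            u64 *count;
            int cpu;

            for (cpu = 0; cpu < ncpus; cpu++) {     /* 'ncpus': placeholder loop bound */
                    count = counter->counts->cpu[cpu].values;

                    /* previously open-coded; now also covers HW_CACHE_REFERENCES */
                    update_shadow_stats(counter, count);
            }
            return 0;
    }

Both readers now funnel through the same helper, which is what closes the gap where read_counter() never updated the cache-references statistic.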
Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--    tools/perf/builtin-stat.c    33
1 file changed, 19 insertions, 14 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 0de3a2002f4..e5e82f62c78 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -194,6 +194,23 @@ static inline int nsec_counter(struct perf_evsel *evsel)
}
/*
+ * Update various tracking values we maintain to print
+ * more semantic information such as miss/hit ratios,
+ * instruction rates, etc:
+ */
+static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
+{
+ if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+ update_stats(&runtime_nsecs_stats[0], count[0]);
+ else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
+ update_stats(&runtime_cycles_stats[0], count[0]);
+ else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
+ update_stats(&runtime_branches_stats[0], count[0]);
+ else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
+ update_stats(&runtime_cacherefs_stats[0], count[0]);
+}
+
+/*
* Read out the results of a single counter:
* aggregate counts across CPUs in system-wide mode
*/
@@ -218,14 +235,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
/*
* Save the full runtime - to allow normalization during printout:
*/
- if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
- update_stats(&runtime_nsecs_stats[0], count[0]);
- else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[0], count[0]);
- else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[0], count[0]);
- else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[0], count[0]);
+ update_shadow_stats(counter, count);
return 0;
}
@@ -245,12 +255,7 @@ static int read_counter(struct perf_evsel *counter)
count = counter->counts->cpu[cpu].values;
- if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count[0]);
- if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[cpu], count[0]);
- if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[cpu], count[0]);
+ update_shadow_stats(counter, count);
}
return 0;