author    Nicolas Pitre <nicolas.pitre@linaro.org>    2015-01-12 17:21:09 -0500
committer Daniel Lezcano <daniel.lezcano@linaro.org>  2015-01-15 13:38:04 +0100
commit    e4ca629c8788b10e631e6322bc2b7d5e31948511 (patch)
tree      c66a162f78f2fbff0bb337f2ca9c93736aff5dc6
parent    57a61a9ba3e991e2cf9a769114eb264cf47d7c47 (diff)
irq_timings: function to retrieve time of next predicted IRQ
Those events in the past, if any, are purged, and then the first item on
the list, if any, contains our next predicted IRQ time. We have access to
the standard deviation and could eventually use it to qualify our
confidence in the prediction. For now, only the raw prediction is returned.

Signed-off-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>

Conflicts:
	include/linux/irq.h
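For illustration, here is a minimal caller sketch (not part of this patch):
an idle-entry path could cap its expected sleep length with the value
returned by irqt_get_next_prediction(). The names pick_idle_state(),
expected_us and select_idle_state() are hypothetical and used only to show
how the return value is meant to be interpreted.

	/*
	 * Hypothetical caller sketch, not part of this patch: cap the
	 * expected idle duration with the predicted next-IRQ delta.
	 */
	static int pick_idle_state(int cpu, u32 expected_us)
	{
		u32 irq_us = irqt_get_next_prediction(cpu);

		/* Zero means "no prediction available", not "IRQ due now". */
		if (irq_us && irq_us < expected_us)
			expected_us = irq_us;

		return select_idle_state(expected_us);
	}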
-rw-r--r--  include/linux/irq.h   |  6
-rw-r--r--  kernel/irq/timings.c  | 28
2 files changed, 34 insertions(+), 0 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index f26e7363a261..169b4ff9a72c 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -868,4 +868,10 @@ static inline u32 irq_reg_readl(struct irq_chip_generic *gc,
return readl(gc->reg_base + reg_offset);
}
+#ifdef CONFIG_IRQ_TIMINGS
+extern u32 irqt_get_next_prediction(int cpu);
+#else
+static inline u32 irqt_get_next_prediction(int cpu) { return 0; }
+#endif
+
#endif /* _LINUX_IRQ_H */
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
index b9498f306d61..6bc64aa6685d 100644
--- a/kernel/irq/timings.c
+++ b/kernel/irq/timings.c
@@ -106,6 +106,34 @@ static void irqt_enqueue_prediction(ktime_t now, struct irqt_stat *s)
raw_spin_unlock(lock);
}
+/**
+ * irqt_get_next_prediction - get relative time before next predicted IRQ
+ *
+ * @cpu: the CPU number for which a prediction is wanted
+ *
+ * This returns the relative time in microseconds before the next expected
+ * IRQ on the given CPU, or zero if no prediction is available. These
+ * predictions are not guaranteed to be reliable and will fail from time to
+ * time, e.g. when the predicted IRQ simply never comes.
+ */
+u32 irqt_get_next_prediction(int cpu)
+{
+ raw_spinlock_t *lock = &per_cpu(irqt_predictions_lock, cpu);
+ struct list_head *head = &per_cpu(irqt_predictions, cpu);
+ unsigned long flags;
+ ktime_t now;
+ struct irqt_prediction *next;
+ u32 result;
+
+ raw_spin_lock_irqsave(lock, flags);
+ now = ktime_get();
+ irqt_purge(now, head);
+ next = list_first_entry_or_null(head, struct irqt_prediction, node);
+ result = next ? ktime_us_delta(next->time, now) : 0;
+ raw_spin_unlock_irqrestore(lock, flags);
+ return result;
+}
+
/*
* irqt_process - update timing interval statistics for the given IRQ
*