author     Juri Lelli <juri.lelli@arm.com>  2015-04-30 11:53:48 +0100
committer  Vincent Guittot <vincent.guittot@linaro.org>  2015-07-31 16:25:01 +0200
commit     a80e7a405baab8797accafde6db87b5572ef6c33 (patch)
tree       7a1701462b2179784ecf2f25b943bd4423043472
parent     3c024f0ea6521a6877919fbb9f87cf9822b67f9b (diff)
WIP: arm64: Cpu invariant scheduler load-tracking support
arm64 counterpart of the arm bits, with some variations. Use the max
cap state of each type of CPU to set up cpu_scale.

Change-Id: Ib33b5fa379d520ff84985bca8ecd2257ef0fcab9
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
-rw-r--r--  arch/arm64/include/asm/topology.h   3
-rw-r--r--  arch/arm64/kernel/topology.c       45
2 files changed, 48 insertions, 0 deletions
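The cpu_scale value introduced below is read straight out of an energy-model
capacity table. As a point of reference, here is a minimal sketch of what such
a table looks like; the struct layout follows the sched_group_energy /
capacity_state definitions added elsewhere in this patch series, but the
numbers are purely illustrative and not part of this patch:

/* Illustrative capacity table for a hypothetical little cluster.
 * cap_states is sorted in ascending order, so the last entry (index
 * nr_cap_states - 1) holds the max capacity that update_cpu_capacity()
 * below feeds into cpu_scale. All numbers are made up for the example. */
struct capacity_state {
	unsigned long cap;	/* compute capacity at this OPP */
	unsigned long power;	/* power consumption at this OPP */
};

static struct capacity_state cap_states_cluster_little[] = {
	{ .cap = 235, .power = 26, },	/* lowest OPP */
	{ .cap = 303, .power = 30, },
	{ .cap = 447, .power = 47, },	/* highest OPP: cpu_scale becomes 447 */
};

static struct sched_group_energy energy_cluster_little = {
	.nr_cap_states = ARRAY_SIZE(cap_states_cluster_little),
	.cap_states    = cap_states_cluster_little,
};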
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
index d8075eaefb42..38f6ed6ff3f1 100644
--- a/arch/arm64/include/asm/topology.h
+++ b/arch/arm64/include/asm/topology.h
@@ -31,6 +31,9 @@ unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu);
 
 DECLARE_PER_CPU(atomic_long_t, cpu_freq_capacity);
 
+#define arch_scale_cpu_capacity arm_arch_scale_cpu_capacity
+extern unsigned long arm_arch_scale_cpu_capacity(struct sched_domain *sd, int cpu);
+
 #else
 
 static inline void init_cpu_topology(void) { }
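The #define above is what makes the scheduler pick up the per-cpu value: the
scheduler core only installs a default arch_scale_cpu_capacity() when the
architecture has not defined the macro. A simplified sketch of that fallback
(trimmed; the real kernel/sched/sched.h version of this era also special-cases
SMT domains):

/* Simplified sketch of the scheduler-core fallback that the #define
 * replaces: without an arch override, every CPU is assumed to run at
 * full capacity, SCHED_CAPACITY_SCALE (1024). */
#ifndef arch_scale_cpu_capacity
static __always_inline
unsigned long arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
{
	return SCHED_CAPACITY_SCALE;
}
#endif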
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
index cd5a481c0d7d..2c558e7dc7c6 100644
--- a/arch/arm64/kernel/topology.c
+++ b/arch/arm64/kernel/topology.c
@@ -23,6 +23,18 @@
 #include <asm/cputype.h>
 #include <asm/topology.h>
 
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arm_arch_scale_cpu_capacity(struct sched_domain *sd, int cpu)
+{
+	return per_cpu(cpu_scale, cpu);
+}
+
+static void set_capacity_scale(unsigned int cpu, unsigned long capacity)
+{
+	per_cpu(cpu_scale, cpu) = capacity;
+}
+
 static int __init get_cpu_for_node(struct device_node *node)
 {
 	struct device_node *cpu_node;
@@ -223,11 +235,33 @@ unsigned long arm_arch_scale_freq_capacity(struct sched_domain *sd, int cpu)
 struct cpu_topology cpu_topology[NR_CPUS];
 EXPORT_SYMBOL_GPL(cpu_topology);
 
+static inline const struct sched_group_energy *cpu_core_energy(int cpu)
+{
+	return NULL;
+}
+
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_topology[cpu].core_sibling;
 }
 
+static void update_cpu_capacity(unsigned int cpu)
+{
+	unsigned long capacity;
+
+	if (!cpu_core_energy(cpu)) {
+		capacity = SCHED_CAPACITY_SCALE;
+	} else {
+		int max_cap_idx = cpu_core_energy(cpu)->nr_cap_states - 1;
+		capacity = cpu_core_energy(cpu)->cap_states[max_cap_idx].cap;
+	}
+
+	set_capacity_scale(cpu, capacity);
+
+	pr_info("CPU%d: update cpu_capacity %lu\n",
+		cpu, arch_scale_cpu_capacity(NULL, cpu));
+}
+
 static void update_siblings_masks(unsigned int cpuid)
 {
 	struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -289,6 +323,7 @@ void store_cpu_topology(unsigned int cpuid)
 
 topology_populated:
 	update_siblings_masks(cpuid);
+	update_cpu_capacity(cpuid);
 }
 
 static void __init reset_cpu_topology(void)
@@ -309,6 +344,14 @@ static void __init reset_cpu_topology(void)
 	}
 }
 
+static void __init reset_cpu_capacity(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		set_capacity_scale(cpu, SCHED_CAPACITY_SCALE);
+}
+
 void __init init_cpu_topology(void)
 {
 	reset_cpu_topology();
@@ -319,4 +362,6 @@ void __init init_cpu_topology(void)
 	 */
 	if (parse_dt_topology())
 		reset_cpu_topology();
+
+	reset_cpu_capacity();
 }
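Since cpu_core_energy() is still a stub returning NULL, every CPU ends up with
the default capacity of SCHED_CAPACITY_SCALE (1024) for now. A hedged sketch of
how a follow-up could wire in per-cluster tables, using the cluster_id parsed
by the topology code and hypothetical energy_cluster_little/energy_cluster_big
tables in the style of the earlier example (which cluster is big is
board-specific, so the test below is only illustrative):

/* Hypothetical follow-up, not part of this patch: hand out a per-cluster
 * energy table so update_cpu_capacity() can read the max cap state.
 * energy_cluster_little/energy_cluster_big are assumed to be populated
 * struct sched_group_energy tables for the two clusters. */
static inline const struct sched_group_energy *cpu_core_energy(int cpu)
{
	return cpu_topology[cpu].cluster_id ? &energy_cluster_little
					    : &energy_cluster_big;
}

With tables like that in place, the pr_info() in update_cpu_capacity() would
report, for example, "CPU4: update cpu_capacity 447" on a little CPU instead
of the uniform 1024.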