 arch/arm/kernel/topology.c | 69 +++++++++++++++++++++++++++++++++++++++++++++
 kernel/sched/fair.c        | 18 ++++++------------
 2 files changed, 75 insertions(+), 12 deletions(-)
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 8200deaa14f6..28d2be00ed89 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -126,6 +126,75 @@ void store_cpu_topology(unsigned int cpuid)
cpu_topology[cpuid].socket_id, mpidr);
}
+
+#ifdef CONFIG_SCHED_HMP
+
+static const char * const little_cores[] = {
+	"arm,cortex-a7",
+	NULL,
+};
+
+static bool is_little_cpu(struct device_node *cn)
+{
+	const char * const *lc;
+	for (lc = little_cores; *lc; lc++)
+		if (of_device_is_compatible(cn, *lc))
+			return true;
+	return false;
+}
+
+void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+					struct cpumask *slow)
+{
+	struct device_node *cn = NULL;
+	int cpu = 0;
+
+	cpumask_clear(fast);
+	cpumask_clear(slow);
+
+	/*
+	 * Use the config options if they are given. This is useful for
+	 * testing HMP scheduling on systems without a big.LITTLE architecture.
+	 */
+	if (strlen(CONFIG_HMP_FAST_CPU_MASK) && strlen(CONFIG_HMP_SLOW_CPU_MASK)) {
+		if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, fast))
+			WARN(1, "Failed to parse HMP fast cpu mask!\n");
+		if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, slow))
+			WARN(1, "Failed to parse HMP slow cpu mask!\n");
+		return;
+	}
+
+	/*
+	 * Otherwise, parse the device tree for little cores.
+	 */
+	while ((cn = of_find_node_by_type(cn, "cpu"))) {
+
+		if (cpu >= num_possible_cpus())
+			break;
+
+		if (is_little_cpu(cn))
+			cpumask_set_cpu(cpu, slow);
+		else
+			cpumask_set_cpu(cpu, fast);
+
+		cpu++;
+	}
+
+	if (!cpumask_empty(fast) && !cpumask_empty(slow))
+		return;
+
+	/*
+	 * We didn't find both big and little cores, so treat all cores as
+	 * fast; this keeps the system running, with all cores treated
+	 * equally.
+	 */
+	cpumask_setall(fast);
+	cpumask_clear(slow);
+}
+
+#endif /* CONFIG_SCHED_HMP */
+
+
/*
* init_cpu_topology is called at boot when only one cpu is running
* which prevent simultaneous write access to cpu_topology array
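
Taken together, the classification above is: any cpu node whose compatible
string matches an entry in little_cores lands in the slow mask, every other
cpu in the fast mask, with an all-fast fallback when only one type is found.
Below is a minimal userspace sketch of that same logic, using a hypothetical
array of per-cpu compatible strings in place of the device-tree walk
(of_find_node_by_type() and of_device_is_compatible() are kernel-only); the
cpu list and bitmask representation are illustrative, not the kernel's.

#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for the "compatible" string of each DT cpu node. */
static const char *cpu_compatible[] = {
	"arm,cortex-a15",
	"arm,cortex-a15",
	"arm,cortex-a7",
	"arm,cortex-a7",
};

static const char * const little_cores[] = { "arm,cortex-a7", NULL };

static int is_little(const char *compat)
{
	const char * const *lc;
	for (lc = little_cores; *lc; lc++)
		if (!strcmp(compat, *lc))
			return 1;
	return 0;
}

int main(void)
{
	unsigned long fast = 0, slow = 0;	/* toy cpumasks, one bit per cpu */
	int ncpus = sizeof(cpu_compatible) / sizeof(cpu_compatible[0]);
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		if (is_little(cpu_compatible[cpu]))
			slow |= 1UL << cpu;
		else
			fast |= 1UL << cpu;
	}

	/* Same fallback as the patch: no big.LITTLE split found, so treat
	 * all cores as fast to keep the system running. */
	if (!fast || !slow) {
		fast = (1UL << ncpus) - 1;
		slow = 0;
	}

	printf("fast: %#lx, slow: %#lx\n", fast, slow);
	return 0;
}

On the hypothetical 2+2 system above this prints fast: 0x3, slow: 0xc.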
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f705a87ac7b5..e05636144c62 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3084,25 +3084,19 @@ done:
#ifdef CONFIG_SCHED_HMP
/* Heterogenous multiprocessor (HMP) optimizations
- * We need to know which cpus that are fast and slow. Ideally, this
- * information would be provided by the platform in some way. For now it is
- * set in the kernel config. */
+ * We need to know which cpus are fast and which are slow. */
static struct cpumask hmp_fast_cpu_mask;
static struct cpumask hmp_slow_cpu_mask;
-/* Setup fast and slow cpumasks.
- * This should be setup based on device tree somehow. */
+extern void __init arch_get_fast_and_slow_cpus(struct cpumask *fast,
+					       struct cpumask *slow);
+
+/* Setup fast and slow cpumasks. */
static int __init hmp_cpu_mask_setup(void)
{
	char buf[64];
-	cpumask_clear(&hmp_fast_cpu_mask);
-	cpumask_clear(&hmp_slow_cpu_mask);
-
-	if (cpulist_parse(CONFIG_HMP_FAST_CPU_MASK, &hmp_fast_cpu_mask))
-		WARN(1, "Failed to parse HMP fast cpu mask!\n");
-	if (cpulist_parse(CONFIG_HMP_SLOW_CPU_MASK, &hmp_slow_cpu_mask))
-		WARN(1, "Failed to parse HMP slow cpu mask!\n");
+	arch_get_fast_and_slow_cpus(&hmp_fast_cpu_mask, &hmp_slow_cpu_mask);
	printk(KERN_DEBUG "Initializing HMP scheduler:\n");
	cpulist_scnprintf(buf, 64, &hmp_fast_cpu_mask);
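
CONFIG_HMP_FAST_CPU_MASK and CONFIG_HMP_SLOW_CPU_MASK hold cpu lists in the
format that cpulist_parse() consumes and cpulist_scnprintf() prints:
comma-separated decimal cpu ids and ranges, e.g. "0-1" or "0,2,4-7". A
simplified userspace sketch of that parsing follows; it is not the kernel
implementation, and the buffer size and error handling are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Turn a cpu list such as "0,2,4-7" into a bitmask.
 * Returns 0 on success, -1 on malformed input. */
static int parse_cpulist(const char *list, unsigned long *mask)
{
	char buf[64], *tok, *save = NULL;

	*mask = 0;
	if (strlen(list) >= sizeof(buf))
		return -1;
	strcpy(buf, list);

	for (tok = strtok_r(buf, ",", &save); tok;
	     tok = strtok_r(NULL, ",", &save)) {
		char *end;
		long lo = strtol(tok, &end, 10), hi = lo;

		if (end == tok)
			return -1;
		if (*end == '-') {
			hi = strtol(end + 1, &end, 10);
			if (*end)
				return -1;
		} else if (*end) {
			return -1;
		}
		if (lo < 0 || hi < lo || hi >= (long)(8 * sizeof(*mask)))
			return -1;
		while (lo <= hi)
			*mask |= 1UL << lo++;
	}
	return 0;
}

int main(void)
{
	unsigned long fast, slow;

	/* Example values in the shape CONFIG_HMP_FAST_CPU_MASK and
	 * CONFIG_HMP_SLOW_CPU_MASK would take on a 2+2 big.LITTLE system. */
	if (parse_cpulist("0-1", &fast) || parse_cpulist("2-3", &slow))
		return 1;

	printf("fast: %#lx, slow: %#lx\n", fast, slow);
	return 0;
}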