Diffstat (limited to 'arch/arm64/kernel/smp.c')
-rw-r--r--  arch/arm64/kernel/smp.c  207
1 file changed, 128 insertions(+), 79 deletions(-)
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 78db90dcc910..e671f33d5c05 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -54,7 +55,6 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
@@ -63,22 +63,7 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
+static const struct smp_operations *smp_ops[NR_CPUS];
/*
* Boot a secondary CPU, and assign it the specified idle task.
@@ -86,38 +71,10 @@ static void write_pen_release(u64 val)
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
-
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
+ if (smp_ops[cpu]->cpu_boot)
+ return smp_ops[cpu]->cpu_boot(cpu);
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
@@ -187,17 +144,10 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
+ if (smp_ops[cpu]->cpu_postboot)
+ smp_ops[cpu]->cpu_postboot();
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
+ store_cpu_topology(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -221,6 +171,103 @@ asmlinkage void secondary_start_kernel(void)
cpu_startup_entry(CPUHP_ONLINE);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have an smp_ops, so test for it.
+ */
+ if (!smp_ops[cpu] || !smp_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
+
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (smp_ops[cpu]->cpu_disable)
+ return smp_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret;
+
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shutdown -
+ * waits until shutdown has completed, or it is timed out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void __ref cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
+
+ local_irq_disable();
+ mb();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+ * Actually shutdown the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ smp_ops[cpu]->cpu_die(cpu);
+
+ BUG();
+}
+#endif
+
void __init smp_cpus_done(unsigned int max_cpus)
{
pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
@@ -232,17 +279,15 @@ void __init smp_prepare_boot_cpu(void)
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-static const struct smp_enable_ops *enable_ops[] __initconst = {
+static const struct smp_operations *supported_smp_ops[] __initconst = {
&smp_spin_table_ops,
&smp_psci_ops,
NULL,
};
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
-
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
+static const struct smp_operations * __init smp_get_ops(const char *name)
{
- const struct smp_enable_ops **ops = enable_ops;
+ const struct smp_operations **ops = supported_smp_ops;
while (*ops) {
if (!strcmp(name, (*ops)->name))
@@ -263,7 +308,7 @@ void __init smp_init_cpus(void)
{
const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -305,6 +350,8 @@ void __init smp_init_cpus(void)
}
}
+ enable_method = of_get_property(dn, "enable-method", NULL);
+
/*
* The numbering scheme requires that the boot CPU
* must be assigned logical id 0. Record it so that
@@ -320,11 +367,12 @@ void __init smp_init_cpus(void)
bootcpu_valid = true;
+ if (enable_method)
+ smp_ops[0] = smp_get_ops(enable_method);
+
/*
- * cpu_logical_map has already been
- * initialized and the boot cpu doesn't need
- * the enable-method so continue without
- * incrementing cpu.
+ * cpu_logical_map has already been initialized so
+ * continue without incrementing cpu.
*/
continue;
}
@@ -332,25 +380,21 @@ void __init smp_init_cpus(void)
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
if (!enable_method) {
pr_err("%s: missing enable-method property\n",
dn->full_name);
goto next;
}
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
+ smp_ops[cpu] = smp_get_ops(enable_method);
- if (!smp_enable_ops[cpu]) {
+ if (!smp_ops[cpu]) {
pr_err("%s: invalid enable-method property: %s\n",
dn->full_name, enable_method);
goto next;
}
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (smp_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -380,8 +424,11 @@ next:
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
+ init_cpu_topology();
+ store_cpu_topology(smp_processor_id());
/*
* are we trying to boot more cores than exist?
@@ -408,14 +455,16 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!smp_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = smp_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
+#ifndef CONFIG_ACPI
set_cpu_present(cpu, true);
+#endif
max_cpus--;
}
}
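
Note: the patch above calls through smp_ops (name, cpu_init, cpu_prepare, cpu_boot, cpu_postboot, cpu_disable, cpu_die) without showing the declaration of the descriptor, which lives in a header outside this diff. The following is a minimal sketch of what such a struct smp_operations could look like, reconstructed only from the call sites in this patch; the field order, exact prototypes, and header placement are assumptions, not taken from the patch itself.

/*
 * Hypothetical sketch of the per-enable-method descriptor implied by the
 * call sites above; the real declaration is not part of this diff.
 */
struct device_node;

struct smp_operations {
	/* Matched against the DT "enable-method" string in smp_get_ops(). */
	const char	*name;
	/* Parse the CPU's DT node and record whatever is needed to boot it. */
	int		(*cpu_init)(struct device_node *dn, unsigned int cpu);
	/* Per-CPU preparation, called from smp_prepare_cpus(). */
	int		(*cpu_prepare)(unsigned int cpu);
	/* Kick the secondary CPU into the kernel; returns 0 on success. */
	int		(*cpu_boot)(unsigned int cpu);
	/* Run on the new CPU early in secondary_start_kernel(). */
	void		(*cpu_postboot)(void);
#ifdef CONFIG_HOTPLUG_CPU
	/* Optional mechanism-specific veto of hot-unplug. */
	int		(*cpu_disable)(unsigned int cpu);
	/* Take the calling CPU down; must not fail or return. */
	void		(*cpu_die)(unsigned int cpu);
#endif
};

Both &smp_spin_table_ops and &smp_psci_ops listed in supported_smp_ops would then provide instances of this descriptor; their definitions are likewise outside this diff.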