aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSireesh Tripurari <sireesh.tripurari@linaro.org>2013-12-10 11:50:10 +0000
committerSireesh Tripurari <sireesh.tripurari@linaro.org>2013-12-10 11:50:10 +0000
commitd39a5b58079dfaa9dfcefd49e9f2ca2d9e7aacc7 (patch)
tree0f9627f9d64214a5e504a1e2f996b1f57139cec5
parent3cb1af7ae4286b953b6d363ec481b6be6513b79a (diff)
downloadjuice-linux-linaro-juice-13.12.09.tar.gz
-rw-r--r--arch/arm64/include/asm/smp.h36
-rw-r--r--arch/arm64/kernel/smp.c89
-rw-r--r--arch/arm64/kernel/smp_spin_table.c14
3 files changed, 39 insertions, 100 deletions
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index d2ef02e67f6..a498f2cd2c2 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -65,45 +65,9 @@ extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
-
extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
extern void cpu_die(void);
-/**
- * struct smp_operations - Callback operations for hotplugging CPUs.
- *
- * @name: Name of the property as appears in a devicetree cpu node's
- * enable-method property.
- * @cpu_init: Reads any data necessary for a specific enable-method form the
- * devicetree, for a given cpu node and proposed logical id.
- * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
- * mechanism for doing so, tests whether it is possible to boot
- * the given CPU.
- * @cpu_boot: Boots a cpu into the kernel.
- * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
- * synchronisation. Called from the cpu being booted.
- * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
- * reason, which will cause the hot unplug to be aborted. Called
- * from the cpu to be killed.
- * @cpu_die: Makes the a leave the kernel. Must not fail. Called from the
- * cpu being killed.
- */
-struct smp_operations {
- const char *name;
- int (*cpu_init)(struct device_node *, unsigned int);
- int (*cpu_prepare)(unsigned int);
- int (*cpu_boot)(unsigned int);
- void (*cpu_postboot)(void);
-#ifdef CONFIG_HOTPLUG_CPU
- int (*cpu_disable)(unsigned int cpu);
- void (*cpu_die)(unsigned int cpu);
-#endif
-};
-
-extern const struct smp_operations smp_spin_table_ops;
-extern const struct smp_operations smp_psci_ops;
-
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index ffc78bbfa7c..d3a945ca2a5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -62,16 +63,14 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
-static const struct smp_operations *smp_ops[NR_CPUS];
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- if (smp_ops[cpu]->cpu_boot)
- return smp_ops[cpu]->cpu_boot(cpu);
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
return -EOPNOTSUPP;
}
@@ -143,8 +142,13 @@ asmlinkage void secondary_start_kernel(void)
preempt_disable();
trace_hardirqs_off();
- if (smp_ops[cpu]->cpu_postboot)
- smp_ops[cpu]->cpu_postboot();
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
+
+ /*
+ * Enable GIC and timers.
+ */
+ notify_cpu_starting(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -154,13 +158,9 @@ asmlinkage void secondary_start_kernel(void)
set_cpu_online(cpu, true);
complete(&cpu_running);
- /*
- * Enable GIC and timers.
- */
- notify_cpu_starting(cpu);
-
local_irq_enable();
local_fiq_enable();
+ local_async_enable();
/*
* OK, it's off to the idle thread for us
@@ -173,17 +173,17 @@ static int op_cpu_disable(unsigned int cpu)
{
/*
* If we don't have a cpu_die method, abort before we reach the point
- * of no return. CPU0 may not have an smp_ops, so test for it.
+	 * of no return. CPU0 may not have a cpu_ops, so test for it.
*/
- if (!smp_ops[cpu] || !smp_ops[cpu]->cpu_die)
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
return -EOPNOTSUPP;
/*
* We may need to abort a hot unplug for some other mechanism-specific
* reason.
*/
- if (smp_ops[cpu]->cpu_disable)
- return smp_ops[cpu]->cpu_disable(cpu);
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
return 0;
}
@@ -259,7 +259,7 @@ void __ref cpu_die(void)
* mechanism must perform all required cache maintenance to ensure that
* no dirty lines are lost in the process of shutting down the CPU.
*/
- smp_ops[cpu]->cpu_die(cpu);
+ cpu_ops[cpu]->cpu_die(cpu);
BUG();
}
@@ -276,26 +276,6 @@ void __init smp_prepare_boot_cpu(void)
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-static const struct smp_operations *supported_smp_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
-
-static const struct smp_operations * __init smp_get_ops(const char *name)
-{
- const struct smp_operations **ops = supported_smp_ops;
-
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
-
- ops++;
- }
-
- return NULL;
-}
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -303,7 +283,6 @@ static const struct smp_operations * __init smp_get_ops(const char *name)
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
unsigned int i, cpu = 1;
bool bootcpu_valid = false;
@@ -347,8 +326,6 @@ void __init smp_init_cpus(void)
}
}
- enable_method = of_get_property(dn, "enable-method", NULL);
-
/*
* The numbering scheme requires that the boot CPU
* must be assigned logical id 0. Record it so that
@@ -364,34 +341,22 @@ void __init smp_init_cpus(void)
bootcpu_valid = true;
- if (enable_method)
- smp_ops[0] = smp_get_ops(enable_method);
-
- /*
- * cpu_logical_map has already been initialized so
- * continue without incrementing cpu.
- */
+ /*
+ * cpu_logical_map has already been
+ * initialized and the boot cpu doesn't need
+ * the enable-method so continue without
+ * incrementing cpu.
+ */
continue;
}
if (cpu >= NR_CPUS)
goto next;
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
- goto next;
- }
-
- smp_ops[cpu] = smp_get_ops(enable_method);
-
- if (!smp_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
- if (smp_ops[cpu]->cpu_init(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -449,10 +414,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
if (cpu == smp_processor_id())
continue;
- if (!smp_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_ops[cpu]->cpu_prepare(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
@@ -492,7 +457,7 @@ void show_ipi_list(struct seq_file *p, int prec)
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 87af6bb551a..c897a027617 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -23,6 +23,7 @@
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
@@ -71,7 +72,16 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
return -ENODEV;
release_addr = __va(cpu_release_addr[cpu]);
- release_addr[0] = (void *)__pa(secondary_holding_pen);
+
+ /*
+ * We write the release address as LE regardless of the native
+	 * endianness of the kernel. Therefore, any boot-loaders that
+	 * read this address need to convert this address to the
+	 * boot-loader's endianness before jumping. This is mandated by
+ * the boot protocol.
+ */
+ release_addr[0] = (void *) cpu_to_le64(__pa(secondary_holding_pen));
+
__flush_dcache_area(release_addr, sizeof(release_addr[0]));
/*
@@ -132,7 +142,7 @@ void smp_spin_table_cpu_postboot(void)
raw_spin_unlock(&boot_lock);
}
-const struct smp_operations smp_spin_table_ops = {
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
.cpu_init = smp_spin_table_cpu_init,
.cpu_prepare = smp_spin_table_cpu_prepare,