author		Jon Medhurst <tixy@linaro.org>	2013-01-21 18:36:28 +0000
committer	Jon Medhurst <tixy@linaro.org>	2013-01-21 18:36:28 +0000
commit		3ce8436b927bbaf715e8f89641ad42cc2f1df5ff (patch)
tree		15c0bd8216564817988b01f6f04448d59153470c
parent		424ccfb264f4bf937192ff88017ca8f5b7c6ddbf (diff)
parent		92353551318ee1bdf8ebeadbaa256d4b3594b973 (diff)
download	linux-topics-3ce8436b927bbaf715e8f89641ad42cc2f1df5ff.tar.gz
Merge branch 'tracking-armlt-tc2-pm' into integration-linaro-vexpress
Conflicts:
	drivers/misc/Kconfig
	drivers/misc/Makefile
-rw-r--r--  arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts |  18
-rw-r--r--  arch/arm/common/gic.c                       |   8
-rw-r--r--  arch/arm/kernel/sleep.S                     |  32
-rw-r--r--  arch/arm/mach-vexpress/Kconfig              |  16
-rw-r--r--  arch/arm/mach-vexpress/Makefile             |   1
-rw-r--r--  arch/arm/mach-vexpress/cpuidle-tc2.c        | 293
-rw-r--r--  arch/arm/mach-vexpress/hotplug-asm.S        |  28
-rw-r--r--  arch/arm/mach-vexpress/tc2-sleep.S          |  76
-rw-r--r--  drivers/cpufreq/Kconfig.arm                 |   9
-rw-r--r--  drivers/cpufreq/Makefile                    |   1
-rw-r--r--  drivers/cpufreq/vexpress_bL_cpufreq.c       | 284
-rw-r--r--  drivers/misc/Kconfig                        |   5
-rw-r--r--  drivers/misc/Makefile                       |   2
-rw-r--r--  drivers/misc/arm-cci.c                      | 533
-rw-r--r--  drivers/misc/vexpress/Kconfig               |   3
-rw-r--r--  drivers/misc/vexpress/Makefile              |   1
-rw-r--r--  drivers/misc/vexpress/arm-spc.c             | 390
-rw-r--r--  include/linux/arm-cci.h                     |  30
-rw-r--r--  include/linux/vexpress.h                    |  46
19 files changed, 1770 insertions, 6 deletions
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
index 4609562de9b..8cfb7b965ac 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
@@ -37,6 +37,7 @@
cluster0: cluster@0 {
reg = <0>;
+ freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -54,6 +55,7 @@
cluster1: cluster@1 {
reg = <1>;
+ freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -146,6 +148,22 @@
clock-names = "pxlclk";
};
+ spc@7fff0000 {
+ compatible = "arm,spc";
+ reg = <0 0x7FFF0000 0 0x1000>;
+ interrupts = <0 95 4>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0 0x2c090000 0 0x10000>;
+ interrupts = <0 101 4>,
+ <0 102 4>,
+ <0 103 4>,
+ <0 104 4>,
+ <0 105 4>;
+ };
+
memory-controller@2b0a0000 {
compatible = "arm,pl341", "arm,primecell";
reg = <0 0x2b0a0000 0 0x1000>;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 36ae03a3f5d..763fd7ca7ae 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -530,6 +530,14 @@ static void gic_cpu_save(unsigned int gic_nr)
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+	/*
+	 * Disable the GIC CPU IF and IRQ bypass. When a CPU is shut down we
+	 * must ensure that it does not exit wfi if an IRQ is pending on the
+	 * IF. The GIC allows this by disabling the CPU IF and the IRQ bypass
+	 * mode: 0x1e0 sets the four Grp0/Grp1 IRQ/FIQ bypass disable bits
+	 * while leaving the IF itself disabled. The raw IRQ line is still
+	 * delivered to the power controller, which uses it to wake up the
+	 * respective core.
+	 */
+ writel_relaxed(0x1e0, cpu_base + GIC_CPU_CTRL);
}
static void gic_cpu_restore(unsigned int gic_nr)
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415..b5c1e636ed8 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -82,10 +82,13 @@ ENDPROC(cpu_resume_after_mmu)
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
+ mov r1, #0 @ fall-back logical index for UP
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_UP_B(1f)
+ bic r0, #0xff000000
+ bl cpu_logical_index @ return logical index in r1
+1:
adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
- ALT_UP(mov r1, #0)
- and r1, r1, #15
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +105,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
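+/*
+ * cpu_logical_index: scan __cpu_logical_map (located via a PC-relative
+ * offset, since this runs with the MMU off) for the hardware id in r0
+ * and return the matching logical CPU index in r1. Loops forever if the
+ * id is not in the map.
+ */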
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 52d315b792c..c822dbed5f8 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -1,5 +1,6 @@
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
+ select ARCH_HAS_CPUFREQ
select ARCH_REQUIRE_GPIOLIB
select ARM_AMBA
select ARM_GIC
@@ -49,6 +50,21 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
build a working kernel, you must also enable relevant core
tile support or Flattened Device Tree based support options.
+config ARCH_VEXPRESS_TC2_PM
+ bool "Power Management Support for TC2 test-chip (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on CPU_IDLE && PM
+ select ARM_CPU_SUSPEND
+ select ARCH_NEEDS_CPU_IDLE_COUPLED
+ select ARM_SPC
+ select ARM_CCI
+ help
+ Provides code that enables CPU idle power management on the
+ TC2 testchip. It enables the CPU idle driver so that the kernel
+ can enter cluster power down states provided by the power
+ controller. Code is built on top of coupled C-state idle code
+ since all CPUs need to be idle to enter cluster shutdown.
+
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 80b64971fbd..03d9029dbd6 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -8,3 +8,4 @@ obj-y := v2m.o reset.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += cpuidle-tc2.o hotplug-asm.o tc2-sleep.o
diff --git a/arch/arm/mach-vexpress/cpuidle-tc2.c b/arch/arm/mach-vexpress/cpuidle-tc2.c
new file mode 100644
index 00000000000..3b73d4a5dc5
--- /dev/null
+++ b/arch/arm/mach-vexpress/cpuidle-tc2.c
@@ -0,0 +1,293 @@
+/*
+ * TC2 CPU idle driver.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/bitmap.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/clockchips.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tick.h>
+#include <linux/vexpress.h>
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/idmap.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#include <mach/motherboard.h>
+
+static int tc2_cpuidle_simple_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ ktime_t time_start, time_end;
+ s64 diff;
+
+ time_start = ktime_get();
+
+ cpu_do_idle();
+
+ time_end = ktime_get();
+
+ local_irq_enable();
+
+ diff = ktime_to_us(ktime_sub(time_end, time_start));
+ if (diff > INT_MAX)
+ diff = INT_MAX;
+
+ dev->last_residency = (int) diff;
+
+ return index;
+}
+
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+static struct cpuidle_state tc2_cpuidle_set[] __initdata = {
+ [0] = {
+ .enter = tc2_cpuidle_simple_enter,
+ .exit_latency = 1,
+ .target_residency = 1,
+ .power_usage = UINT_MAX,
+ .flags = CPUIDLE_FLAG_TIME_VALID,
+ .name = "WFI",
+ .desc = "ARM WFI",
+ },
+ [1] = {
+ .enter = tc2_enter_coupled,
+ .exit_latency = 300,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_COUPLED,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+};
+
+struct cpuidle_driver tc2_idle_driver = {
+ .name = "tc2_idle",
+ .owner = THIS_MODULE,
+ .safe_state_index = 0
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, tc2_idle_dev);
+
+#define NR_CLUSTERS 2
+static cpumask_t cluster_mask = CPU_MASK_NONE;
+
+extern void disable_clean_inv_dcache(int);
+static atomic_t abort_barrier[NR_CLUSTERS];
+
+extern void tc2_cpu_resume(void);
+extern void disable_snoops(void);
+
+static int notrace tc2_coupled_finisher(unsigned long arg)
+{
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = smp_processor_id();
+ unsigned int cluster = (mpidr >> 8) & 0xf;
+ unsigned int weight = cpumask_weight(topology_core_cpumask(cpu));
+ u8 wfi_weight = 0;
+
+ cpuidle_coupled_parallel_barrier((struct cpuidle_device *)arg,
+ &abort_barrier[cluster]);
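+	/*
+	 * Non-zero Aff0: not CPU0 of the cluster. Clean the local cache
+	 * level and enter wfi; the power controller takes it from here.
+	 */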
+ if (mpidr & 0xf) {
+ disable_clean_inv_dcache(0);
+ wfi();
+ /* not reached */
+ }
+
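+	/* CPU0: wait until every other CPU in the cluster has hit wfi */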
+ while (wfi_weight != (weight - 1)) {
+ wfi_weight = vexpress_spc_wfi_cpustat(cluster);
+ wfi_weight = hweight8(wfi_weight);
+ }
+
+ vexpress_spc_powerdown_enable(cluster, 1);
+ disable_clean_inv_dcache(1);
+ disable_cci(cluster);
+ disable_snoops();
+ return 1;
+}
+
+/*
+ * tc2_enter_coupled - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: cpuidle driver
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct timespec ts_preidle, ts_postidle, ts_idle;
+ int ret;
+ int cluster = (read_cpuid_mpidr() >> 8) & 0xf;
+ /* Used to keep track of the total time in idle */
+ getnstimeofday(&ts_preidle);
+
+ if (!cpu_isset(cluster, cluster_mask)) {
+ cpuidle_coupled_parallel_barrier(dev,
+ &abort_barrier[cluster]);
+ goto shallow_out;
+ }
+
+ BUG_ON(!irqs_disabled());
+
+ cpu_pm_enter();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ ret = cpu_suspend((unsigned long) dev, tc2_coupled_finisher);
+
+ if (ret)
+ BUG();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+ cpu_pm_exit();
+
+shallow_out:
+ getnstimeofday(&ts_postidle);
+ ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+ dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC +
+ ts_idle.tv_sec * USEC_PER_SEC;
+ return idx;
+}
+
+static int idle_mask_show(struct seq_file *f, void *p)
+{
+ char buf[256];
+ bitmap_scnlistprintf(buf, 256, cpumask_bits(&cluster_mask),
+ NR_CLUSTERS);
+
+ seq_printf(f, "%s\n", buf);
+
+ return 0;
+}
+
+static int idle_mask_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, idle_mask_show, inode->i_private);
+}
+
+static const struct file_operations cpuidle_fops = {
+ .open = idle_mask_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int idle_debug_set(void *data, u64 val)
+{
+ if (val >= (unsigned)NR_CLUSTERS && val != 0xff) {
+ pr_warning("Wrong parameter passed\n");
+ return -EINVAL;
+ }
+ cpuidle_pause_and_lock();
+ if (val == 0xff)
+ cpumask_clear(&cluster_mask);
+ else
+ cpumask_set_cpu(val, &cluster_mask);
+
+ cpuidle_resume_and_unlock();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(idle_debug_fops, NULL, idle_debug_set, "%llu\n");
+
+/*
+ * tc2_idle_init
+ *
+ * Registers the TC2 specific cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int __init tc2_idle_init(void)
+{
+ struct cpuidle_device *dev;
+ int i, cpu_id;
+ struct dentry *idle_debug, *file_debug;
+ struct cpuidle_driver *drv = &tc2_idle_driver;
+
+ if (!vexpress_spc_check_loaded()) {
+ pr_info("TC2 CPUidle not registered because no SPC found\n");
+ return -ENODEV;
+ }
+
+ drv->state_count = (sizeof(tc2_cpuidle_set) /
+ sizeof(struct cpuidle_state));
+
+ for (i = 0; i < drv->state_count; i++) {
+ memcpy(&drv->states[i], &tc2_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ cpuidle_register_driver(drv);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+		pr_info("CPUidle for CPU%d registered\n", cpu_id);
+ dev = &per_cpu(tc2_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+ dev->safe_state_index = 0;
+
+ cpumask_copy(&dev->coupled_cpus,
+ topology_core_cpumask(cpu_id));
+ dev->state_count = drv->state_count;
+
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_ERR "%s: Cpuidle register device failed\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ idle_debug = debugfs_create_dir("idle_debug", NULL);
+
+ if (IS_ERR_OR_NULL(idle_debug)) {
+ printk(KERN_INFO "Error in creating idle debugfs directory\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_idle", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &idle_debug_fops);
+
+ if (IS_ERR_OR_NULL(file_debug)) {
+ printk(KERN_INFO "Error in creating enable_idle file\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_mask", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &cpuidle_fops);
+
+ if (IS_ERR_OR_NULL(file_debug))
+ printk(KERN_INFO "Error in creating enable_mask file\n");
+
+ /* enable all wake-up IRQs by default */
+ vexpress_spc_set_wake_intr(0x7ff);
+ vexpress_flags_set(virt_to_phys(tc2_cpu_resume));
+
+ /*
+ * Enable idle by default for all possible clusters.
+ * This must be done after all other setup to prevent the
+ * possibility of clusters being powered down before they
+ * are fully configured.
+ */
+ for (i = 0; i < NR_CLUSTERS; i++)
+ cpumask_set_cpu(i, &cluster_mask);
+
+ return 0;
+}
+
+late_initcall(tc2_idle_init);
diff --git a/arch/arm/mach-vexpress/hotplug-asm.S b/arch/arm/mach-vexpress/hotplug-asm.S
new file mode 100644
index 00000000000..f63472edcc4
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug-asm.S
@@ -0,0 +1,28 @@
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+ .text
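+
+@ disable_clean_inv_dcache(flush_all)
+@ Turn the D-cache off (SCTLR.C), clean and invalidate it, then leave SMP
+@ coherency (ACTLR.SMP). r0 == 0 flushes to the Level of Unification only;
+@ r0 != 0 flushes the whole hierarchy, for the last CPU of a cluster to
+@ go down.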
+ENTRY(disable_clean_inv_dcache)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, #4 @ clear C bit
+ mcr p15, 0, r3, c1, c0, 0
+ dsb
+ isb
+ mov r12, r0
+ cmp r12, #0
+ bleq v7_flush_dcache_louis
+ cmp r12, #0
+ blne v7_flush_dcache_all
+ clrex
+ mrc p15, 0, r3, c1, c0, 1
+ bic r3, #0x40 @ clear SMP bit
+ mcr p15, 0, r3, c1, c0, 1
+ isb
+ dsb
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(disable_clean_inv_dcache)
diff --git a/arch/arm/mach-vexpress/tc2-sleep.S b/arch/arm/mach-vexpress/tc2-sleep.S
new file mode 100644
index 00000000000..9bf8348fcc8
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2-sleep.S
@@ -0,0 +1,76 @@
+#include <linux/linkage.h>
+
+#define SPC_PHYS_BASE 0x7FFF0000
+#define A15_CONF 0x400
+
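+@ tc2_cpu_resume: entry point used by the power controller to wake a core.
+@ Runs with the MMU off: compare MPIDR Aff1 with the A15 cluster id held
+@ in the SPC A15_CONF register, pick the matching per-cluster row from the
+@ 'value' table below, re-enable SPC snoops and the CCI slave interface,
+@ wait for the CCI change to complete, clear the power-down enable and
+@ fall through to the generic cpu_resume.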
+ENTRY(tc2_cpu_resume)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ ldr r1, =SPC_PHYS_BASE
+ mov r2, #A15_CONF
+ add r1, r1, r2
+ ldr r1, [r1]
+ and r1, r1, #0x7
+ cmp r1, r0, lsr #8
+ adr r0, value
+ addne r0, r0, #16
+	ldmia	r0, {r1, r2, r3, r4}	@ r1 = CCI snoop ctl, r2 = SPC snoop ctl, r3 = mask, r4 = powerdown enable
+ mvn r3, r3 @ undo actions done at shutdown
+ ldr r0, [r2]
+ and r5, r0, r3
+ str r5, [r2]
+ mov r0, #3 @ enable CCI for the cluster
+ str r0, [r1]
+ adr r1, cci_ctrl
+ ldr r1, [r1]
+loop:
+ ldr r0, [r1]
+ ands r0, r0, #1
+ bne loop
+ mov r0, #0 @ disable power down enable
+ str r0, [r4]
+ b cpu_resume
+ENDPROC(tc2_cpu_resume)
+
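+@ disable_snoops: set the snoop-disable bits in this cluster's SPC snoop
+@ control register (through the virtual SPC mapping published in vscc),
+@ then wfi. tc2_cpu_resume undoes this on the way back up.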
+ENTRY(disable_snoops)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ ldr r1, scc_ptr
+ ldr r1, [r1]
+ mov r2, #A15_CONF
+ add r1, r1, r2
+ ldr r1, [r1]
+ and r1, r1, #0x7
+ cmp r1, r0, lsr #8
+ adr r0, vvalue
+ addne r0, r0, #8
+	ldmia	r0, {r2, r3}	@ r2 = SPC snoop ctl offset, r3 = snoop mask bits
+ ldr r1, scc_ptr
+ ldr r1, [r1]
+ add r2, r1, r2
+ ldr r0, [r2]
+ orr r0, r0, r3
+ dsb
+ isb
+ str r0, [r2]
+ wfi
+ENDPROC(disable_snoops)
+
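+
+@ Per-cluster literal tables (physical addresses, used with the MMU off).
+@ Each 'value' row holds: CCI slave interface snoop control (CCI base +
+@ 0x4000/0x5000), SPC snoop control (SPC_PHYS_BASE + 0x404/0x504), the
+@ snoop-disable mask, and SPC powerdown enable (SPC_PHYS_BASE + 0xb30/34),
+@ for A15 and A7 respectively. 'vvalue' holds the SPC snoop control
+@ offsets and masks for the virtual mapping used by disable_snoops.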
+cci_ctrl:
+ .long 0x2c09000c
+value:
+ .long 0x2c094000
+ .long 0x7fff0404
+ .long 0x180
+ .long 0x7fff0b30
+ .long 0x2c095000
+ .long 0x7fff0504
+ .long 0x2000
+ .long 0x7fff0b34
+vvalue:
+ .long 0x404
+ .long 0x180
+ .long 0x504
+ .long 0x2000
+scc_ptr:
+ .long vscc
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b..8db6f17b749 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -83,3 +83,12 @@ config ARM_SPEAR_CPUFREQ
default y
help
This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_VEXPRESS_BL_CPUFREQ
+ tristate "CPUfreq driver for ARM Vexpress big.LITTLE CPUs"
+ depends on ARCH_VEXPRESS && CPU_FREQ
+ help
+ This enables the CPUfreq driver for ARM Vexpress big.LITTLE
+ platform.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index ce4ffd77ffa..fb5bb4423b8 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
obj-$(CONFIG_ARM_SPEAR_CPUFREQ) += spear-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_BL_CPUFREQ) += vexpress_bL_cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/vexpress_bL_cpufreq.c b/drivers/cpufreq/vexpress_bL_cpufreq.c
new file mode 100644
index 00000000000..228542a21d5
--- /dev/null
+++ b/drivers/cpufreq/vexpress_bL_cpufreq.c
@@ -0,0 +1,284 @@
+/*
+ * Vexpress big.LITTLE CPUFreq support
+ * Based on mach-integrator
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/vexpress.h>
+
+#define VEXPRESS_MAX_CLUSTER 2
+
+static struct cpufreq_frequency_table *freq_table[VEXPRESS_MAX_CLUSTER];
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+
+/* Cached current cluster for each CPU to save on IPIs */
+static DEFINE_PER_CPU(unsigned int, cpu_cur_cluster);
+
+/*
+ * Functions to get the current status.
+ *
+ * Beware that the cluster for another CPU may change unexpectedly.
+ */
+
+static unsigned int get_local_cluster(void)
+{
+ unsigned int mpidr;
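+	/* The cluster id lives in MPIDR[11:8] (Aff1) on this platform */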
+ asm ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+ return (mpidr >> 8) & 0xf;
+}
+
+static void __get_current_cluster(void *_data)
+{
+ unsigned int *_cluster = _data;
+ *_cluster = get_local_cluster();
+}
+
+static int get_current_cluster(unsigned int cpu)
+{
+ unsigned int cluster = 0;
+ smp_call_function_single(cpu, __get_current_cluster, &cluster, 1);
+ return cluster;
+}
+
+static int get_current_cached_cluster(unsigned int cpu)
+{
+ return per_cpu(cpu_cur_cluster, cpu);
+}
+
+/* Validate policy frequency range */
+static int vexpress_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ uint32_t cur_cluster = get_current_cached_cluster(policy->cpu);
+
+ /* This call takes care of it all using freq_table */
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int vexpress_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ uint32_t cpu = policy->cpu;
+ struct cpufreq_freqs freqs;
+ uint32_t freq_tab_idx;
+ uint32_t cur_cluster;
+ int ret = 0;
+
+ /* Read current clock rate */
+ cur_cluster = get_current_cached_cluster(cpu);
+
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ freqs.old = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ /* Determine valid target frequency using freq_table */
+ cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &freq_tab_idx);
+ freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("Requested Freq %d cpu %d\n", freqs.new, cpu);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = vexpress_spc_set_performance(cur_cluster, freq_tab_idx);
+ if (ret) {
+ pr_err("Error %d while setting required OPP\n", ret);
+ return ret;
+ }
+
+ policy->cur = freqs.new;
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+/* Get current clock frequency */
+static unsigned int vexpress_cpufreq_get(unsigned int cpu)
+{
+ uint32_t freq_tab_idx = 0;
+ uint32_t cur_cluster = get_current_cached_cluster(cpu);
+
+ /*
+ * Read current clock rate with vexpress_spc call
+ */
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ return freq_table[cur_cluster][freq_tab_idx].frequency;
+}
+
+/* translate the integer array into cpufreq_frequency_table entries */
+static inline void _cpufreq_copy_table_from_array(uint32_t *table,
+ struct cpufreq_frequency_table *freq_table, int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ freq_table[i].index = i;
+		freq_table[i].frequency = table[i] / 1000; /* in kHz */
+ }
+ freq_table[i].index = size;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+static int vexpress_cpufreq_of_init(void)
+{
+ uint32_t cpu_opp_num;
+	struct cpufreq_frequency_table *freqtable[VEXPRESS_MAX_CLUSTER] = {NULL};
+ uint32_t *cpu_freqs;
+ int ret = 0, cluster_id = 0, len;
+ struct device_node *cluster = NULL;
+ const struct property *pp;
+ const u32 *hwid;
+
+ while ((cluster = of_find_node_by_name(cluster, "cluster"))) {
+ hwid = of_get_property(cluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster_id = be32_to_cpup(hwid);
+
+ pp = of_find_property(cluster, "freqs", NULL);
+ if (!pp)
+ return -EINVAL;
+ cpu_opp_num = pp->length / sizeof(u32);
+ if (!cpu_opp_num)
+ return -ENODATA;
+
+ cpu_freqs = kzalloc(sizeof(uint32_t) * cpu_opp_num, GFP_KERNEL);
+ freqtable[cluster_id] =
+ kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (cpu_opp_num + 1), GFP_KERNEL);
+ if (!cpu_freqs || !freqtable[cluster_id]) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+ of_property_read_u32_array(cluster, "freqs",
+ cpu_freqs, cpu_opp_num);
+ _cpufreq_copy_table_from_array(cpu_freqs,
+ freqtable[cluster_id], cpu_opp_num);
+ freq_table[cluster_id] = freqtable[cluster_id];
+
+ kfree(cpu_freqs);
+ }
+ return ret;
+free_mem:
+ while (cluster_id >= 0)
+ kfree(freqtable[cluster_id--]);
+ kfree(cpu_freqs);
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int vexpress_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ uint32_t cur_cluster = get_current_cluster(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = vexpress_cpufreq_of_init();
+
+ if (freq_table[cur_cluster] == NULL)
+ result = -ENODATA;
+
+ if (result) {
+ atomic_dec_return(&freq_table_users);
+ pr_err("CPUFreq - CPU %d failed to initialize\n", policy->cpu);
+ return result;
+ }
+
+ result =
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ per_cpu(cpu_cur_cluster, policy->cpu) = cur_cluster;
+
+ /* set default policy and cpuinfo */
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms assumed */
+ policy->cur = vexpress_cpufreq_get(policy->cpu);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ pr_info("CPUFreq for CPU %d initialized\n", policy->cpu);
+ return result;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *vexpress_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver vexpress_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = vexpress_cpufreq_verify_policy,
+ .target = vexpress_cpufreq_set_target,
+ .get = vexpress_cpufreq_get,
+ .init = vexpress_cpufreq_init,
+ .name = "vexpress-spc",
+ .attr = vexpress_cpufreq_attr,
+};
+
+static int __init vexpress_cpufreq_modinit(void)
+{
+ if (!vexpress_spc_check_loaded()) {
+ pr_info("vexpress cpufreq not initialised because no SPC found\n");
+ return -ENODEV;
+ }
+
+ return cpufreq_register_driver(&vexpress_cpufreq_driver);
+}
+
+static void __exit vexpress_cpufreq_modexit(void)
+{
+ cpufreq_unregister_driver(&vexpress_cpufreq_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for ARM vexpress big.LITTLE platform");
+MODULE_LICENSE("GPL");
+
+module_init(vexpress_cpufreq_modinit);
+module_exit(vexpress_cpufreq_modexit);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0a38c1ac34a..37d2cf64ff5 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -511,6 +511,10 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config ARM_CCI
+ bool "ARM CCI driver support"
+ depends on ARM
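+	  help
+	    Driver support for the ARM CCI-400 cache coherent interconnect:
+	    perf support for the CCI PMU counters and a hook to disable
+	    snooping on a cluster's slave interface for cluster power down.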
+
config WL127X_RFKILL
tristate "Bluetooth power control driver for TI wl127x"
depends on RFKILL
@@ -527,4 +531,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vexpress/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 92b24bcbeb9..d3d6100d614 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -50,6 +50,8 @@ obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
+obj-$(CONFIG_ARCH_VEXPRESS) += vexpress/
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
diff --git a/drivers/misc/arm-cci.c b/drivers/misc/arm-cci.c
new file mode 100644
index 00000000000..d2c16f7142b
--- /dev/null
+++ b/drivers/misc/arm-cci.c
@@ -0,0 +1,533 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/arm-cci.h>
+
+#include <asm/cacheflush.h>
+#include <asm/memory.h>
+#include <asm/outercache.h>
+
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
+
+#define CCI400_PMCR 0x0100
+#define CCI400_EAG_OFFSET 0x4000
+#define CCI400_KF_OFFSET 0x5000
+
+#define DRIVER_NAME "CCI"
+struct cci_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct cci_drvdata *info;
+
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI400_PMU_CYCLE_CNTR_BASE 0x9000
+#define CCI400_PMU_CNTR_BASE(idx) (CCI400_PMU_CYCLE_CNTR_BASE + (idx) * 0x1000)
+
+#define CCI400_PMCR_CEN 0x00000001
+#define CCI400_PMCR_RST 0x00000002
+#define CCI400_PMCR_CCR 0x00000004
+#define CCI400_PMCR_CCD 0x00000008
+#define CCI400_PMCR_EX 0x00000010
+#define CCI400_PMCR_DP 0x00000020
+#define CCI400_PMCR_NCNT_MASK 0x0000F800
+#define CCI400_PMCR_NCNT_SHIFT 11
+
+#define CCI400_PMU_EVT_SEL 0x000
+#define CCI400_PMU_CNTR 0x004
+#define CCI400_PMU_CNTR_CTRL 0x008
+#define CCI400_PMU_OVERFLOW 0x00C
+
+#define CCI400_PMU_OVERFLOW_FLAG 1
+
+enum cci400_perf_events {
+ CCI400_PMU_CYCLES = 0xFF
+};
+
+#define CCI400_PMU_EVENT_MASK 0xff
+#define CCI400_PMU_EVENT_SOURCE(event) ((event >> 5) & 0x7)
+#define CCI400_PMU_EVENT_CODE(event) (event & 0x1f)
+
+#define CCI400_PMU_EVENT_SOURCE_S0 0
+#define CCI400_PMU_EVENT_SOURCE_S4 4
+#define CCI400_PMU_EVENT_SOURCE_M0 5
+#define CCI400_PMU_EVENT_SOURCE_M2 7
+
+#define CCI400_PMU_EVENT_SLAVE_MIN 0x0
+#define CCI400_PMU_EVENT_SLAVE_MAX 0x13
+
+#define CCI400_PMU_EVENT_MASTER_MIN 0x14
+#define CCI400_PMU_EVENT_MASTER_MAX 0x1A
+
+#define CCI400_PMU_MAX_HW_EVENTS 5 /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI400_PMU_CYCLE_COUNTER_IDX 0
+#define CCI400_PMU_COUNTER0_IDX 1
+#define CCI400_PMU_COUNTER_LAST(cci_pmu) (CCI400_PMU_CYCLE_COUNTER_IDX + cci_pmu->num_events - 1)
+
+
+static struct perf_event *events[CCI400_PMU_MAX_HW_EVENTS];
+static unsigned long used_mask[BITS_TO_LONGS(CCI400_PMU_MAX_HW_EVENTS)];
+static struct pmu_hw_events cci_hw_events = {
+ .events = events,
+ .used_mask = used_mask,
+};
+
+static int cci_pmu_validate_hw_event(u8 hw_event)
+{
+ u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
+ u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
+
+ if (ev_source <= CCI400_PMU_EVENT_SOURCE_S4 &&
+ ev_code <= CCI400_PMU_EVENT_SLAVE_MAX)
+ return hw_event;
+ else if (CCI400_PMU_EVENT_SOURCE_M0 <= ev_source &&
+ ev_source <= CCI400_PMU_EVENT_SOURCE_M2 &&
+ CCI400_PMU_EVENT_MASTER_MIN <= ev_code &&
+ ev_code <= CCI400_PMU_EVENT_MASTER_MAX)
+ return hw_event;
+
+ return -EINVAL;
+}
+
+static inline int cci_pmu_counter_is_valid(struct arm_pmu *cci_pmu, int idx)
+{
+ return CCI400_PMU_CYCLE_COUNTER_IDX <= idx &&
+ idx <= CCI400_PMU_COUNTER_LAST(cci_pmu);
+}
+
+static inline u32 cci_pmu_read_register(int idx, unsigned int offset)
+{
+ return readl_relaxed(info->baseaddr + CCI400_PMU_CNTR_BASE(idx) + offset);
+}
+
+static inline void cci_pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+ return writel_relaxed(value, info->baseaddr + CCI400_PMU_CNTR_BASE(idx) + offset);
+}
+
+static inline void cci_pmu_disable_counter(int idx)
+{
+ cci_pmu_write_register(0, idx, CCI400_PMU_CNTR_CTRL);
+}
+
+static inline void cci_pmu_enable_counter(int idx)
+{
+ cci_pmu_write_register(1, idx, CCI400_PMU_CNTR_CTRL);
+}
+
+static inline void cci_pmu_select_event(int idx, unsigned long event)
+{
+ event &= CCI400_PMU_EVENT_MASK;
+ cci_pmu_write_register(event, idx, CCI400_PMU_EVT_SEL);
+}
+
+static u32 cci_pmu_get_max_counters(void)
+{
+ u32 n_cnts = (readl_relaxed(info->baseaddr + CCI400_PMCR) &
+ CCI400_PMCR_NCNT_MASK) >> CCI400_PMCR_NCNT_SHIFT;
+
+ /* add 1 for cycle counter */
+ return n_cnts + 1;
+}
+
+static struct pmu_hw_events *cci_pmu_get_hw_events(void)
+{
+ return &cci_hw_events;
+}
+
+static int cci_pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_event = &event->hw;
+ unsigned long cci_event = hw_event->config_base & CCI400_PMU_EVENT_MASK;
+ int idx;
+
+ if (cci_event == CCI400_PMU_CYCLES) {
+ if (test_and_set_bit(CCI400_PMU_CYCLE_COUNTER_IDX, hw->used_mask))
+ return -EAGAIN;
+
+ return CCI400_PMU_CYCLE_COUNTER_IDX;
+ }
+
+ for (idx = CCI400_PMU_COUNTER0_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); ++idx) {
+ if (!test_and_set_bit(idx, hw->used_mask))
+ return idx;
+ }
+
+ /* No counters available */
+ return -EAGAIN;
+}
+
+static int cci_pmu_map_event(struct perf_event *event)
+{
+ int mapping;
+ u8 config = event->attr.config & CCI400_PMU_EVENT_MASK;
+
+ if (event->attr.type < PERF_TYPE_MAX)
+ return -ENOENT;
+
+ /* 0xff is used to represent CCI Cycles */
+ if (config == 0xff)
+ mapping = config;
+ else
+ mapping = cci_pmu_validate_hw_event(config);
+
+ return mapping;
+}
+
+static int cci_pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+ int irq, err, i = 0;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ if (unlikely(!pmu_device))
+ return -ENODEV;
+
+	/*
+	 * The CCI exports six interrupts: one nERRORIRQ plus five
+	 * nEVNTCNTOVERFLOW (PMU) lines. nERRORIRQ is handled by secure
+	 * firmware on TC2, so we assume that all CCI interrupts listed
+	 * in the device tree are PMU interrupts.
+	 *
+	 * The following code should then be able to handle different
+	 * routings of the CCI PMU interrupts.
+	 */
+ while ((irq = platform_get_irq(pmu_device, i)) > 0) {
+ err = request_irq(irq, handler, 0, "arm-cci-pmu", cci_pmu);
+ if (err) {
+ dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+ irq);
+ return err;
+ }
+ i++;
+ }
+
+ return 0;
+}
+
+static irqreturn_t cci_pmu_handle_irq(int irq_num, void *dev)
+{
+ struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct perf_sample_data data;
+ struct pt_regs *regs;
+ int idx;
+
+ regs = get_irq_regs();
+
+	/*
+	 * Iterate over counters and update the corresponding perf events.
+	 * This should work regardless of whether we have per-counter
+	 * overflow interrupts or a combined overflow interrupt.
+	 */
+ for (idx = CCI400_PMU_CYCLE_COUNTER_IDX; idx <= CCI400_PMU_COUNTER_LAST(cci_pmu); idx++) {
+ struct perf_event *event = events->events[idx];
+ struct hw_perf_event *hw_counter;
+
+ if (!event)
+ continue;
+
+ hw_counter = &event->hw;
+
+ /* Did this counter overflow? */
+ if (!(cci_pmu_read_register(idx, CCI400_PMU_OVERFLOW) & CCI400_PMU_OVERFLOW_FLAG))
+ continue;
+ cci_pmu_write_register(CCI400_PMU_OVERFLOW_FLAG, idx, CCI400_PMU_OVERFLOW);
+
+ armpmu_event_update(event);
+ perf_sample_data_init(&data, 0, hw_counter->last_period);
+ if (!armpmu_event_set_period(event))
+ continue;
+
+ if (perf_event_overflow(event, &data, regs))
+ cci_pmu->disable(event);
+ }
+
+ irq_work_run();
+ return IRQ_HANDLED;
+}
+
+static void cci_pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+ int irq, i = 0;
+ struct platform_device *pmu_device = cci_pmu->plat_device;
+
+ while ((irq = platform_get_irq(pmu_device, i)) > 0) {
+ free_irq(irq, cci_pmu);
+ i++;
+ }
+}
+
+static void cci_pmu_enable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Configure the event to count, unless you are counting cycles */
+ if (idx != CCI400_PMU_CYCLE_COUNTER_IDX)
+ cci_pmu_select_event(idx, hw_counter->config_base);
+
+ cci_pmu_enable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_disable_event(struct perf_event *event)
+{
+ unsigned long flags;
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ cci_pmu_disable_counter(idx);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_start(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Enable all the PMU counters. */
+ val = readl(info->baseaddr + CCI400_PMCR) | CCI400_PMCR_CEN;
+ writel(val, info->baseaddr + CCI400_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void cci_pmu_stop(struct arm_pmu *cci_pmu)
+{
+ u32 val;
+ unsigned long flags;
+ struct cci_drvdata *info = platform_get_drvdata(cci_pmu->plat_device);
+ struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+ raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+ /* Disable all the PMU counters. */
+ val = readl(info->baseaddr + CCI400_PMCR) & ~CCI400_PMCR_CEN;
+ writel(val, info->baseaddr + CCI400_PMCR);
+
+ raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 cci_pmu_read_counter(struct perf_event *event)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+ u32 value;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx))) {
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ return 0;
+ }
+ value = cci_pmu_read_register(idx, CCI400_PMU_CNTR);
+
+ return value;
+}
+
+static void cci_pmu_write_counter(struct perf_event *event, u32 value)
+{
+ struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+ struct hw_perf_event *hw_counter = &event->hw;
+ int idx = hw_counter->idx;
+
+ if (unlikely(!cci_pmu_counter_is_valid(cci_pmu, idx)))
+ dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+ else
+ cci_pmu_write_register(value, idx, CCI400_PMU_CNTR);
+}
+
+static struct arm_pmu cci_pmu = {
+ .name = DRIVER_NAME,
+ .max_period = (1LLU << 32) - 1,
+ .get_hw_events = cci_pmu_get_hw_events,
+ .get_event_idx = cci_pmu_get_event_idx,
+ .map_event = cci_pmu_map_event,
+ .request_irq = cci_pmu_request_irq,
+ .handle_irq = cci_pmu_handle_irq,
+ .free_irq = cci_pmu_free_irq,
+ .enable = cci_pmu_enable_event,
+ .disable = cci_pmu_disable_event,
+ .start = cci_pmu_start,
+ .stop = cci_pmu_stop,
+ .read_counter = cci_pmu_read_counter,
+ .write_counter = cci_pmu_write_counter,
+};
+
+static int cci_pmu_init(struct platform_device *pdev)
+{
+ cci_pmu.plat_device = pdev;
+ cci_pmu.num_events = cci_pmu_get_max_counters();
+ raw_spin_lock_init(&cci_hw_events.pmu_lock);
+ cpumask_setall(&cci_pmu.valid_cpus);
+
+ return armpmu_register(&cci_pmu, -1);
+}
+
+static void cci_pmu_destroy(void)
+{
+ perf_pmu_unregister(&cci_pmu.pmu);
+}
+
+#else
+
+static int cci_pmu_init(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static void cci_pmu_destroy(void) { }
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
+void notrace disable_cci(int cluster)
+{
+ u32 cci_reg = cluster ? CCI400_KF_OFFSET : CCI400_EAG_OFFSET;
+ writel_relaxed(0x0, info->baseaddr + cci_reg);
+
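+	/* CCI status register at +0xc: bit 0 stays set while the snoop
+	 * state change is still pending */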
+ while (readl_relaxed(info->baseaddr + 0xc) & 0x1)
+ ;
+}
+EXPORT_SYMBOL_GPL(disable_cci);
+
+static int cci_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory:
+ */
+ __cpuc_flush_dcache_area(info, sizeof *info);
+ outer_clean_range(virt_to_phys(info), virt_to_phys(info + 1));
+
+ platform_set_drvdata(pdev, info);
+
+ if (cci_pmu_init(pdev) < 0)
+ pr_info("CCI PMU initialisation failed.\n");
+
+ pr_info("CCI loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+	release_mem_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int cci_driver_remove(struct platform_device *pdev)
+{
+ struct cci_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ cci_pmu_destroy();
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+	release_mem_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_cci_matches[] = {
+ {.compatible = "arm,cci"},
+ {},
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_driver_probe,
+ .remove = cci_driver_remove,
+};
+
+static int __init cci_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+static void __exit cci_exit(void)
+{
+ platform_driver_unregister(&cci_platform_driver);
+}
+
+arch_initcall(cci_init);
+module_exit(cci_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCI support");
diff --git a/drivers/misc/vexpress/Kconfig b/drivers/misc/vexpress/Kconfig
new file mode 100644
index 00000000000..3e2676ae6ee
--- /dev/null
+++ b/drivers/misc/vexpress/Kconfig
@@ -0,0 +1,3 @@
+config ARM_SPC
+ bool "ARM SPC driver support"
+ depends on ARM
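+	  help
+	    Driver support for the Versatile Express Serial Power Controller
+	    (SPC): per-cluster performance (DVFS) levels, wake-up interrupt
+	    configuration, and CPU/cluster reset and power down control.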
diff --git a/drivers/misc/vexpress/Makefile b/drivers/misc/vexpress/Makefile
new file mode 100644
index 00000000000..95b58166d0a
--- /dev/null
+++ b/drivers/misc/vexpress/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_SPC) += arm-spc.o
diff --git a/drivers/misc/vexpress/arm-spc.c b/drivers/misc/vexpress/arm-spc.c
new file mode 100644
index 00000000000..6e2e4820c3a
--- /dev/null
+++ b/drivers/misc/vexpress/arm-spc.c
@@ -0,0 +1,390 @@
+/*
+ * Serial Power Controller (SPC) support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author(s): Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ * Achin Gupta <achin.gupta@arm.com>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vexpress.h>
+
+#include <asm/cacheflush.h>
+#include <asm/memory.h>
+#include <asm/outercache.h>
+
+#define SNOOP_CTL_A15 0x404
+#define SNOOP_CTL_A7 0x504
+#define PERF_LVL_A15 0xB00
+#define PERF_REQ_A15 0xB04
+#define PERF_LVL_A7 0xB08
+#define PERF_REQ_A7 0xB0c
+#define COMMS 0xB10
+#define COMMS_REQ 0xB14
+#define PWC_STATUS 0xB18
+#define PWC_FLAG 0xB1c
+#define WAKE_INT_MASK 0xB24
+#define WAKE_INT_RAW 0xB28
+#define WAKE_INT_STAT 0xB2c
+#define A15_PWRDN_EN 0xB30
+#define A7_PWRDN_EN 0xB34
+#define A15_A7_ISOLATE 0xB38
+#define STANDBYWFI_STAT 0xB3c
+#define A15_CACTIVE 0xB40
+#define A15_PWRDNREQ 0xB44
+#define A15_PWRDNACK 0xB48
+#define A7_CACTIVE 0xB4c
+#define A7_PWRDNREQ 0xB50
+#define A7_PWRDNACK 0xB54
+#define A15_RESET_HOLD 0xB58
+#define A7_RESET_HOLD 0xB5c
+#define A15_RESET_STAT 0xB60
+#define A7_RESET_STAT 0xB64
+#define A15_CONF 0x400
+#define A7_CONF 0x500
+
+#define DRIVER_NAME "SPC"
+#define TIME_OUT 100
+
+struct vexpress_spc_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct vexpress_spc_drvdata *info;
+
+/* SCC virtual address */
+u32 vscc;
+
+static inline int read_wait_to(void __iomem *reg, int status, int timeout)
+{
+	while (timeout-- && readl(reg) == status) {
+		cpu_relax();
+		udelay(2);
+	}
+	/* timeout is -1 here if and only if every attempt was used up */
+	if (timeout < 0)
+		return -EAGAIN;
+	return 0;
+}
+
+int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+
+ spin_lock(&info->lock);
+ *perf = readl(info->baseaddr + perf_cfg_reg);
+ spin_unlock(&info->lock);
+
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_performance);
+
+int vexpress_spc_set_performance(int cluster, int perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 perf_stat_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+ perf_stat_reg = cluster != a15_clusid ? PERF_REQ_A7 : PERF_REQ_A15;
+
+ if (perf < 0 || perf > 7)
+ return -EINVAL;
+
+ spin_lock(&info->lock);
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ if (read_wait_to(info->baseaddr + perf_stat_reg, 1, TIME_OUT))
+ ret = -EAGAIN;
+ spin_unlock(&info->lock);
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_performance);
+
+void vexpress_spc_set_wake_intr(u32 mask)
+{
+ if (!IS_ERR_OR_NULL(info))
+ writel(mask & VEXPRESS_SPC_WAKE_INTR_MASK,
+ info->baseaddr + WAKE_INT_MASK);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_wake_intr);
+
+u32 vexpress_spc_get_wake_intr(int raw)
+{
+ u32 wake_intr_reg = raw ? WAKE_INT_RAW : WAKE_INT_STAT;
+
+ if (!IS_ERR_OR_NULL(info))
+ return readl(info->baseaddr + wake_intr_reg);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_wake_intr);
+
+void vexpress_spc_powerdown_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+
+ if (!IS_ERR_OR_NULL(info)) {
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN;
+ writel(!!enable, info->baseaddr + pwdrn_reg);
+ }
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_powerdown_enable);
+
+void vexpress_spc_adb400_pd_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+ u32 val = enable ? 0xF : 0x0; /* all adb bridges ?? */
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDNREQ : A15_PWRDNREQ;
+
+ spin_lock(&info->lock);
+ writel(val, info->baseaddr + pwdrn_reg);
+ spin_unlock(&info->lock);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_adb400_pd_enable);
+
+void vexpress_scc_ctl_snoops(int cluster, int enable)
+{
+ u32 val;
+ u32 snoop_reg = 0;
+ u32 a15_clusid = 0;
+ u32 or = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ snoop_reg = cluster != a15_clusid ? SNOOP_CTL_A7 : SNOOP_CTL_A15;
+ or = cluster != a15_clusid ? 0x2000 : 0x180;
+
+ val = readl_relaxed(info->baseaddr + snoop_reg);
+ if (enable) {
+ or = ~or;
+ val &= or;
+ } else {
+ val |= or;
+ }
+ writel_relaxed(val, info->baseaddr + snoop_reg);
+}
+EXPORT_SYMBOL_GPL(vexpress_scc_ctl_snoops);
+
+void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{
+ u32 rsthold_reg, prst_shift;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ prst_shift = 3;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ prst_shift = 2;
+ }
+ val = readl_relaxed(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= (1 << cpu);
+ else
+ val &= ~(1 << cpu);
+ writel_relaxed(val, info->baseaddr + rsthold_reg);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpureset);
+
+void vexpress_spc_wfi_cluster_reset(int cluster, int enable)
+{
+ u32 rsthold_reg, shift;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ shift = 6;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ shift = 4;
+ }
+ spin_lock(&info->lock);
+ val = readl(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= 1 << shift;
+ else
+ val &= ~(1 << shift);
+ writel(val, info->baseaddr + rsthold_reg);
+ spin_unlock(&info->lock);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cluster_reset);
+
+int vexpress_spc_wfi_cpustat(int cluster)
+{
+ u32 rststat_reg;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return 0;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ rststat_reg = STANDBYWFI_STAT;
+
+ val = readl_relaxed(info->baseaddr + rststat_reg);
+ return cluster != a15_clusid ? ((val & 0x38) >> 3) : (val & 0x3);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpustat);
+
+static bool vexpress_spc_loaded;
+
+bool vexpress_spc_check_loaded(void)
+{
+ return vexpress_spc_loaded;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_check_loaded);
+
+static int __devinit vexpress_spc_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+ vscc = (u32) info->baseaddr;
+ spin_lock_init(&info->lock);
+ platform_set_drvdata(pdev, info);
+
+ /*
+ * Multi-cluster systems may need this data when non-coherent, during
+ * cluster power-up/power-down. Make sure it reaches main memory:
+ */
+ __cpuc_flush_dcache_area(info, sizeof *info);
+ outer_clean_range(virt_to_phys(info), virt_to_phys(info + 1));
+
+ pr_info("vexpress_spc loaded at %p\n", info->baseaddr);
+ vexpress_spc_loaded = true;
+ return ret;
+
+ioremap_err:
+	release_mem_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit vexpress_spc_driver_remove(struct platform_device *pdev)
+{
+ struct vexpress_spc_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+	release_mem_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_vexpress_spc_matches[] = {
+ {.compatible = "arm,spc"},
+ {},
+};
+
+static struct platform_driver vexpress_spc_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_vexpress_spc_matches,
+ },
+ .probe = vexpress_spc_driver_probe,
+ .remove = vexpress_spc_driver_remove,
+};
+
+static int __init vexpress_spc_init(void)
+{
+ return platform_driver_register(&vexpress_spc_platform_driver);
+}
+
+static void __exit vexpress_spc_exit(void)
+{
+ platform_driver_unregister(&vexpress_spc_platform_driver);
+}
+
+arch_initcall(vexpress_spc_init);
+module_exit(vexpress_spc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial Power Controller (SPC) support");
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
new file mode 100644
index 00000000000..ce3f705fb6d
--- /dev/null
+++ b/include/linux/arm-cci.h
@@ -0,0 +1,30 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_ARM_CCI_H
+#define __LINUX_ARM_CCI_H
+
+#ifdef CONFIG_ARM_CCI
+extern void disable_cci(int cluster);
+#else
+static inline void disable_cci(int cluster) { }
+#endif
+
+#endif
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
index c52215ff424..bc0b2f8b4db 100644
--- a/include/linux/vexpress.h
+++ b/include/linux/vexpress.h
@@ -118,4 +118,50 @@ void vexpress_osc_of_setup(struct device_node *node);
void vexpress_clk_init(void __iomem *sp810_base);
void vexpress_clk_of_init(void);
+/* SPC */
+
+#define VEXPRESS_SPC_WAKE_INTR_IRQ(cluster, cpu) \
+ (1 << (4 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_FIQ(cluster, cpu) \
+ (1 << (7 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_SWDOG (1 << 10)
+#define VEXPRESS_SPC_WAKE_INTR_GTIMER (1 << 11)
+#define VEXPRESS_SPC_WAKE_INTR_MASK 0xFFF
+
+#ifdef CONFIG_ARM_SPC
+extern int vexpress_spc_get_performance(int cluster, int *perf);
+extern int vexpress_spc_set_performance(int cluster, int perf);
+extern void vexpress_spc_set_wake_intr(u32 mask);
+extern u32 vexpress_spc_get_wake_intr(int raw);
+extern void vexpress_spc_powerdown_enable(int cluster, int enable);
+extern void vexpress_spc_adb400_pd_enable(int cluster, int enable);
+extern void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable);
+extern int vexpress_spc_wfi_cpustat(int cluster);
+extern void vexpress_spc_wfi_cluster_reset(int cluster, int enable);
+extern bool vexpress_spc_check_loaded(void);
+extern void vexpress_scc_ctl_snoops(int cluster, int enable);
+#else
+static inline int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ return -EINVAL;
+}
+static inline int vexpress_spc_set_performance(int cluster, int perf)
+{
+ return -EINVAL;
+}
+static inline void vexpress_spc_set_wake_intr(u32 mask) { }
+static inline u32 vexpress_spc_get_wake_intr(int raw) { return 0; }
+static inline void vexpress_spc_powerdown_enable(int cluster, int enable) { }
+static inline void vexpress_spc_adb400_pd_enable(int cluster, int enable) { }
+static inline void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{ }
+static inline int vexpress_spc_wfi_cpustat(int cluster) { return 0; }
+static inline void vexpress_spc_wfi_cluster_reset(int cluster, int enable) { }
+static inline bool vexpress_spc_check_loaded(void)
+{
+ return false;
+}
+static inline void vexpress_scc_ctl_snoops(int cluster, int enable) { }
+#endif
+
#endif