summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJon Medhurst <tixy@linaro.org>2012-07-18 19:42:23 +0100
committerJon Medhurst <tixy@linaro.org>2012-07-18 19:42:23 +0100
commitf69fcd3559e2b05b4a311b054233488e566a1f29 (patch)
treef573fc957325b65683ac3846dcd1c89fe23881cd
parent2612181026933f4c72a76e3a0239915a322c0973 (diff)
parente1a9cc09f7888e4787c0b12a8f9ea5676904d62a (diff)
downloadvexpress-a9-armlt-linaro-android-3.5.tar.gz
Merge branch 'tracking-armlt-tc2-pm' of git://git.linaro.org/landing-teams/working/arm/kernel into armlt-linaro-android-3.5armlt-linaro-android-3.5-20120718armlt-linaro-android-3.5
Conflicts: drivers/cpuidle/coupled.c drivers/misc/Kconfig drivers/misc/Makefile
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts12
-rw-r--r--arch/arm/common/gic.c8
-rw-r--r--arch/arm/include/asm/cacheflush.h38
-rw-r--r--arch/arm/kernel/arch_timer.c2
-rw-r--r--arch/arm/kernel/sleep.S30
-rw-r--r--arch/arm/kernel/smp.c56
-rw-r--r--arch/arm/kernel/suspend.c23
-rw-r--r--arch/arm/mach-vexpress/Kconfig14
-rw-r--r--arch/arm/mach-vexpress/Makefile1
-rw-r--r--arch/arm/mach-vexpress/cpuidle-tc2.c251
-rw-r--r--arch/arm/mach-vexpress/hotplug-asm.S31
-rw-r--r--arch/arm/mach-vexpress/tc2-sleep.S60
-rw-r--r--arch/arm/mm/cache-v7.S43
-rw-r--r--arch/arm/mm/proc-macros.S7
-rw-r--r--drivers/cpufreq/Kconfig.arm9
-rw-r--r--drivers/cpufreq/Makefile1
-rw-r--r--drivers/cpufreq/vexpress_bL_cpufreq.c276
-rw-r--r--drivers/misc/Kconfig4
-rw-r--r--drivers/misc/Makefile3
-rw-r--r--drivers/misc/arm-cci.c130
-rw-r--r--drivers/misc/vexpress/Kconfig2
-rw-r--r--drivers/misc/vexpress/Makefile1
-rw-r--r--drivers/misc/vexpress/arm-spc.c371
-rw-r--r--include/linux/arm-cci.h30
-rw-r--r--include/linux/vexpress.h50
26 files changed, 1438 insertions, 16 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e9e5aeb098a..b26f8c08f39 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -303,6 +303,7 @@ config ARCH_VERSATILE
config ARCH_VEXPRESS
bool "ARM Ltd. Versatile Express family"
+ select ARCH_HAS_CPUFREQ
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARM_AMBA
select ARM_TIMER_SP804
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
index b947cb6b48b..68e8fc6948f 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc2.dts
@@ -42,6 +42,7 @@
cluster0: cluster@0 {
reg = <0>;
+ freqs = <500000000 600000000 700000000 800000000 900000000 1000000000 1100000000 1200000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -59,6 +60,7 @@
cluster1: cluster@1 {
reg = <1>;
+ freqs = <350000000 400000000 500000000 600000000 700000000 800000000 900000000 1000000000>;
cores {
#address-cells = <1>;
#size-cells = <0>;
@@ -148,6 +150,16 @@
framebuffer = <0xff000000 0x01000000>;
};
+ spc@7fff0000 {
+ compatible = "arm,spc";
+ reg = <0x7FFF0000 0x1000>;
+ };
+
+ cci@2c090000 {
+ compatible = "arm,cci";
+ reg = <0x2c090000 0x8000>;
+ };
+
memory-controller@2b0a0000 {
compatible = "arm,pl341", "arm,primecell";
reg = <0x2b0a0000 0x1000>;
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index d108cc4e542..7fa82240d08 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -590,6 +590,14 @@ static void gic_cpu_save(unsigned int gic_nr)
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
+ /*
+ * Disable GIC CPU IF and IRQ bypass. When a CPU is shut down we must
+ * ensure that it does not exit wfi if an IRQ is pending on the IF.
+ * The GIC allows this operation by disabling the GIC CPU IF and the
+ * IRQ bypass mode. The raw IRQ line is still delivered to the power
+ * controller that uses the IRQ to wake up the respective core.
+ */
+ writel_relaxed(0x1e0, cpu_base + GIC_CPU_CTRL);
}
static void gic_cpu_restore(unsigned int gic_nr)
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index c3099b1b698..1b1a43b2266 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -50,6 +50,10 @@
*
* Unconditionally clean and invalidate the entire cache.
*
+ * flush_kern_dcache_level(level)
+ *
+ * Flush data cache levels up to the level input parameter.
+ *
* flush_user_all()
*
* Clean and invalidate all user space cache entries
@@ -98,6 +102,7 @@
struct cpu_cache_fns {
void (*flush_icache_all)(void);
void (*flush_kern_all)(void);
+ void (*flush_kern_dcache_level)(int);
void (*flush_user_all)(void);
void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
@@ -200,6 +205,39 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define __flush_icache_preferred __flush_icache_all_generic
#endif
+#if __LINUX_ARM_ARCH__ >= 7
+/*
+ * Hotplug and CPU idle code needs to flush only the cache levels
+ * impacted by power down operations. In v7 the upper level is
+ * retrieved by reading LoUIS field of CLIDR, since inner shareability
+ * represents the cache boundaries affected by per-CPU shutdown
+ * operations in the most common platforms.
+ */
+#define __cache_level_v7_uis ({ \
+ u32 val; \
+ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r"(val)); \
+ ((val & 0xe00000) >> 21); })
+
+#define flush_cache_level_preferred() __cache_level_v7_uis
+#else
+#define flush_cache_level_preferred() (-1)
+#endif
+
+static inline int flush_cache_level_cpu(void)
+{
+ return flush_cache_level_preferred();
+}
+/*
+ * Flush data cache up to a certain cache level
+ * level - upper cache level to clean
+ * if level == -1, default to flush_kern_all
+ */
+#ifdef MULTI_CACHE
+#define flush_dcache_level(level) cpu_cache.flush_kern_dcache_level(level)
+#else
+#define flush_dcache_level(level) __cpuc_flush_kern_all()
+#endif
+
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
diff --git a/arch/arm/kernel/arch_timer.c b/arch/arm/kernel/arch_timer.c
index 0925c702b0e..cd8c80983fb 100644
--- a/arch/arm/kernel/arch_timer.c
+++ b/arch/arm/kernel/arch_timer.c
@@ -137,7 +137,7 @@ static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
/* Be safe... */
arch_timer_disable();
- clk->features = CLOCK_EVT_FEAT_ONESHOT;
+ clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 987dcf33415..b67293924ef 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -4,6 +4,7 @@
#include <asm/assembler.h>
#include <asm/glue-cache.h>
#include <asm/glue-proc.h>
+#include "entry-header.S"
.text
/*
@@ -30,9 +31,8 @@ ENTRY(__cpu_suspend)
mov r2, r5 @ virtual SP
ldr r3, =sleep_save_sp
#ifdef CONFIG_SMP
- ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
- ALT_UP(mov lr, #0)
- and lr, lr, #15
+ get_thread_info r5
+ ldr lr, [r5, #TI_CPU] @ cpu logical index
add r3, r3, lr, lsl #2
#endif
bl __cpu_suspend_save
@@ -82,10 +82,11 @@ ENDPROC(cpu_resume_after_mmu)
.align
ENTRY(cpu_resume)
#ifdef CONFIG_SMP
- adr r0, sleep_save_sp
- ALT_SMP(mrc p15, 0, r1, c0, c0, 5)
+ ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
+ ALT_SMP(bfc r0, #24, #8)
+ ALT_SMP(bl cpu_logical_index) @ return logical index in r1
ALT_UP(mov r1, #0)
- and r1, r1, #15
+ adr r0, sleep_save_sp
ldr r0, [r0, r1, lsl #2] @ stack phys addr
#else
ldr r0, sleep_save_sp @ stack phys addr
@@ -102,3 +103,20 @@ sleep_save_sp:
.rept CONFIG_NR_CPUS
.long 0 @ preserve stack phys ptr here
.endr
+
+#ifdef CONFIG_SMP
+cpu_logical_index:
+ adr r3, cpu_map_ptr
+ ldr r2, [r3]
+ add r3, r3, r2 @ virt_to_phys(__cpu_logical_map)
+ mov r1, #0
+1:
+ ldr r2, [r3, r1, lsl #2]
+ cmp r2, r0
+ moveq pc, lr
+ add r1, r1, #1
+ b 1b
+
+cpu_map_ptr:
+ .long __cpu_logical_map - .
+#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 083ecceb406..04b2d76b0b7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
#include <linux/percpu.h>
#include <linux/clockchips.h>
#include <linux/completion.h>
+#include <linux/cpufreq.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
@@ -136,7 +137,7 @@ int __cpu_disable(void)
* Flush user cache and TLB mappings, and then remove this CPU
* from the vm mask set of all processes.
*/
- flush_cache_all();
+ flush_dcache_level(flush_cache_level_cpu());
local_flush_tlb_all();
clear_tasks_mm_cpumask(cpu);
@@ -641,3 +642,56 @@ int setup_profiling_timer(unsigned int multiplier)
{
return -EINVAL;
}
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
+static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
+static unsigned long global_l_p_j_ref;
+static unsigned long global_l_p_j_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ int cpu = freq->cpu;
+
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+ if (!per_cpu(l_p_j_ref, cpu)) {
+ per_cpu(l_p_j_ref, cpu) =
+ per_cpu(cpu_data, cpu).loops_per_jiffy;
+ per_cpu(l_p_j_ref_freq, cpu) = freq->old;
+ if (!global_l_p_j_ref) {
+ global_l_p_j_ref = loops_per_jiffy;
+ global_l_p_j_ref_freq = freq->old;
+ }
+ }
+
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
+ (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
+ loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
+ global_l_p_j_ref_freq,
+ freq->new);
+ per_cpu(cpu_data, cpu).loops_per_jiffy =
+ cpufreq_scale(per_cpu(l_p_j_ref, cpu),
+ per_cpu(l_p_j_ref_freq, cpu),
+ freq->new);
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 1794cc3b0f1..8b3278bff99 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -17,16 +17,29 @@ extern void cpu_resume_mmu(void);
*/
void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
{
+ u32 *ctx = ptr;
+
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
- *ptr++ = virt_to_phys(idmap_pgd);
- *ptr++ = sp;
- *ptr++ = virt_to_phys(cpu_do_resume);
+ *ctx++ = virt_to_phys(idmap_pgd);
+ *ctx++ = sp;
+ *ctx++ = virt_to_phys(cpu_do_resume);
- cpu_do_suspend(ptr);
+ cpu_do_suspend(ctx);
- flush_cache_all();
+ flush_dcache_level(flush_cache_level_cpu());
+ /*
+ * flush_dcache_level does not guarantee that
+ * save_ptr and ptr are cleaned to main memory,
+ * just up to the required cache level.
+ * Since the context pointer and context itself
+ * are to be retrieved with the MMU off that
+ * data must be cleaned from all cache levels
+ * to main memory using "area" cache primitives.
+ */
+ __cpuc_flush_dcache_area(ptr, ptrsz);
+ __cpuc_flush_dcache_area(save_ptr, sizeof(*save_ptr));
outer_clean_range(*save_ptr, *save_ptr + ptrsz);
outer_clean_range(virt_to_phys(save_ptr),
virt_to_phys(save_ptr) + sizeof(*save_ptr));
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index cf8730d35e7..95d1c2b743a 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -12,6 +12,20 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
build a working kernel, you must also enable relevant core
tile support or Flattened Device Tree based support options.
+config ARCH_VEXPRESS_TC2_PM
+ bool "Power Management Support for TC2 test-chip"
+ depends on CPU_IDLE && PM
+ select ARM_CPU_SUSPEND
+ select ARCH_NEEDS_CPU_IDLE_COUPLED
+ select ARM_SPC
+ select ARM_CCI
+ help
+ Provides code that enables CPU idle power management on the
+ TC2 testchip. It enables the CPU idle driver so that the kernel
+ can enter cluster power down states provided by the power
+ controller. Code is built on top of coupled C-state idle code
+ since all CPUs need to be idle to enter cluster shutdown.
+
config ARCH_VEXPRESS_CA9X4
bool "Versatile Express Cortex-A9x4 tile"
select ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
diff --git a/arch/arm/mach-vexpress/Makefile b/arch/arm/mach-vexpress/Makefile
index 90551b9780a..5067ef487bd 100644
--- a/arch/arm/mach-vexpress/Makefile
+++ b/arch/arm/mach-vexpress/Makefile
@@ -6,3 +6,4 @@ obj-y := v2m.o
obj-$(CONFIG_ARCH_VEXPRESS_CA9X4) += ct-ca9x4.o
obj-$(CONFIG_SMP) += platsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+obj-$(CONFIG_ARCH_VEXPRESS_TC2_PM) += cpuidle-tc2.o hotplug-asm.o tc2-sleep.o
diff --git a/arch/arm/mach-vexpress/cpuidle-tc2.c b/arch/arm/mach-vexpress/cpuidle-tc2.c
new file mode 100644
index 00000000000..bda97dd7c65
--- /dev/null
+++ b/arch/arm/mach-vexpress/cpuidle-tc2.c
@@ -0,0 +1,251 @@
+/*
+ * TC2 CPU idle driver.
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/arm-cci.h>
+#include <linux/bitmap.h>
+#include <linux/cpuidle.h>
+#include <linux/cpu_pm.h>
+#include <linux/clockchips.h>
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tick.h>
+#include <linux/vexpress.h>
+#include <asm/cpuidle.h>
+#include <asm/cputype.h>
+#include <asm/idmap.h>
+#include <asm/proc-fns.h>
+#include <asm/suspend.h>
+
+#include <mach/motherboard.h>
+
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx);
+
+static struct cpuidle_state tc2_cpuidle_set[] __initdata = {
+ [0] = ARM_CPUIDLE_WFI_STATE,
+ [1] = {
+ .enter = tc2_enter_coupled,
+ .exit_latency = 300,
+ .target_residency = 1000,
+ .flags = CPUIDLE_FLAG_TIME_VALID |
+ CPUIDLE_FLAG_COUPLED,
+ .name = "C1",
+ .desc = "ARM power down",
+ },
+};
+
+struct cpuidle_driver tc2_idle_driver = {
+ .name = "tc2_idle",
+ .owner = THIS_MODULE,
+ .en_core_tk_irqen = 1,
+ .safe_state_index = 0
+};
+
+static DEFINE_PER_CPU(struct cpuidle_device, tc2_idle_dev);
+
+#define NR_CLUSTERS 2
+static cpumask_t cluster_mask = CPU_MASK_NONE;
+
+extern void disable_clean_inv_dcache(int);
+static atomic_t abort_barrier[NR_CLUSTERS];
+
+extern void tc2_cpu_resume(void);
+extern void disable_snoops(void);
+
+int tc2_coupled_finisher(unsigned long arg)
+{
+ unsigned int mpidr = read_cpuid_mpidr();
+ unsigned int cpu = smp_processor_id();
+ unsigned int cluster = (mpidr >> 8) & 0xf;
+ unsigned int weight = cpumask_weight(topology_core_cpumask(cpu));
+ u8 wfi_weight = 0;
+
+ cpuidle_coupled_parallel_barrier((struct cpuidle_device *)arg,
+ &abort_barrier[cluster]);
+ if (mpidr & 0xf) {
+ disable_clean_inv_dcache(0);
+ wfi();
+ /* not reached */
+ }
+
+ while (wfi_weight != (weight - 1)) {
+ wfi_weight = vexpress_spc_wfi_cpustat(cluster);
+ wfi_weight = hweight8(wfi_weight);
+ }
+
+ vexpress_spc_powerdown_enable(cluster, 1);
+ disable_clean_inv_dcache(1);
+ disable_cci(cluster);
+ disable_snoops();
+ return 1;
+}
+
+/*
+ * tc2_enter_coupled - Programs CPU to enter the specified state
+ * @dev: cpuidle device
+ * @drv: The target state to be programmed
+ * @idx: state index
+ *
+ * Called from the CPUidle framework to program the device to the
+ * specified target state selected by the governor.
+ */
+static int tc2_enter_coupled(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int idx)
+{
+ struct timespec ts_preidle, ts_postidle, ts_idle;
+ int ret;
+ int cluster = (read_cpuid_mpidr() >> 8) & 0xf;
+ /* Used to keep track of the total time in idle */
+ getnstimeofday(&ts_preidle);
+
+ if (!cpu_isset(cluster, cluster_mask))
+ goto shallow_out;
+
+ BUG_ON(!irqs_disabled());
+
+ cpu_pm_enter();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu);
+
+ ret = cpu_suspend((unsigned long) dev, tc2_coupled_finisher);
+
+ if (ret)
+ BUG();
+
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);
+
+ cpu_pm_exit();
+
+shallow_out:
+ getnstimeofday(&ts_postidle);
+ ts_idle = timespec_sub(ts_postidle, ts_preidle);
+
+ dev->last_residency = ts_idle.tv_nsec / NSEC_PER_USEC +
+ ts_idle.tv_sec * USEC_PER_SEC;
+ return idx;
+}
+
+static int idle_mask_show(struct seq_file *f, void *p)
+{
+ char buf[256];
+ bitmap_scnlistprintf(buf, 256, cpumask_bits(&cluster_mask),
+ NR_CLUSTERS);
+
+ seq_printf(f, "%s\n", buf);
+
+ return 0;
+}
+
+static int idle_mask_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, idle_mask_show, inode->i_private);
+}
+
+static const struct file_operations cpuidle_fops = {
+ .open = idle_mask_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int idle_debug_set(void *data, u64 val)
+{
+ int i;
+
+ if ((val > NR_CLUSTERS || val < 0) && val != 0xff) {
+ pr_warning("Wrong parameter passed\n");
+ return -EINVAL;
+ }
+ cpuidle_pause_and_lock();
+ if (val == 0xff) {
+ cpumask_clear(&cluster_mask);
+ return 0;
+ }
+
+ for (i = 0; i < NR_CLUSTERS; i++)
+ if (val == i)
+ cpumask_set_cpu(i, &cluster_mask);
+ cpuidle_resume_and_unlock();
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(idle_debug_fops, NULL, idle_debug_set, "%llu\n");
+
+/*
+ * tc2_idle_init
+ *
+ * Registers the TC2 specific cpuidle driver with the cpuidle
+ * framework with the valid set of states.
+ */
+int __init tc2_idle_init(void)
+{
+ struct cpuidle_device *dev;
+ int i, cpu_id;
+ struct dentry *idle_debug, *file_debug;
+ struct cpuidle_driver *drv = &tc2_idle_driver;
+
+ drv->state_count = (sizeof(tc2_cpuidle_set) /
+ sizeof(struct cpuidle_state));
+
+ for (i = 0; i < drv->state_count; i++) {
+ memcpy(&drv->states[i], &tc2_cpuidle_set[i],
+ sizeof(struct cpuidle_state));
+ }
+
+ cpuidle_register_driver(drv);
+
+ for_each_cpu(cpu_id, cpu_online_mask) {
+ pr_err("CPUidle for CPU%d registered\n", cpu_id);
+ dev = &per_cpu(tc2_idle_dev, cpu_id);
+ dev->cpu = cpu_id;
+ dev->safe_state_index = 0;
+
+ cpumask_copy(&dev->coupled_cpus,
+ topology_core_cpumask(cpu_id));
+ dev->state_count = drv->state_count;
+
+ if (cpuidle_register_device(dev)) {
+ printk(KERN_ERR "%s: Cpuidle register device failed\n",
+ __func__);
+ return -EIO;
+ }
+ }
+
+ idle_debug = debugfs_create_dir("idle_debug", NULL);
+
+ if (IS_ERR_OR_NULL(idle_debug)) {
+ printk(KERN_INFO "Error in creating idle debugfs directory\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_idle", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &idle_debug_fops);
+
+ if (IS_ERR_OR_NULL(file_debug)) {
+ printk(KERN_INFO "Error in creating enable_idle file\n");
+ return 0;
+ }
+
+ file_debug = debugfs_create_file("enable_mask", S_IRUGO | S_IWGRP,
+ idle_debug, NULL, &cpuidle_fops);
+
+ if (IS_ERR_OR_NULL(file_debug))
+ printk(KERN_INFO "Error in creating enable_mask file\n");
+
+ /* enable all wake-up IRQs by default */
+ vexpress_spc_set_wake_intr(0x7ff);
+ v2m_flags_set(virt_to_phys(tc2_cpu_resume));
+
+ return 0;
+}
+
+late_initcall(tc2_idle_init);
diff --git a/arch/arm/mach-vexpress/hotplug-asm.S b/arch/arm/mach-vexpress/hotplug-asm.S
new file mode 100644
index 00000000000..c51d2310e39
--- /dev/null
+++ b/arch/arm/mach-vexpress/hotplug-asm.S
@@ -0,0 +1,31 @@
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+ .text
+ENTRY(disable_clean_inv_dcache)
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+
+ mrc p15, 0, r3, c1, c0, 0
+ bic r3, #4 @ clear C bit
+ mcr p15, 0, r3, c1, c0, 0
+ dsb
+ isb
+ cmp r0, #0
+ mrc p15, 1, r0, c0, c0, 1
+ andeq r0, r0, #0xe00000
+ moveq r0, r0, lsr #21
+ andne r0, r0, #0x7000000
+ movne r0, r0, lsr #24
+ movs r0, r0
+ blne v7_flush_dcache_level
+ clrex
+ mrc p15, 0, r3, c1, c0, 1
+ bic r3, #0x40 @ clear SMP bit
+ mcr p15, 0, r3, c1, c0, 1
+ isb
+ dsb
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(disable_clean_inv_dcache)
diff --git a/arch/arm/mach-vexpress/tc2-sleep.S b/arch/arm/mach-vexpress/tc2-sleep.S
new file mode 100644
index 00000000000..cc6b84144fa
--- /dev/null
+++ b/arch/arm/mach-vexpress/tc2-sleep.S
@@ -0,0 +1,60 @@
+#include <linux/linkage.h>
+
+ENTRY(tc2_cpu_resume)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ adr r0, value
+ addne r0, r0, #16
+ ldmia r0, {r1, r2, r3, r4} @ CCI address, SCC snoop control & val
+ mvn r3, r3 @ undo actions done at shutdown
+ ldr r0, [r2]
+ and r5, r0, r3
+ str r5, [r2]
+ mov r0, #3 @ enable CCI for the cluster
+ str r0, [r1]
+ adr r1, cci_ctrl
+ ldr r1, [r1]
+loop:
+ ldr r0, [r1]
+ ands r0, r0, #1
+ bne loop
+ mov r0, #0 @ disable power down enable
+ str r0, [r4]
+ b cpu_resume
+ENDPROC(tc2_cpu_resume)
+
+ENTRY(disable_snoops)
+ mrc p15, 0, r0, c0, c0, 5
+ ands r0, r0, #0xff00
+ adr r0, vvalue
+ addne r0, r0, #8
+ ldmia r0, {r2, r3} @ SCC snoop control reg offset & value
+ ldr r1, scc_ptr
+ ldr r1, [r1]
+ add r2, r1, r2
+ ldr r0, [r2]
+ orr r0, r0, r3
+ dsb
+ isb
+ str r0, [r2]
+ wfi
+ENDPROC(disable_snoops)
+
+cci_ctrl:
+ .long 0x2c09000c
+value:
+ .long 0x2c094000
+ .long 0x7fff0404
+ .long 0x180
+ .long 0x7fff0b30
+ .long 0x2c095000
+ .long 0x7fff0504
+ .long 0x2000
+ .long 0x7fff0b34
+vvalue:
+ .long 0x404
+ .long 0x180
+ .long 0x504
+ .long 0x2000
+scc_ptr:
+ .long vscc
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 39e3fb3db80..5e648ea2aab 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -34,6 +34,23 @@ ENTRY(v7_flush_icache_all)
ENDPROC(v7_flush_icache_all)
/*
+ * v7_flush_dcache_level(level)
+ *
+ * Flush the D-cache up to the level passed as first input parameter.
+ *
+ * r0 - cache level
+ *
+ * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
+ */
+
+ENTRY(v7_flush_dcache_level)
+ dmb
+ mov r3, r0, lsl #1 @ level * 2
+ mrc p15, 1, r0, c0, c0, 1 @ read clidr
+ b __flush_level
+ENDPROC(v7_flush_dcache_level)
+
+/*
* v7_flush_dcache_all()
*
* Flush the whole D-cache.
@@ -48,6 +65,7 @@ ENTRY(v7_flush_dcache_all)
ands r3, r0, #0x7000000 @ extract loc from clidr
mov r3, r3, lsr #23 @ left align loc bit field
beq finished @ if loc is 0, then no need to clean
+__flush_level:
mov r10, #0 @ start clean at cache level 0
loop1:
add r2, r10, r10, lsr #1 @ work out 3x current cache level
@@ -121,6 +139,29 @@ ENTRY(v7_flush_kern_cache_all)
ENDPROC(v7_flush_kern_cache_all)
/*
+ * v7_flush_kern_dcache_level(int level)
+ * level - upper level that should be cleaned/invalidated
+ * [valid values (-1,7)]
+ * level == -1 forces a flush_kern_cache_all
+ * level == 0 is a nop
+ * 1 <= level <= 7 flush dcache up to level
+ * Flush the data cache up to a level passed as a platform
+ * specific parameter
+ */
+ENTRY(v7_flush_kern_dcache_level)
+ cmp r0, #-1 @ -1 defaults to flush all
+ beq v7_flush_kern_cache_all
+ ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( stmfd sp!, {r4-r7, r9-r11, lr} )
+ sub r2, r0, #1
+ cmp r2, #6
+ blls v7_flush_dcache_level @ jump if 0 < level <=7
+ ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} )
+ THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} )
+ mov pc, lr
+ENDPROC(v7_flush_kern_dcache_level)
+
+/*
* v7_flush_cache_all()
*
* Flush all TLB entries in a particular address space
@@ -350,4 +391,4 @@ ENDPROC(v7_dma_unmap_area)
__INITDATA
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
- define_cache_functions v7
+ define_cache_functions v7, cachelevel=1
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 2d8ff3ad86d..f193bc374e2 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -293,12 +293,17 @@ ENTRY(\name\()_processor_functions)
.size \name\()_processor_functions, . - \name\()_processor_functions
.endm
-.macro define_cache_functions name:req
+.macro define_cache_functions name:req, cachelevel=0
.align 2
.type \name\()_cache_fns, #object
ENTRY(\name\()_cache_fns)
.long \name\()_flush_icache_all
.long \name\()_flush_kern_cache_all
+ .if \cachelevel
+ .long \name\()_flush_kern_dcache_level
+ .else
+ .long \name\()_flush_kern_cache_all
+ .endif
.long \name\()_flush_user_cache_all
.long \name\()_flush_user_cache_range
.long \name\()_coherent_kern_range
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 5961e6415f0..d13195361a9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -76,3 +76,12 @@ config ARM_EXYNOS5250_CPUFREQ
help
This adds the CPUFreq driver for Samsung EXYNOS5250
SoC.
+
+config ARM_VEXPRESS_BL_CPUFREQ
+ tristate "CPUfreq driver for ARM Vexpress big.LITTLE CPUs"
+ depends on ARCH_VEXPRESS && CPU_FREQ
+ help
+ This enables the CPUfreq driver for ARM Vexpress big.LITTLE
+ platform.
+
+ If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 35835b7cc2d..478c08b795f 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o
obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o
+obj-$(CONFIG_ARM_VEXPRESS_BL_CPUFREQ) += vexpress_bL_cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/vexpress_bL_cpufreq.c b/drivers/cpufreq/vexpress_bL_cpufreq.c
new file mode 100644
index 00000000000..88ac95ef85c
--- /dev/null
+++ b/drivers/cpufreq/vexpress_bL_cpufreq.c
@@ -0,0 +1,276 @@
+/*
+ * Vexpress big.LITTLE CPUFreq support
+ * Based on mach-integrator
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#include <linux/vexpress.h>
+
+#define VEXPRESS_MAX_CLUSTER 2
+
+static struct cpufreq_frequency_table *freq_table[VEXPRESS_MAX_CLUSTER];
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+
+/* Cached current cluster for each CPU to save on IPIs */
+static DEFINE_PER_CPU(unsigned int, cpu_cur_cluster);
+
+/*
+ * Functions to get the current status.
+ *
+ * Beware that the cluster for another CPU may change unexpectedly.
+ */
+
+static unsigned int get_local_cluster(void)
+{
+ unsigned int mpidr;
+ asm ("mrc\tp15, 0, %0, c0, c0, 5" : "=r" (mpidr));
+ return (mpidr >> 8) & 0xf;
+}
+
+static void __get_current_cluster(void *_data)
+{
+ unsigned int *_cluster = _data;
+ *_cluster = get_local_cluster();
+}
+
+static int get_current_cluster(unsigned int cpu)
+{
+ unsigned int cluster = 0;
+ smp_call_function_single(cpu, __get_current_cluster, &cluster, 1);
+ return cluster;
+}
+
+static int get_current_cached_cluster(unsigned int cpu)
+{
+ return per_cpu(cpu_cur_cluster, cpu);
+}
+
+/* Validate policy frequency range */
+static int vexpress_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ uint32_t cur_cluster = get_current_cached_cluster(policy->cpu);
+
+ /* This call takes care of it all using freq_table */
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int vexpress_cpufreq_set_target(struct cpufreq_policy *policy,
+ unsigned int target_freq, unsigned int relation)
+{
+ uint32_t cpu = policy->cpu;
+ struct cpufreq_freqs freqs;
+ uint32_t freq_tab_idx;
+ uint32_t cur_cluster;
+ int ret = 0;
+
+ /* Read current clock rate */
+ cur_cluster = get_current_cached_cluster(cpu);
+
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ freqs.old = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ /* Make sure that target_freq is within supported range */
+ if (target_freq > policy->max)
+ target_freq = policy->max;
+ if (target_freq < policy->min)
+ target_freq = policy->min;
+
+ /* Determine valid target frequency using freq_table */
+ cpufreq_frequency_table_target(policy, freq_table[cur_cluster],
+ target_freq, relation, &freq_tab_idx);
+ freqs.new = freq_table[cur_cluster][freq_tab_idx].frequency;
+
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new)
+ return 0;
+
+ pr_debug("Requested Freq %d cpu %d\n", freqs.new, cpu);
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ ret = vexpress_spc_set_performance(cur_cluster, freq_tab_idx);
+ if (ret) {
+ pr_err("Error %d while setting required OPP\n", ret);
+ return ret;
+ }
+
+ policy->cur = freqs.new;
+
+ for_each_cpu(freqs.cpu, policy->cpus)
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ return ret;
+}
+
+/* Get current clock frequency */
+static unsigned int vexpress_cpufreq_get(unsigned int cpu)
+{
+ uint32_t freq_tab_idx = 0;
+ uint32_t cur_cluster = get_current_cached_cluster(cpu);
+
+ /*
+ * Read current clock rate with vexpress_spc call
+ */
+ if (vexpress_spc_get_performance(cur_cluster, &freq_tab_idx))
+ return -EIO;
+
+ return freq_table[cur_cluster][freq_tab_idx].frequency;
+}
+
+/* translate the integer array into cpufreq_frequency_table entries */
+static inline void _cpufreq_copy_table_from_array(uint32_t *table,
+ struct cpufreq_frequency_table *freq_table, int size)
+{
+ int i;
+ for (i = 0; i < size; i++) {
+ freq_table[i].index = i;
+ freq_table[i].frequency = table[i] / 1000; /* in kHz */
+ }
+ freq_table[i].index = size;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+}
+
+static int vexpress_cpufreq_of_init(void)
+{
+ uint32_t cpu_opp_num;
+ struct cpufreq_frequency_table *freqtable[VEXPRESS_MAX_CLUSTER];
+ uint32_t *cpu_freqs;
+ int ret = 0, cluster_id = 0, len;
+ struct device_node *cluster = NULL;
+ const struct property *pp;
+ const u32 *hwid;
+
+ while ((cluster = of_find_node_by_name(cluster, "cluster"))) {
+ hwid = of_get_property(cluster, "reg", &len);
+ if (hwid && len == 4)
+ cluster_id = be32_to_cpup(hwid);
+
+ pp = of_find_property(cluster, "freqs", NULL);
+ if (!pp)
+ return -EINVAL;
+ cpu_opp_num = pp->length / sizeof(u32);
+ if (!cpu_opp_num)
+ return -ENODATA;
+
+ cpu_freqs = kzalloc(sizeof(uint32_t) * cpu_opp_num, GFP_KERNEL);
+ freqtable[cluster_id] =
+ kzalloc(sizeof(struct cpufreq_frequency_table) *
+ (cpu_opp_num + 1), GFP_KERNEL);
+ if (!cpu_freqs || !freqtable[cluster_id]) {
+ ret = -ENOMEM;
+ goto free_mem;
+ }
+ of_property_read_u32_array(cluster, "freqs",
+ cpu_freqs, cpu_opp_num);
+ _cpufreq_copy_table_from_array(cpu_freqs,
+ freqtable[cluster_id], cpu_opp_num);
+ freq_table[cluster_id] = freqtable[cluster_id];
+
+ kfree(cpu_freqs);
+ }
+ return ret;
+free_mem:
+ while (cluster_id >= 0)
+ kfree(freqtable[cluster_id--]);
+ kfree(cpu_freqs);
+ return ret;
+}
+
+/* Per-CPU initialization */
+static int vexpress_cpufreq_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+ uint32_t cur_cluster = get_current_cluster(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = vexpress_cpufreq_of_init();
+
+ if (result) {
+ atomic_dec_return(&freq_table_users);
+ pr_err("CPUFreq - CPU %d failed to initialize\n", policy->cpu);
+ return result;
+ }
+
+ result =
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[cur_cluster]);
+ if (result)
+ return result;
+
+ cpufreq_frequency_table_get_attr(freq_table[cur_cluster], policy->cpu);
+
+ per_cpu(cpu_cur_cluster, policy->cpu) = cur_cluster;
+
+ /* set default policy and cpuinfo */
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+
+ policy->cpuinfo.transition_latency = 1000000; /* 1 ms assumed */
+ policy->cur = vexpress_cpufreq_get(policy->cpu);
+
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+ cpumask_copy(policy->related_cpus, policy->cpus);
+
+ pr_info("CPUFreq for CPU %d initialized\n", policy->cpu);
+ return result;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *vexpress_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver vexpress_cpufreq_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = vexpress_cpufreq_verify_policy,
+ .target = vexpress_cpufreq_set_target,
+ .get = vexpress_cpufreq_get,
+ .init = vexpress_cpufreq_init,
+ .name = "cpufreq_vexpress",
+ .attr = vexpress_cpufreq_attr,
+};
+
+static int __init vexpress_cpufreq_modinit(void)
+{
+ return cpufreq_register_driver(&vexpress_cpufreq_driver);
+}
+
+static void __exit vexpress_cpufreq_modexit(void)
+{
+ cpufreq_unregister_driver(&vexpress_cpufreq_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for ARM vexpress big.LITTLE platform");
+MODULE_LICENSE("GPL");
+
+module_init(vexpress_cpufreq_modinit);
+module_exit(vexpress_cpufreq_modexit);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 0724b1f5aae..05e21981164 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -528,6 +528,9 @@ config WL127X_RFKILL
Creates an rfkill entry in sysfs for power control of Bluetooth
TI wl127x chips.
+config ARM_CCI
+ bool "ARM CCI driver support"
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -537,4 +540,5 @@ source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
source "drivers/misc/mei/Kconfig"
+source "drivers/misc/vexpress/Kconfig"
endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index ca02974e4c9..64e9e7044cc 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -54,4 +54,5 @@ obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_WL127X_RFKILL) += wl127x-rfkill.o
obj-$(CONFIG_SENSORS_AK8975) += akm8975.o
-
+obj-$(CONFIG_ARCH_VEXPRESS) += vexpress/
+obj-$(CONFIG_ARM_CCI) += arm-cci.o
diff --git a/drivers/misc/arm-cci.c b/drivers/misc/arm-cci.c
new file mode 100644
index 00000000000..96afe77b9fe
--- /dev/null
+++ b/drivers/misc/arm-cci.c
@@ -0,0 +1,130 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/arm-cci.h>
+
+#define CCI400_EAG_OFFSET 0x4000
+#define CCI400_KF_OFFSET 0x5000
+
+#define DRIVER_NAME "CCI"
+struct cci_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct cci_drvdata *info;
+
+void disable_cci(int cluster)
+{
+ u32 cci_reg = cluster ? CCI400_KF_OFFSET : CCI400_EAG_OFFSET;
+ writel_relaxed(0x0, info->baseaddr + cci_reg);
+
+ while (readl_relaxed(info->baseaddr + 0xc) & 0x1)
+ ;
+}
+EXPORT_SYMBOL_GPL(disable_cci);
+
+static int __devinit cci_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+
+ platform_set_drvdata(pdev, info);
+
+ pr_info("CCI loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+ release_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit cci_driver_remove(struct platform_device *pdev)
+{
+ struct cci_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+ release_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_cci_matches[] = {
+ {.compatible = "arm,cci"},
+ {},
+};
+
+static struct platform_driver cci_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_cci_matches,
+ },
+ .probe = cci_driver_probe,
+ .remove = cci_driver_remove,
+};
+
+static int __init cci_init(void)
+{
+ return platform_driver_register(&cci_platform_driver);
+}
+
+static void __exit cci_exit(void)
+{
+ platform_driver_unregister(&cci_platform_driver);
+}
+
+arch_initcall(cci_init);
+module_exit(cci_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("CCI support");
diff --git a/drivers/misc/vexpress/Kconfig b/drivers/misc/vexpress/Kconfig
new file mode 100644
index 00000000000..8fb52f640bf
--- /dev/null
+++ b/drivers/misc/vexpress/Kconfig
@@ -0,0 +1,2 @@
+config ARM_SPC
+ bool "ARM SPC driver support"
diff --git a/drivers/misc/vexpress/Makefile b/drivers/misc/vexpress/Makefile
new file mode 100644
index 00000000000..95b58166d0a
--- /dev/null
+++ b/drivers/misc/vexpress/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_SPC) += arm-spc.o
diff --git a/drivers/misc/vexpress/arm-spc.c b/drivers/misc/vexpress/arm-spc.c
new file mode 100644
index 00000000000..23cee9df576
--- /dev/null
+++ b/drivers/misc/vexpress/arm-spc.c
@@ -0,0 +1,371 @@
+/*
+ * Serial Power Controller (SPC) support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author(s): Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+ * Achin Gupta <achin.gupta@arm.com>
+ * Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vexpress.h>
+
+#define SNOOP_CTL_A15 0x404
+#define SNOOP_CTL_A7 0x504
+#define PERF_LVL_A15 0xB00
+#define PERF_REQ_A15 0xB04
+#define PERF_LVL_A7 0xB08
+#define PERF_REQ_A7 0xB0c
+#define COMMS 0xB10
+#define COMMS_REQ 0xB14
+#define PWC_STATUS 0xB18
+#define PWC_FLAG 0xB1c
+#define WAKE_INT_MASK 0xB24
+#define WAKE_INT_RAW 0xB28
+#define WAKE_INT_STAT 0xB2c
+#define A15_PWRDN_EN 0xB30
+#define A7_PWRDN_EN 0xB34
+#define A15_A7_ISOLATE 0xB38
+#define STANDBYWFI_STAT 0xB3c
+#define A15_CACTIVE 0xB40
+#define A15_PWRDNREQ 0xB44
+#define A15_PWRDNACK 0xB48
+#define A7_CACTIVE 0xB4c
+#define A7_PWRDNREQ 0xB50
+#define A7_PWRDNACK 0xB54
+#define A15_RESET_HOLD 0xB58
+#define A7_RESET_HOLD 0xB5c
+#define A15_RESET_STAT 0xB60
+#define A7_RESET_STAT 0xB64
+#define A15_CONF 0x400
+#define A7_CONF 0x500
+
+#define DRIVER_NAME "SPC"
+#define TIME_OUT 100
+
+struct vexpress_spc_drvdata {
+ void __iomem *baseaddr;
+ spinlock_t lock;
+};
+
+static struct vexpress_spc_drvdata *info;
+
+/* SCC virtual address */
+u32 vscc;
+
+static inline int read_wait_to(void __iomem *reg, int status, int timeout)
+{
+ while (timeout-- && readl(reg) == status) {
+ cpu_relax();
+ udelay(2);
+ }
+ if (!timeout)
+ return -EAGAIN;
+ else
+ return 0;
+}
+
+int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+
+ spin_lock(&info->lock);
+ *perf = readl(info->baseaddr + perf_cfg_reg);
+ spin_unlock(&info->lock);
+
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_performance);
+
+int vexpress_spc_set_performance(int cluster, int perf)
+{
+ u32 perf_cfg_reg = 0;
+ u32 perf_stat_reg = 0;
+ u32 a15_clusid = 0;
+ int ret = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return -ENXIO;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ perf_cfg_reg = cluster != a15_clusid ? PERF_LVL_A7 : PERF_LVL_A15;
+ perf_stat_reg = cluster != a15_clusid ? PERF_REQ_A7 : PERF_REQ_A15;
+
+ if (perf < 0 || perf > 7)
+ return -EINVAL;
+
+ spin_lock(&info->lock);
+ writel(perf, info->baseaddr + perf_cfg_reg);
+ if (read_wait_to(info->baseaddr + perf_stat_reg, 1, TIME_OUT))
+ ret = -EAGAIN;
+ spin_unlock(&info->lock);
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_performance);
+
+void vexpress_spc_set_wake_intr(u32 mask)
+{
+ if (!IS_ERR_OR_NULL(info))
+ writel(mask & VEXPRESS_SPC_WAKE_INTR_MASK,
+ info->baseaddr + WAKE_INT_MASK);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_set_wake_intr);
+
+u32 vexpress_spc_get_wake_intr(int raw)
+{
+ u32 wake_intr_reg = raw ? WAKE_INT_RAW : WAKE_INT_STAT;
+
+ if (!IS_ERR_OR_NULL(info))
+ return readl(info->baseaddr + wake_intr_reg);
+ else
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_get_wake_intr);
+
+void vexpress_spc_powerdown_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+
+ if (!IS_ERR_OR_NULL(info)) {
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDN_EN : A15_PWRDN_EN;
+ writel(!!enable, info->baseaddr + pwdrn_reg);
+ }
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_powerdown_enable);
+
+void vexpress_spc_adb400_pd_enable(int cluster, int enable)
+{
+ u32 pwdrn_reg = 0;
+ u32 a15_clusid = 0;
+ u32 val = enable ? 0xF : 0x0; /* all adb bridges ?? */
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ pwdrn_reg = cluster != a15_clusid ? A7_PWRDNREQ : A15_PWRDNREQ;
+
+ spin_lock(&info->lock);
+ writel(val, info->baseaddr + pwdrn_reg);
+ spin_unlock(&info->lock);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_adb400_pd_enable);
+
+void vexpress_scc_ctl_snoops(int cluster, int enable)
+{
+ u32 val;
+ u32 snoop_reg = 0;
+ u32 a15_clusid = 0;
+ u32 or = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ snoop_reg = cluster != a15_clusid ? SNOOP_CTL_A7 : SNOOP_CTL_A15;
+ or = cluster != a15_clusid ? 0x2000 : 0x180;
+
+ val = readl_relaxed(info->baseaddr + snoop_reg);
+ if (enable) {
+ or = ~or;
+ val &= or;
+ } else {
+ val |= or;
+ }
+ writel_relaxed(val, info->baseaddr + snoop_reg);
+}
+EXPORT_SYMBOL_GPL(vexpress_scc_ctl_snoops);
+
+void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{
+ u32 rsthold_reg, prst_shift;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ prst_shift = 3;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ prst_shift = 2;
+ }
+ val = readl_relaxed(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= (1 << cpu);
+ else
+ val &= ~(1 << cpu);
+ writel_relaxed(val, info->baseaddr + rsthold_reg);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpureset);
+
+void vexpress_spc_wfi_cluster_reset(int cluster, int enable)
+{
+ u32 rsthold_reg, shift;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+
+ if (cluster != a15_clusid) {
+ rsthold_reg = A7_RESET_HOLD;
+ shift = 6;
+ } else {
+ rsthold_reg = A15_RESET_HOLD;
+ shift = 4;
+ }
+ spin_lock(&info->lock);
+ val = readl(info->baseaddr + rsthold_reg);
+ if (enable)
+ val |= 1 << shift;
+ else
+ val &= ~(1 << shift);
+ writel(val, info->baseaddr + rsthold_reg);
+ spin_unlock(&info->lock);
+ return;
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cluster_reset);
+
+int vexpress_spc_wfi_cpustat(int cluster)
+{
+ u32 rststat_reg;
+ u32 val;
+ u32 a15_clusid = 0;
+
+ if (IS_ERR_OR_NULL(info))
+ return 0;
+
+ a15_clusid = readl_relaxed(info->baseaddr + A15_CONF) & 0xf;
+ rststat_reg = STANDBYWFI_STAT;
+
+ val = readl_relaxed(info->baseaddr + rststat_reg);
+ return cluster != a15_clusid ? ((val & 0x38) >> 3) : (val & 0x3);
+}
+EXPORT_SYMBOL_GPL(vexpress_spc_wfi_cpustat);
+
+
+static int __devinit vexpress_spc_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int ret = 0;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "unable to allocate mem\n");
+ return -ENOMEM;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -EINVAL;
+ goto mem_free;
+ }
+
+ if (!request_mem_region(res->start, resource_size(res),
+ dev_name(&pdev->dev))) {
+ dev_err(&pdev->dev, "address 0x%x in use\n", (u32) res->start);
+ ret = -EBUSY;
+ goto mem_free;
+ }
+
+ info->baseaddr = ioremap(res->start, resource_size(res));
+ if (!info->baseaddr) {
+ ret = -ENXIO;
+ goto ioremap_err;
+ }
+ vscc = (u32) info->baseaddr;
+ spin_lock_init(&info->lock);
+ platform_set_drvdata(pdev, info);
+
+ pr_info("vexpress_spc loaded at %p\n", info->baseaddr);
+ return ret;
+
+ioremap_err:
+ release_region(res->start, resource_size(res));
+mem_free:
+ kfree(info);
+
+ return ret;
+}
+
+static int __devexit vexpress_spc_driver_remove(struct platform_device *pdev)
+{
+ struct vexpress_spc_drvdata *info;
+ struct resource *res = pdev->resource;
+
+ info = platform_get_drvdata(pdev);
+ iounmap(info->baseaddr);
+ release_region(res->start, resource_size(res));
+ kfree(info);
+
+ return 0;
+}
+
+static const struct of_device_id arm_vexpress_spc_matches[] = {
+ {.compatible = "arm,spc"},
+ {},
+};
+
+static struct platform_driver vexpress_spc_platform_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRIVER_NAME,
+ .of_match_table = arm_vexpress_spc_matches,
+ },
+ .probe = vexpress_spc_driver_probe,
+ .remove = vexpress_spc_driver_remove,
+};
+
+static int __init vexpress_spc_init(void)
+{
+ return platform_driver_register(&vexpress_spc_platform_driver);
+}
+
+static void __exit vexpress_spc_exit(void)
+{
+ platform_driver_unregister(&vexpress_spc_platform_driver);
+}
+
+arch_initcall(vexpress_spc_init);
+module_exit(vexpress_spc_exit);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Serial Power Controller (SPC) support");
diff --git a/include/linux/arm-cci.h b/include/linux/arm-cci.h
new file mode 100644
index 00000000000..ce3f705fb6d
--- /dev/null
+++ b/include/linux/arm-cci.h
@@ -0,0 +1,30 @@
+/*
+ * CCI support
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __LINUX_ARM_CCI_H
+#define __LINUX_ARM_CCI_H
+
+#ifdef CONFIG_ARM_CCI
+extern void disable_cci(int cluster);
+#else
+static inline void disable_cci(int cluster) { }
+#endif
+
+#endif
diff --git a/include/linux/vexpress.h b/include/linux/vexpress.h
new file mode 100644
index 00000000000..61f5651103c
--- /dev/null
+++ b/include/linux/vexpress.h
@@ -0,0 +1,50 @@
+/*
+ * include/linux/vexpress.h
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ *
+ * Versatile Express register definitions and exported functions
+ */
+
+#define VEXPRESS_SPC_WAKE_INTR_IRQ(cluster, cpu) \
+ (1 << (4 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_FIQ(cluster, cpu) \
+ (1 << (7 * (cluster) + (cpu)))
+#define VEXPRESS_SPC_WAKE_INTR_SWDOG (1 << 10)
+#define VEXPRESS_SPC_WAKE_INTR_GTIMER (1 << 11)
+#define VEXPRESS_SPC_WAKE_INTR_MASK 0xFFF
+
+#ifdef CONFIG_ARM_SPC
+extern int vexpress_spc_get_performance(int cluster, int *perf);
+extern int vexpress_spc_set_performance(int cluster, int perf);
+extern void vexpress_spc_set_wake_intr(u32 mask);
+extern u32 vexpress_spc_get_wake_intr(int raw);
+extern void vexpress_spc_powerdown_enable(int cluster, int enable);
+extern void vexpress_spc_adb400_pd_enable(int cluster, int enable);
+extern void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable);
+extern int vexpress_spc_wfi_cpustat(int cluster);
+extern void vexpress_spc_wfi_cluster_reset(int cluster, int enable);
+extern void vexpress_scc_ctl_snoops(int cluster, int enable);
+#else
+static inline int vexpress_spc_get_performance(int cluster, int *perf)
+{
+ return -EINVAL;
+}
+static inline int vexpress_spc_set_performance(int cluster, int perf)
+{
+ return -EINVAL;
+}
+static inline void vexpress_spc_set_wake_intr(u32 mask) { }
+static inline u32 vexpress_spc_get_wake_intr(int raw) { return 0; }
+static inline void vexpress_spc_powerdown_enable(int cluster, int enable) { }
+static inline void vexpress_spc_adb400_pd_enable(int cluster, int enable) { }
+static inline void vexpress_spc_wfi_cpureset(int cluster, int cpu, int enable)
+{ }
+static inline int vexpress_spc_wfi_cpustat(int cluster) { return 0; }
+static inline void vexpress_spc_wfi_cluster_reset(int cluster, int enable) { }
+static inline void vexpress_scc_ctl_snoops(int cluster, int enable) { }
+#endif