author    Arve Hjønnevåg <arve@android.com>    2010-05-19 15:12:34 -0700
committer Arve Hjønnevåg <arve@android.com>    2010-05-19 15:12:34 -0700
commit    3890f5b9f107268794c1959460dea089f54090a8 (patch)
tree      49d3d3ef160ca99ca4c2024343390e0c7a8b235b
parent    23d8bfab1cd85eac4e4e2c1fc3f8961e647a2713 (diff)
parent    d6a48f4e82910a05236435b3439249bc92fe0f2d (diff)
Merge ../kernel-next into android-msm-2.6.34-pmtest
-rw-r--r--  Documentation/ioctl/ioctl-number.txt             3
-rw-r--r--  Documentation/power/early-suspend.txt           26
-rw-r--r--  Documentation/power/opportunistic-suspend.txt  174
-rw-r--r--  drivers/input/evdev.c                           29
-rw-r--r--  drivers/leds/Kconfig                             6
-rw-r--r--  drivers/leds/Makefile                            1
-rw-r--r--  drivers/leds/ledtrig-sleep.c                    80
-rw-r--r--  drivers/power/power_supply_core.c                9
-rw-r--r--  drivers/rtc/Kconfig                             18
-rw-r--r--  drivers/rtc/Makefile                             2
-rw-r--r--  drivers/rtc/alarm-dev.c                        286
-rw-r--r--  drivers/rtc/alarm.c                            588
-rw-r--r--  include/linux/android_alarm.h                  106
-rwxr-xr-x  include/linux/earlysuspend.h                    55
-rw-r--r--  include/linux/input.h                            3
-rw-r--r--  include/linux/power_supply.h                     3
-rw-r--r--  include/linux/suspend.h                          1
-rwxr-xr-x  include/linux/suspend_blocker.h                173
-rw-r--r--  include/linux/suspend_ioctls.h                   5
-rwxr-xr-x  include/linux/wakelock.h                       140
-rw-r--r--  kernel/power/Kconfig                            79
-rw-r--r--  kernel/power/Makefile                            7
-rw-r--r--  kernel/power/consoleearlysuspend.c              78
-rw-r--r--  kernel/power/earlysuspend.c                    175
-rw-r--r--  kernel/power/fbearlysuspend.c                  153
-rw-r--r--  kernel/power/main.c                            144
-rw-r--r--  kernel/power/opportunistic_suspend.c           810
-rw-r--r--  kernel/power/power.h                            29
-rw-r--r--  kernel/power/process.c                          11
-rw-r--r--  kernel/power/suspend.c                           5
-rw-r--r--  kernel/power/user_suspend_blocker.c            168
-rw-r--r--  kernel/power/userwakelock.c                    219
-rw-r--r--  kernel/power/wakelocktest.c                    239
33 files changed, 3810 insertions, 15 deletions
diff --git a/Documentation/ioctl/ioctl-number.txt b/Documentation/ioctl/ioctl-number.txt
index dd5806f4fcc..e2458f73e3e 100644
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -254,7 +254,8 @@ Code Seq#(hex) Include File Comments
'q' 80-FF linux/telephony.h Internet PhoneJACK, Internet LineJACK
linux/ixjuser.h <http://www.quicknet.net>
'r' 00-1F linux/msdos_fs.h and fs/fat/dir.c
-'s' all linux/cdk.h
+'s' all linux/cdk.h conflict!
+'s' all linux/suspend_block_dev.h conflict!
't' 00-7F linux/if_ppp.h
't' 80-8F linux/isdn_ppp.h
't' 90 linux/toshiba.h
diff --git a/Documentation/power/early-suspend.txt b/Documentation/power/early-suspend.txt
new file mode 100644
index 00000000000..8286d3a3d26
--- /dev/null
+++ b/Documentation/power/early-suspend.txt
@@ -0,0 +1,26 @@
+Early-suspend
+=============
+
+The early-suspend API allows drivers to be notified when user-space writes to
+/sys/power/request_state to indicate that the user-visible sleep state should
+change. A level controls the order in which the handlers are called: suspend
+handlers are called in low-to-high level order, resume handlers are called in
+the opposite order.
+
+Four levels are defined:
+EARLY_SUSPEND_LEVEL_BLANK_SCREEN:
+ On suspend the screen should be turned off but the framebuffer must still be
+ accessible. On resume the screen can be turned back on.
+
+EARLY_SUSPEND_LEVEL_STOP_DRAWING:
+ On suspend this level notifies user-space that it should stop accessing the
+ framebuffer and it waits for it to complete. On resume it notifies user-space
+ that it should resume screen access.
+ Two methods are provided: a console switch or a sysfs interface.
+
+EARLY_SUSPEND_LEVEL_DISABLE_FB:
+ Turn off the framebuffer on suspend and back on on resume.
+
+EARLY_SUSPEND_LEVEL_STOP_INPUT:
+ On suspend turn off input devices that are not capable of wakeup or where
+ wakeup is disabled. On resume turn the same devices back on.
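
For illustration, a driver might hook these levels roughly as follows (a
minimal sketch against the register_early_suspend() interface added by this
patch in include/linux/earlysuspend.h; the mydrv_* names are hypothetical):

    #include <linux/earlysuspend.h>

    static void mydrv_suspend(struct early_suspend *h)
    {
            /* screen is going off: stop drawing, gate clocks, etc. */
    }

    static void mydrv_resume(struct early_suspend *h)
    {
            /* user-visible state is coming back: undo the above */
    }

    static struct early_suspend mydrv_early_suspend = {
            .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
            .suspend = mydrv_suspend,
            .resume = mydrv_resume,
    };

    /* in the driver's init path */
    register_early_suspend(&mydrv_early_suspend);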
diff --git a/Documentation/power/opportunistic-suspend.txt b/Documentation/power/opportunistic-suspend.txt
new file mode 100644
index 00000000000..f403a4f02f1
--- /dev/null
+++ b/Documentation/power/opportunistic-suspend.txt
@@ -0,0 +1,174 @@
+Opportunistic Suspend
+=====================
+
+Opportunistic suspend is a feature allowing the system to be suspended (i.e. put
+into one of the available sleep states) automatically whenever it is regarded
+as idle. The suspend blockers framework described below is used to determine
+when that happens.
+
+The /sys/power/policy sysfs attribute is used to switch the system between the
+opportunistic and "forced" suspend behavior, where in the latter case the
+system is only suspended if a specific value, corresponding to one of the
+available system sleep states, is written into /sys/power/state. However, in
+the former, opportunistic, case the system is put into the sleep state
+corresponding to the value written to /sys/power/state whenever there are no
+active suspend blockers. The default policy is "forced". Also, suspend blockers
+do not affect sleep states entered from idle.
+
+When the policy is "opportunisic", there is a special value, "on", that can be
+written to /sys/power/state. This will block the automatic sleep request, as if
+a suspend blocker was used by a device driver. This way the opportunistic
+suspend may be blocked by user space whithout switching back to the "forced"
+mode.
+
+A suspend blocker is an object used to inform the PM subsystem when the system
+can or cannot be suspended in the "opportunistic" mode (the "forced" mode
+ignores suspend blockers). To use it, a device driver creates a struct
+suspend_blocker that must be initialized with suspend_blocker_init(). Before
+freeing the suspend_blocker structure or its name, suspend_blocker_unregister()
+must be called on it.
+
+A suspend blocker is activated using suspend_block(), which prevents the PM
+subsystem from putting the system into the requested sleep state in the
+"opportunistic" mode until the suspend blocker is deactivated with
+suspend_unblock(). Multiple suspend blockers may be active simultaneously, and
+the system will not suspend as long as at least one of them is active.
+
+If opportunistic suspend is already in progress when suspend_block() is called,
+it will abort the suspend, unless suspend_ops->enter has already been
+executed. If suspend is aborted this way, the system is usually not fully
+operational at that point. The suspend callbacks of some drivers may still be
+running and it usually takes time to restore the system to the fully operational
+state.
+
+Here's an example showing how a cell phone or other embedded system can handle
+keystrokes (or other input events) in the presence of suspend blockers. Use
+set_irq_wake or a platform specific API to make sure the keypad interrupt wakes
+up the cpu. Once the keypad driver has resumed, the sequence of events can look
+like this:
+
+- The keypad driver gets an interrupt. It then calls suspend_block on the
+ keypad-scan suspend_blocker and starts scanning the keypad matrix.
+- The keypad-scan code detects a key change and reports it to the input-event
+ driver.
+- The input-event driver sees the key change, enqueues an event, and calls
+ suspend_block on the input-event-queue suspend_blocker.
+- The keypad-scan code detects that no keys are held and calls suspend_unblock
+ on the keypad-scan suspend_blocker.
+- The user-space input-event thread returns from select/poll, calls
+ suspend_block on the process-input-events suspend_blocker and then calls read
+ on the input-event device.
+- The input-event driver dequeues the key-event and, since the queue is now
+ empty, it calls suspend_unblock on the input-event-queue suspend_blocker.
+- The user-space input-event thread returns from read. If it determines that
+ the key should be ignored, it calls suspend_unblock on the
+ process_input_events suspend_blocker and then calls select or poll. The
+ system will automatically suspend again, since now no suspend blockers are
+ active.
+
+If the key that was pressed instead should perform a simple action (for example,
+adjusting the volume), this action can be performed right before calling
+suspend_unblock on the process_input_events suspend_blocker. However, if the key
+triggers a longer-running action, that action needs its own suspend_blocker and
+suspend_block must be called on that suspend blocker before calling
+suspend_unblock on the process_input_events suspend_blocker.
+
+ Key pressed Key released
+ | |
+keypad-scan ++++++++++++++++++
+input-event-queue +++ +++
+process-input-events +++ +++
+
+
+Driver API
+==========
+
+A driver can use the suspend block API by adding a suspend_blocker variable to
+its state and calling suspend_blocker_init(). For instance:
+
+struct state {
+ struct suspend_blocker suspend_blocker;
+}
+
+init() {
+ suspend_blocker_init(&state->suspend_blocker, name);
+}
+
+If the suspend_blocker variable is allocated statically,
+DEFINE_SUSPEND_BLOCKER() should be used to initialize it, for example:
+
+static DEFINE_SUSPEND_BLOCKER(blocker, name);
+
+and suspend_blocker_register(&blocker) has to be called to make the suspend
+blocker usable.
+
+Before freeing the memory in which a suspend_blocker variable is located,
+suspend_blocker_unregister() must be called, for instance:
+
+uninit() {
+ suspend_blocker_unregister(&state->suspend_blocker);
+}
+
+When the driver determines that it needs to run (usually in an interrupt
+handler) it calls suspend_block():
+
+ suspend_block(&state->suspend_blocker);
+
+When it no longer needs to run it calls suspend_unblock():
+
+ suspend_unblock(&state->suspend_blocker);
+
+Calling suspend_block() when the suspend blocker is active or suspend_unblock()
+when it is not active has no effect (i.e., these functions don't nest). This
+allows drivers to update their state and call suspend_block() or
+suspend_unblock() based on the result. For instance:
+
+if (list_empty(&state->pending_work))
+ suspend_unblock(&state->suspend_blocker);
+else
+ suspend_block(&state->suspend_blocker);
+
+A driver can also call suspend_block_timeout() to activate the suspend_blocker
+and have it released automatically after a delay:
+
+ suspend_block_timeout(&state->suspend_blocker, HZ);
+
+This works whether the suspend_blocker is already active or not. It is useful if
+the driver woke up other parts of the system that do not use suspend_blockers
+but still need to run. Avoid this when possible, since it will waste power
+if the timeout is long or may fail to finish needed work if the timeout is
+short. Calling suspend_block or suspend_unblock will cancel the timeout.
+
+User space API
+==============
+
+To create a suspend blocker from user space, open the suspend_blocker special
+device file:
+
+ fd = open("/dev/suspend_blocker", O_RDWR | O_CLOEXEC);
+
+then optionally call:
+
+ ioctl(fd, SUSPEND_BLOCKER_IOCTL_SET_NAME(strlen(name)), name);
+
+To activate the suspend blocker call:
+
+ ioctl(fd, SUSPEND_BLOCKER_IOCTL_BLOCK);
+or
+ ioctl(fd, SUSPEND_BLOCKER_IOCTL_BLOCK_TIMEOUT, &timespec_timeout);
+
+To deactivate it call:
+
+ ioctl(fd, SUSPEND_BLOCKER_IOCTL_UNBLOCK);
+
+To destroy the suspend blocker, close the device:
+
+ close(fd);
+
+If the first ioctl called is not SUSPEND_BLOCKER_IOCTL_SET_NAME the suspend
+blocker will get the default name "(userspace)".
+
+A module parameter, unclean_exit_grace_period, can be set to allow servers
+some time to restart if they crash with an active suspend_blocker. If the
+process dies or the device is closed while the suspend_blocker is active, a
+suspend_blocker will be held for the number of seconds specified.
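
Putting the pieces above together, a minimal user-space client could look
roughly like this (a sketch only; the blocker name and the work placeholder
are illustrative):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/suspend_ioctls.h>

    int main(void)
    {
            const char name[] = "my-service";
            int fd = open("/dev/suspend_blocker", O_RDWR | O_CLOEXEC);

            if (fd < 0)
                    return 1;
            /* optional, but must be the first ioctl if used */
            ioctl(fd, SUSPEND_BLOCKER_IOCTL_SET_NAME(strlen(name)), name);
            ioctl(fd, SUSPEND_BLOCKER_IOCTL_BLOCK);
            /* ... work that must finish before the system may suspend ... */
            ioctl(fd, SUSPEND_BLOCKER_IOCTL_UNBLOCK);
            close(fd);      /* destroys the suspend blocker */
            return 0;
    }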
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 7b9ac426e28..3a9a1702544 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -20,6 +20,7 @@
#include <linux/input.h>
#include <linux/major.h>
#include <linux/device.h>
+#include <linux/suspend.h>
#include "input-compat.h"
struct evdev {
@@ -43,6 +44,8 @@ struct evdev_client {
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
+ struct suspend_blocker suspend_blocker;
+ bool use_suspend_blocker;
};
static struct evdev *evdev_table[EVDEV_MINORS];
@@ -55,6 +58,8 @@ static void evdev_pass_event(struct evdev_client *client,
* Interrupts are disabled, just acquire the lock
*/
spin_lock(&client->buffer_lock);
+ if (client->use_suspend_blocker)
+ suspend_block_timeout(&client->suspend_blocker, 5 * HZ);
client->buffer[client->head++] = *event;
client->head &= EVDEV_BUFFER_SIZE - 1;
spin_unlock(&client->buffer_lock);
@@ -63,6 +68,18 @@ static void evdev_pass_event(struct evdev_client *client,
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
+static int evdev_set_suspend_block(struct evdev_client *client, bool enable)
+{
+ spin_lock_irq(&client->buffer_lock);
+ if (!client->use_suspend_blocker && enable)
+ suspend_blocker_init(&client->suspend_blocker, "evdev");
+ else if (client->use_suspend_blocker && !enable)
+ suspend_blocker_unregister(&client->suspend_blocker);
+ client->use_suspend_blocker = enable;
+ spin_unlock_irq(&client->buffer_lock);
+ return 0;
+}
+
/*
* Pass incoming event to all connected clients.
*/
@@ -237,6 +254,8 @@ static int evdev_release(struct inode *inode, struct file *file)
mutex_unlock(&evdev->mutex);
evdev_detach_client(evdev, client);
+ if (client->use_suspend_blocker)
+ suspend_blocker_unregister(&client->suspend_blocker);
kfree(client);
evdev_close_device(evdev);
@@ -283,6 +302,8 @@ static int evdev_open(struct inode *inode, struct file *file)
file->private_data = client;
nonseekable_open(inode, file);
+ /* hack: Android user space does not call the ioctl yet */
+ evdev_set_suspend_block(client, true);
return 0;
err_free_client:
@@ -338,6 +359,8 @@ static int evdev_fetch_next_event(struct evdev_client *client,
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= EVDEV_BUFFER_SIZE - 1;
+ if (client->use_suspend_blocker && client->head == client->tail)
+ suspend_unblock(&client->suspend_blocker);
}
spin_unlock_irq(&client->buffer_lock);
@@ -588,6 +611,12 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
else
return evdev_ungrab(evdev, client);
+ case EVIOCGSUSPENDBLOCK:
+ return put_user(client->use_suspend_blocker, ip);
+
+ case EVIOCSSUSPENDBLOCK:
+ return evdev_set_suspend_block(client, !!p);
+
default:
if (_IOC_TYPE(cmd) != 'E')
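
For illustration, user space would control the per-client suspend blocker
added above with the new evdev ioctls roughly as follows (a sketch; the
event-device path is an assumption):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/input.h>

    int fd = open("/dev/input/event0", O_RDONLY);   /* path is illustrative */
    int enabled;

    /* enable: the value is passed directly in the ioctl argument */
    ioctl(fd, EVIOCSSUSPENDBLOCK, 1);
    /* read the current setting back through the pointer argument */
    ioctl(fd, EVIOCGSUSPENDBLOCK, &enabled);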
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index beb1c60e56d..cddaf8e9333 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -354,6 +354,12 @@ config LEDS_TRIGGER_DEFAULT_ON
This allows LEDs to be initialised in the ON state.
If unsure, say Y.
+config LEDS_TRIGGER_SLEEP
+ tristate "LED Sleep Mode Trigger"
+ depends on LEDS_TRIGGERS && EARLYSUSPEND
+ help
+ This turns LEDs on when the screen is off but the CPU is still running.
+
comment "iptables trigger is under Netfilter config (LED target)"
depends on LEDS_TRIGGERS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d33b2594b38..f217190e820 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
+obj-$(CONFIG_LEDS_TRIGGER_SLEEP) += ledtrig-sleep.o
diff --git a/drivers/leds/ledtrig-sleep.c b/drivers/leds/ledtrig-sleep.c
new file mode 100644
index 00000000000..f1640421215
--- /dev/null
+++ b/drivers/leds/ledtrig-sleep.c
@@ -0,0 +1,80 @@
+/* drivers/leds/ledtrig-sleep.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/leds.h>
+#include <linux/suspend.h>
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored);
+
+DEFINE_LED_TRIGGER(ledtrig_sleep)
+static struct notifier_block ledtrig_sleep_pm_notifier = {
+ .notifier_call = ledtrig_sleep_pm_callback,
+ .priority = 0,
+};
+
+static void ledtrig_sleep_early_suspend(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+}
+
+static void ledtrig_sleep_early_resume(struct early_suspend *h)
+{
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+}
+
+static struct early_suspend ledtrig_sleep_early_suspend_handler = {
+ .suspend = ledtrig_sleep_early_suspend,
+ .resume = ledtrig_sleep_early_resume,
+};
+
+static int ledtrig_sleep_pm_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ignored)
+{
+ switch (action) {
+ case PM_HIBERNATION_PREPARE:
+ case PM_SUSPEND_PREPARE:
+ led_trigger_event(ledtrig_sleep, LED_OFF);
+ return NOTIFY_OK;
+ case PM_POST_HIBERNATION:
+ case PM_POST_SUSPEND:
+ led_trigger_event(ledtrig_sleep, LED_FULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int __init ledtrig_sleep_init(void)
+{
+ led_trigger_register_simple("sleep", &ledtrig_sleep);
+ register_pm_notifier(&ledtrig_sleep_pm_notifier);
+ register_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ return 0;
+}
+
+static void __exit ledtrig_sleep_exit(void)
+{
+ unregister_early_suspend(&ledtrig_sleep_early_suspend_handler);
+ unregister_pm_notifier(&ledtrig_sleep_pm_notifier);
+ led_trigger_unregister_simple(ledtrig_sleep);
+}
+
+module_init(ledtrig_sleep_init);
+module_exit(ledtrig_sleep_exit);
+
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index cce75b40b43..577a131b2e5 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -39,7 +39,7 @@ static int __power_supply_changed_work(struct device *dev, void *data)
static void power_supply_changed_work(struct work_struct *work)
{
struct power_supply *psy = container_of(work, struct power_supply,
- changed_work);
+ changed_work.work);
dev_dbg(psy->dev, "%s\n", __func__);
@@ -55,7 +55,7 @@ void power_supply_changed(struct power_supply *psy)
{
dev_dbg(psy->dev, "%s\n", __func__);
- schedule_work(&psy->changed_work);
+ schedule_suspend_blocking_work(&psy->changed_work);
}
EXPORT_SYMBOL_GPL(power_supply_changed);
@@ -155,7 +155,8 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
goto dev_create_failed;
}
- INIT_WORK(&psy->changed_work, power_supply_changed_work);
+ suspend_blocking_work_init(&psy->changed_work,
+ power_supply_changed_work, "power-supply");
rc = power_supply_create_attrs(psy);
if (rc)
@@ -172,6 +173,7 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
create_triggers_failed:
power_supply_remove_attrs(psy);
create_attrs_failed:
+ suspend_blocking_work_destroy(&psy->changed_work);
device_unregister(psy->dev);
dev_create_failed:
success:
@@ -184,6 +186,7 @@ void power_supply_unregister(struct power_supply *psy)
flush_scheduled_work();
power_supply_remove_triggers(psy);
power_supply_remove_attrs(psy);
+ suspend_blocking_work_destroy(&psy->changed_work);
device_unregister(psy->dev);
}
EXPORT_SYMBOL_GPL(power_supply_unregister);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 34807fbfc4c..0ae9cca755a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -109,6 +109,24 @@ config RTC_INTF_DEV_UIE_EMUL
clock several times per second, please enable this option
only if you know that you really need it.
+config RTC_INTF_ALARM
+ bool "Android alarm driver"
+ depends on RTC_CLASS
+ default y
+ help
+ Provides non-wakeup and rtc backed wakeup alarms based on rtc or
+ elapsed realtime, and a non-wakeup alarm on the monotonic clock.
+ Also provides an interface to set the wall time which must be used
+ for elapsed realtime to work.
+
+config RTC_INTF_ALARM_DEV
+ bool "Android alarm device"
+ depends on RTC_INTF_ALARM
+ default y
+ help
+ Exports the alarm interface to user-space.
+
+
config RTC_DRV_TEST
tristate "Test driver/device"
help
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 18fe31d4e54..715994bb08d 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -11,6 +11,8 @@ obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o
obj-$(CONFIG_RTC_CLASS) += rtc-core.o
rtc-core-y := class.o interface.o
+obj-$(CONFIG_RTC_INTF_ALARM) += alarm.o
+obj-$(CONFIG_RTC_INTF_ALARM_DEV) += alarm-dev.o
rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o
rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o
rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o
diff --git a/drivers/rtc/alarm-dev.c b/drivers/rtc/alarm-dev.c
new file mode 100644
index 00000000000..20d41ec4623
--- /dev/null
+++ b/drivers/rtc/alarm-dev.c
@@ -0,0 +1,286 @@
+/* drivers/rtc/alarm-dev.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/uaccess.h>
+#include <linux/suspend.h>
+
+#define ANDROID_ALARM_PRINT_INFO (1U << 0)
+#define ANDROID_ALARM_PRINT_IO (1U << 1)
+#define ANDROID_ALARM_PRINT_INT (1U << 2)
+
+static int debug_mask = ANDROID_ALARM_PRINT_INFO;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+static int alarm_opened;
+static DEFINE_SPINLOCK(alarm_slock);
+static struct suspend_blocker alarm_suspend_blocker;
+static DECLARE_WAIT_QUEUE_HEAD(alarm_wait_queue);
+static uint32_t alarm_pending;
+static uint32_t alarm_enabled;
+static uint32_t wait_pending;
+
+static struct alarm alarms[ANDROID_ALARM_TYPE_COUNT];
+
+static long alarm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rv = 0;
+ unsigned long flags;
+ struct timespec new_alarm_time;
+ struct timespec new_rtc_time;
+ struct timespec tmp_time;
+ enum android_alarm_type alarm_type = ANDROID_ALARM_IOCTL_TO_TYPE(cmd);
+ uint32_t alarm_type_mask = 1U << alarm_type;
+
+ if (alarm_type >= ANDROID_ALARM_TYPE_COUNT)
+ return -EINVAL;
+
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_GET_TIME(0)) {
+ if ((file->f_flags & O_ACCMODE) == O_RDONLY)
+ return -EPERM;
+ if (file->private_data == NULL &&
+ cmd != ANDROID_ALARM_SET_RTC) {
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_opened) {
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return -EBUSY;
+ }
+ alarm_opened = 1;
+ file->private_data = (void *)1;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+
+ switch (ANDROID_ALARM_BASE_CMD(cmd)) {
+ case ANDROID_ALARM_CLEAR(0):
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d clear\n", alarm_type);
+ alarm_try_to_cancel(&alarms[alarm_type]);
+ if (alarm_pending) {
+ alarm_pending &= ~alarm_type_mask;
+ if (!alarm_pending && !wait_pending)
+ suspend_unblock(&alarm_suspend_blocker);
+ }
+ alarm_enabled &= ~alarm_type_mask;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+
+ case ANDROID_ALARM_SET_OLD:
+ case ANDROID_ALARM_SET_AND_WAIT_OLD:
+ if (get_user(new_alarm_time.tv_sec, (int __user *)arg)) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ new_alarm_time.tv_nsec = 0;
+ goto from_old_alarm_set;
+
+ case ANDROID_ALARM_SET_AND_WAIT(0):
+ case ANDROID_ALARM_SET(0):
+ if (copy_from_user(&new_alarm_time, (void __user *)arg,
+ sizeof(new_alarm_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+from_old_alarm_set:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm %d set %ld.%09ld\n", alarm_type,
+ new_alarm_time.tv_sec, new_alarm_time.tv_nsec);
+ alarm_enabled |= alarm_type_mask;
+ alarm_start_range(&alarms[alarm_type],
+ timespec_to_ktime(new_alarm_time),
+ timespec_to_ktime(new_alarm_time));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ANDROID_ALARM_BASE_CMD(cmd) != ANDROID_ALARM_SET_AND_WAIT(0)
+ && cmd != ANDROID_ALARM_SET_AND_WAIT_OLD)
+ break;
+ /* fall through */
+ case ANDROID_ALARM_WAIT:
+ spin_lock_irqsave(&alarm_slock, flags);
+ pr_alarm(IO, "alarm wait\n");
+ if (!alarm_pending && wait_pending) {
+ suspend_unblock(&alarm_suspend_blocker);
+ wait_pending = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ rv = wait_event_interruptible(alarm_wait_queue, alarm_pending);
+ if (rv)
+ goto err1;
+ spin_lock_irqsave(&alarm_slock, flags);
+ rv = alarm_pending;
+ wait_pending = 1;
+ alarm_pending = 0;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ break;
+ case ANDROID_ALARM_SET_RTC:
+ if (copy_from_user(&new_rtc_time, (void __user *)arg,
+ sizeof(new_rtc_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ rv = alarm_set_rtc(new_rtc_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm_pending |= ANDROID_ALARM_TIME_CHANGE_MASK;
+ wake_up(&alarm_wait_queue);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (rv < 0)
+ goto err1;
+ break;
+ case ANDROID_ALARM_GET_TIME(0):
+ switch (alarm_type) {
+ case ANDROID_ALARM_RTC_WAKEUP:
+ case ANDROID_ALARM_RTC:
+ getnstimeofday(&tmp_time);
+ break;
+ case ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP:
+ case ANDROID_ALARM_ELAPSED_REALTIME:
+ tmp_time =
+ ktime_to_timespec(alarm_get_elapsed_realtime());
+ break;
+ case ANDROID_ALARM_TYPE_COUNT:
+ case ANDROID_ALARM_SYSTEMTIME:
+ ktime_get_ts(&tmp_time);
+ break;
+ }
+ if (copy_to_user((void __user *)arg, &tmp_time,
+ sizeof(tmp_time))) {
+ rv = -EFAULT;
+ goto err1;
+ }
+ break;
+
+ default:
+ rv = -EINVAL;
+ goto err1;
+ }
+err1:
+ return rv;
+}
+
+static int alarm_open(struct inode *inode, struct file *file)
+{
+ file->private_data = NULL;
+ return 0;
+}
+
+static int alarm_release(struct inode *inode, struct file *file)
+{
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (file->private_data != 0) {
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
+ uint32_t alarm_type_mask = 1U << i;
+ if (alarm_enabled & alarm_type_mask) {
+ pr_alarm(INFO, "alarm_release: clear alarm, "
+ "pending %d\n",
+ !!(alarm_pending & alarm_type_mask));
+ alarm_enabled &= ~alarm_type_mask;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm_cancel(&alarms[i]);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (alarm_pending | wait_pending) {
+ if (alarm_pending)
+ pr_alarm(INFO, "alarm_release: clear "
+ "pending alarms %x\n", alarm_pending);
+ suspend_unblock(&alarm_suspend_blocker);
+ wait_pending = 0;
+ alarm_pending = 0;
+ }
+ alarm_opened = 0;
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static void alarm_triggered(struct alarm *alarm)
+{
+ unsigned long flags;
+ uint32_t alarm_type_mask = 1U << alarm->type;
+
+ pr_alarm(INT, "alarm_triggered type %d\n", alarm->type);
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (alarm_enabled & alarm_type_mask) {
+ suspend_block_timeout(&alarm_suspend_blocker, 5 * HZ);
+ alarm_enabled &= ~alarm_type_mask;
+ alarm_pending |= alarm_type_mask;
+ wake_up(&alarm_wait_queue);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+static const struct file_operations alarm_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = alarm_ioctl,
+ .open = alarm_open,
+ .release = alarm_release,
+};
+
+static struct miscdevice alarm_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "alarm",
+ .fops = &alarm_fops,
+};
+
+static int __init alarm_dev_init(void)
+{
+ int err;
+ int i;
+
+ err = misc_register(&alarm_device);
+ if (err)
+ return err;
+
+ for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++)
+ alarm_init(&alarms[i], i, alarm_triggered);
+ suspend_blocker_init(&alarm_suspend_blocker, "alarm");
+
+ return 0;
+}
+
+static void __exit alarm_dev_exit(void)
+{
+ misc_deregister(&alarm_device);
+ suspend_blocker_unregister(&alarm_suspend_blocker);
+}
+
+module_init(alarm_dev_init);
+module_exit(alarm_dev_exit);
+
diff --git a/drivers/rtc/alarm.c b/drivers/rtc/alarm.c
new file mode 100644
index 00000000000..272e8909f3b
--- /dev/null
+++ b/drivers/rtc/alarm.c
@@ -0,0 +1,588 @@
+/* drivers/rtc/alarm.c
+ *
+ * Copyright (C) 2007-2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/mach/time.h>
+#include <linux/android_alarm.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/sysdev.h>
+#include <linux/suspend.h>
+
+#define ANDROID_ALARM_PRINT_ERROR (1U << 0)
+#define ANDROID_ALARM_PRINT_INIT_STATUS (1U << 1)
+#define ANDROID_ALARM_PRINT_TSET (1U << 2)
+#define ANDROID_ALARM_PRINT_CALL (1U << 3)
+#define ANDROID_ALARM_PRINT_SUSPEND (1U << 4)
+#define ANDROID_ALARM_PRINT_INT (1U << 5)
+#define ANDROID_ALARM_PRINT_FLOW (1U << 6)
+
+static int debug_mask = ANDROID_ALARM_PRINT_ERROR | \
+ ANDROID_ALARM_PRINT_INIT_STATUS;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define pr_alarm(debug_level_mask, args...) \
+ do { \
+ if (debug_mask & ANDROID_ALARM_PRINT_##debug_level_mask) { \
+ pr_info(args); \
+ } \
+ } while (0)
+
+#define ANDROID_ALARM_WAKEUP_MASK ( \
+ ANDROID_ALARM_RTC_WAKEUP_MASK | \
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK)
+
+/* support old userspace code */
+#define ANDROID_ALARM_SET_OLD _IOW('a', 2, time_t) /* set alarm */
+#define ANDROID_ALARM_SET_AND_WAIT_OLD _IOW('a', 3, time_t)
+
+struct alarm_queue {
+ struct rb_root alarms;
+ struct rb_node *first;
+ struct hrtimer timer;
+ ktime_t delta;
+ bool stopped;
+ ktime_t stopped_time;
+};
+
+static struct rtc_device *alarm_rtc_dev;
+static DEFINE_SPINLOCK(alarm_slock);
+static DEFINE_MUTEX(alarm_setrtc_mutex);
+static struct suspend_blocker alarm_rtc_suspend_blocker;
+static struct platform_device *alarm_platform_dev;
+struct alarm_queue alarms[ANDROID_ALARM_TYPE_COUNT];
+static bool suspended;
+
+static void update_timer_locked(struct alarm_queue *base, bool head_removed)
+{
+ struct alarm *alarm;
+ bool is_wakeup = base == &alarms[ANDROID_ALARM_RTC_WAKEUP] ||
+ base == &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+
+ if (base->stopped) {
+ pr_alarm(FLOW, "changed alarm while setting the wall time\n");
+ return;
+ }
+
+ if (is_wakeup && !suspended && head_removed)
+ suspend_unblock(&alarm_rtc_suspend_blocker);
+
+ if (!base->first)
+ return;
+
+ alarm = container_of(base->first, struct alarm, node);
+
+ pr_alarm(FLOW, "selected alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (is_wakeup && suspended) {
+ pr_alarm(FLOW, "changed alarm while suspened\n");
+ suspend_block_timeout(&alarm_rtc_suspend_blocker, 1 * HZ);
+ return;
+ }
+
+ hrtimer_try_to_cancel(&base->timer);
+ base->timer._expires = ktime_add(base->delta, alarm->expires);
+ base->timer._softexpires = ktime_add(base->delta, alarm->softexpires);
+ hrtimer_start_expires(&base->timer, HRTIMER_MODE_ABS);
+}
+
+static void alarm_enqueue_locked(struct alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ struct rb_node **link = &base->alarms.rb_node;
+ struct rb_node *parent = NULL;
+ struct alarm *entry;
+ int leftmost = 1;
+
+ pr_alarm(FLOW, "added alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function, ktime_to_ns(alarm->expires));
+
+ if (base->first == &alarm->node)
+ base->first = rb_next(&alarm->node);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ }
+
+ while (*link) {
+ parent = *link;
+ entry = rb_entry(parent, struct alarm, node);
+ /*
+ * We don't care about collisions. Nodes with
+ * the same expiry time stay together.
+ */
+ if (alarm->expires.tv64 < entry->expires.tv64) {
+ link = &(*link)->rb_left;
+ } else {
+ link = &(*link)->rb_right;
+ leftmost = 0;
+ }
+ }
+ if (leftmost) {
+ base->first = &alarm->node;
+ update_timer_locked(base, false);
+ }
+
+ rb_link_node(&alarm->node, parent, link);
+ rb_insert_color(&alarm->node, &base->alarms);
+}
+
+/**
+ * alarm_init - initialize an alarm
+ * @alarm: the alarm to be initialized
+ * @type: the alarm type to be used
+ * @function: alarm callback function
+ */
+void alarm_init(struct alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct alarm *))
+{
+ RB_CLEAR_NODE(&alarm->node);
+ alarm->type = type;
+ alarm->function = function;
+
+ pr_alarm(FLOW, "created alarm, type %d, func %pF\n", type, function);
+}
+
+
+/**
+ * alarm_start_range - (re)start an alarm
+ * @alarm: the alarm to be added
+ * @start: earliest expiry time
+ * @end: expiry time
+ */
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ alarm->softexpires = start;
+ alarm->expires = end;
+ alarm_enqueue_locked(alarm);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+}
+
+/**
+ * alarm_try_to_cancel - try to deactivate an alarm
+ * @alarm: alarm to stop
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ * -1 when the alarm may currently be executing the callback function and
+ * cannot be stopped (it may also be inactive)
+ */
+int alarm_try_to_cancel(struct alarm *alarm)
+{
+ struct alarm_queue *base = &alarms[alarm->type];
+ unsigned long flags;
+ bool first = false;
+ int ret = 0;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ if (!RB_EMPTY_NODE(&alarm->node)) {
+ pr_alarm(FLOW, "canceled alarm, type %d, func %pF at %lld\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires));
+ ret = 1;
+ if (base->first == &alarm->node) {
+ base->first = rb_next(&alarm->node);
+ first = true;
+ }
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ if (first)
+ update_timer_locked(base, true);
+ } else
+ pr_alarm(FLOW, "tried to cancel alarm, type %d, func %pF\n",
+ alarm->type, alarm->function);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (!ret && hrtimer_callback_running(&base->timer))
+ ret = -1;
+ return ret;
+}
+
+/**
+ * alarm_cancel - cancel an alarm and wait for the handler to finish.
+ * @alarm: the alarm to be cancelled
+ *
+ * Returns:
+ * 0 when the alarm was not active
+ * 1 when the alarm was active
+ */
+int alarm_cancel(struct alarm *alarm)
+{
+ for (;;) {
+ int ret = alarm_try_to_cancel(alarm);
+ if (ret >= 0)
+ return ret;
+ cpu_relax();
+ }
+}
+
+/**
+ * alarm_set_rtc - set the kernel and rtc walltime
+ * @new_time: timespec value containing the new time
+ */
+int alarm_set_rtc(struct timespec new_time)
+{
+ int i;
+ int ret;
+ unsigned long flags;
+ struct rtc_time rtc_new_rtc_time;
+ struct timespec tmp_time;
+
+ rtc_time_to_tm(new_time.tv_sec, &rtc_new_rtc_time);
+
+ pr_alarm(TSET, "set rtc %ld %ld - rtc %02d:%02d:%02d %02d/%02d/%04d\n",
+ new_time.tv_sec, new_time.tv_nsec,
+ rtc_new_rtc_time.tm_hour, rtc_new_rtc_time.tm_min,
+ rtc_new_rtc_time.tm_sec, rtc_new_rtc_time.tm_mon + 1,
+ rtc_new_rtc_time.tm_mday,
+ rtc_new_rtc_time.tm_year + 1900);
+
+ mutex_lock(&alarm_setrtc_mutex);
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspend_block(&alarm_rtc_suspend_blocker);
+ getnstimeofday(&tmp_time);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_try_to_cancel(&alarms[i].timer);
+ alarms[i].stopped = true;
+ alarms[i].stopped_time = timespec_to_ktime(tmp_time);
+ }
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ ktime_sub(alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta,
+ timespec_to_ktime(timespec_sub(tmp_time, new_time)));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ ret = do_settimeofday(&new_time);
+ spin_lock_irqsave(&alarm_slock, flags);
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ alarms[i].stopped = false;
+ update_timer_locked(&alarms[i], false);
+ }
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ if (ret < 0) {
+ pr_alarm(ERROR, "alarm_set_rtc: Failed to set time\n");
+ goto err;
+ }
+ if (!alarm_rtc_dev) {
+ pr_alarm(ERROR,
+ "alarm_set_rtc: no RTC, time will be lost on reboot\n");
+ goto err;
+ }
+ ret = rtc_set_time(alarm_rtc_dev, &rtc_new_rtc_time);
+ if (ret < 0)
+ pr_alarm(ERROR, "alarm_set_rtc: "
+ "Failed to set RTC, time will be lost on reboot\n");
+err:
+ suspend_unblock(&alarm_rtc_suspend_blocker);
+ mutex_unlock(&alarm_setrtc_mutex);
+ return ret;
+}
+
+/**
+ * alarm_get_elapsed_realtime - get the elapsed real time in ktime_t format
+ *
+ * returns the time in ktime_t format
+ */
+ktime_t alarm_get_elapsed_realtime(void)
+{
+ ktime_t now;
+ unsigned long flags;
+ struct alarm_queue *base = &alarms[ANDROID_ALARM_ELAPSED_REALTIME];
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ now = base->stopped ? base->stopped_time : ktime_get_real();
+ now = ktime_sub(now, base->delta);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return now;
+}
+
+static enum hrtimer_restart alarm_timer_triggered(struct hrtimer *timer)
+{
+ struct alarm_queue *base;
+ struct alarm *alarm;
+ unsigned long flags;
+ ktime_t now;
+
+ spin_lock_irqsave(&alarm_slock, flags);
+
+ base = container_of(timer, struct alarm_queue, timer);
+ now = base->stopped ? base->stopped_time : hrtimer_cb_get_time(timer);
+ now = ktime_sub(now, base->delta);
+
+ pr_alarm(INT, "alarm_timer_triggered type %d at %lld\n",
+ base - alarms, ktime_to_ns(now));
+
+ while (base->first) {
+ alarm = container_of(base->first, struct alarm, node);
+ if (alarm->softexpires.tv64 > now.tv64) {
+ pr_alarm(FLOW, "don't call alarm, %pF, %lld (s %lld)\n",
+ alarm->function, ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ break;
+ }
+ base->first = rb_next(&alarm->node);
+ rb_erase(&alarm->node, &base->alarms);
+ RB_CLEAR_NODE(&alarm->node);
+ pr_alarm(CALL, "call alarm, type %d, func %pF, %lld (s %lld)\n",
+ alarm->type, alarm->function,
+ ktime_to_ns(alarm->expires),
+ ktime_to_ns(alarm->softexpires));
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ alarm->function(alarm);
+ spin_lock_irqsave(&alarm_slock, flags);
+ }
+ if (!base->first)
+ pr_alarm(FLOW, "no more alarms of type %d\n", base - alarms);
+ update_timer_locked(base, true);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return HRTIMER_NORESTART;
+}
+
+static void alarm_triggered_func(void *p)
+{
+ struct rtc_device *rtc = alarm_rtc_dev;
+ if (!(rtc->irq_data & RTC_AF))
+ return;
+ pr_alarm(INT, "rtc alarm triggered\n");
+ suspend_block_timeout(&alarm_rtc_suspend_blocker, 1 * HZ);
+}
+
+static int alarm_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int err = 0;
+ unsigned long flags;
+ struct rtc_wkalrm rtc_alarm;
+ struct rtc_time rtc_current_rtc_time;
+ unsigned long rtc_current_time;
+ unsigned long rtc_alarm_time;
+ struct timespec rtc_delta;
+ struct timespec wall_time;
+ struct alarm_queue *wakeup_queue = NULL;
+ struct alarm_queue *tmp_queue = NULL;
+
+ pr_alarm(SUSPEND, "alarm_suspend(%p, %d)\n", pdev, state.event);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = true;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ hrtimer_cancel(&alarms[ANDROID_ALARM_RTC_WAKEUP].timer);
+ hrtimer_cancel(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].timer);
+
+ tmp_queue = &alarms[ANDROID_ALARM_RTC_WAKEUP];
+ if (tmp_queue->first)
+ wakeup_queue = tmp_queue;
+ tmp_queue = &alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP];
+ if (tmp_queue->first && (!wakeup_queue ||
+ hrtimer_get_expires(&tmp_queue->timer).tv64 <
+ hrtimer_get_expires(&wakeup_queue->timer).tv64))
+ wakeup_queue = tmp_queue;
+ if (wakeup_queue) {
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ getnstimeofday(&wall_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ set_normalized_timespec(&rtc_delta,
+ wall_time.tv_sec - rtc_current_time,
+ wall_time.tv_nsec);
+
+ rtc_alarm_time = timespec_sub(ktime_to_timespec(
+ hrtimer_get_expires(&wakeup_queue->timer)),
+ rtc_delta).tv_sec;
+
+ rtc_time_to_tm(rtc_alarm_time, &rtc_alarm.time);
+ rtc_alarm.enabled = 1;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+ rtc_read_time(alarm_rtc_dev, &rtc_current_rtc_time);
+ rtc_tm_to_time(&rtc_current_rtc_time, &rtc_current_time);
+ pr_alarm(SUSPEND,
+ "rtc alarm set at %ld, now %ld, rtc delta %ld.%09ld\n",
+ rtc_alarm_time, rtc_current_time,
+ rtc_delta.tv_sec, rtc_delta.tv_nsec);
+ if (rtc_current_time + 1 >= rtc_alarm_time) {
+ pr_alarm(SUSPEND, "alarm about to go off\n");
+ memset(&rtc_alarm, 0, sizeof(rtc_alarm));
+ rtc_alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &rtc_alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ suspend_block_timeout(&alarm_rtc_suspend_blocker,
+ 2 * HZ);
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP],
+ false);
+ update_timer_locked(&alarms[
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP], false);
+ err = -EBUSY;
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ }
+ }
+ return err;
+}
+
+static int alarm_resume(struct platform_device *pdev)
+{
+ struct rtc_wkalrm alarm;
+ unsigned long flags;
+
+ pr_alarm(SUSPEND, "alarm_resume(%p)\n", pdev);
+
+ memset(&alarm, 0, sizeof(alarm));
+ alarm.enabled = 0;
+ rtc_set_alarm(alarm_rtc_dev, &alarm);
+
+ spin_lock_irqsave(&alarm_slock, flags);
+ suspended = false;
+ update_timer_locked(&alarms[ANDROID_ALARM_RTC_WAKEUP], false);
+ update_timer_locked(&alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP],
+ false);
+ spin_unlock_irqrestore(&alarm_slock, flags);
+
+ return 0;
+}
+
+static struct rtc_task alarm_rtc_task = {
+ .func = alarm_triggered_func
+};
+
+static int rtc_alarm_add_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ int err;
+ struct rtc_device *rtc = to_rtc_device(dev);
+
+ mutex_lock(&alarm_setrtc_mutex);
+
+ if (alarm_rtc_dev) {
+ err = -EBUSY;
+ goto err1;
+ }
+
+ alarm_platform_dev =
+ platform_device_register_simple("alarm", -1, NULL, 0);
+ if (IS_ERR(alarm_platform_dev)) {
+ err = PTR_ERR(alarm_platform_dev);
+ goto err2;
+ }
+ err = rtc_irq_register(rtc, &alarm_rtc_task);
+ if (err)
+ goto err3;
+ alarm_rtc_dev = rtc;
+ pr_alarm(INIT_STATUS, "using rtc device, %s, for alarms", rtc->name);
+ mutex_unlock(&alarm_setrtc_mutex);
+
+ return 0;
+
+err3:
+ platform_device_unregister(alarm_platform_dev);
+err2:
+err1:
+ mutex_unlock(&alarm_setrtc_mutex);
+ return err;
+}
+
+static void rtc_alarm_remove_device(struct device *dev,
+ struct class_interface *class_intf)
+{
+ if (dev == &alarm_rtc_dev->dev) {
+ pr_alarm(INIT_STATUS, "lost rtc device for alarms");
+ rtc_irq_unregister(alarm_rtc_dev, &alarm_rtc_task);
+ platform_device_unregister(alarm_platform_dev);
+ alarm_rtc_dev = NULL;
+ }
+}
+
+static struct class_interface rtc_alarm_interface = {
+ .add_dev = &rtc_alarm_add_device,
+ .remove_dev = &rtc_alarm_remove_device,
+};
+
+static struct platform_driver alarm_driver = {
+ .suspend = alarm_suspend,
+ .resume = alarm_resume,
+ .driver = {
+ .name = "alarm"
+ }
+};
+
+static int __init alarm_late_init(void)
+{
+ unsigned long flags;
+ struct timespec tmp_time, system_time;
+
+ /* this needs to run after the rtc is read at boot */
+ spin_lock_irqsave(&alarm_slock, flags);
+ /* We read the current rtc and system time so we can later calculate
+ * elapsed realtime to be (boot_systemtime + rtc - boot_rtc) ==
+ * (rtc - (boot_rtc - boot_systemtime))
+ */
+ getnstimeofday(&tmp_time);
+ ktime_get_ts(&system_time);
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP].delta =
+ alarms[ANDROID_ALARM_ELAPSED_REALTIME].delta =
+ timespec_to_ktime(timespec_sub(tmp_time, system_time));
+
+ spin_unlock_irqrestore(&alarm_slock, flags);
+ return 0;
+}
+
+static int __init alarm_driver_init(void)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ANDROID_ALARM_SYSTEMTIME; i++) {
+ hrtimer_init(&alarms[i].timer,
+ CLOCK_REALTIME, HRTIMER_MODE_ABS);
+ alarms[i].timer.function = alarm_timer_triggered;
+ }
+ hrtimer_init(&alarms[ANDROID_ALARM_SYSTEMTIME].timer,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ alarms[ANDROID_ALARM_SYSTEMTIME].timer.function = alarm_timer_triggered;
+ err = platform_driver_register(&alarm_driver);
+ if (err < 0)
+ goto err1;
+ suspend_blocker_init(&alarm_rtc_suspend_blocker, "alarm_rtc");
+ rtc_alarm_interface.class = rtc_class;
+ err = class_interface_register(&rtc_alarm_interface);
+ if (err < 0)
+ goto err2;
+
+ return 0;
+
+err2:
+ suspend_blocker_unregister(&alarm_rtc_suspend_blocker);
+ platform_driver_unregister(&alarm_driver);
+err1:
+ return err;
+}
+
+static void __exit alarm_exit(void)
+{
+ class_interface_unregister(&rtc_alarm_interface);
+ suspend_blocker_unregister(&alarm_rtc_suspend_blocker);
+ platform_driver_unregister(&alarm_driver);
+}
+
+late_initcall(alarm_late_init);
+module_init(alarm_driver_init);
+module_exit(alarm_exit);
+
diff --git a/include/linux/android_alarm.h b/include/linux/android_alarm.h
new file mode 100644
index 00000000000..f8f14e793db
--- /dev/null
+++ b/include/linux/android_alarm.h
@@ -0,0 +1,106 @@
+/* include/linux/android_alarm.h
+ *
+ * Copyright (C) 2006-2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_ALARM_H
+#define _LINUX_ANDROID_ALARM_H
+
+#include <linux/ioctl.h>
+#include <linux/time.h>
+
+enum android_alarm_type {
+ /* return code bit numbers or set alarm arg */
+ ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME,
+
+ ANDROID_ALARM_TYPE_COUNT,
+
+ /* return code bit numbers */
+ /* ANDROID_ALARM_TIME_CHANGE = 16 */
+};
+
+#ifdef __KERNEL__
+
+#include <linux/ktime.h>
+#include <linux/rbtree.h>
+
+/*
+ * The alarm interface is similar to the hrtimer interface but adds support
+ * for wakeup from suspend. It also adds an elapsed realtime clock that can
+ * be used for periodic timers that need to keep running while the system is
+ * suspended and not be disrupted when the wall time is set.
+ */
+
+/**
+ * struct alarm - the basic alarm structure
+ * @node: red black tree node for time ordered insertion
+ * @type: alarm type. rtc/elapsed-realtime/systemtime, wakeup/non-wakeup.
+ * @softexpires: the absolute earliest expiry time of the alarm.
+ * @expires: the absolute expiry time.
+ * @function: alarm expiry callback function
+ *
+ * The alarm structure must be initialized by alarm_init()
+ *
+ */
+
+struct alarm {
+ struct rb_node node;
+ enum android_alarm_type type;
+ ktime_t softexpires;
+ ktime_t expires;
+ void (*function)(struct alarm *);
+};
+
+void alarm_init(struct alarm *alarm,
+ enum android_alarm_type type, void (*function)(struct alarm *));
+void alarm_start_range(struct alarm *alarm, ktime_t start, ktime_t end);
+int alarm_try_to_cancel(struct alarm *alarm);
+int alarm_cancel(struct alarm *alarm);
+ktime_t alarm_get_elapsed_realtime(void);
+
+/* set rtc while preserving elapsed realtime */
+int alarm_set_rtc(const struct timespec ts);
+
+#endif
+
+enum android_alarm_return_flags {
+ ANDROID_ALARM_RTC_WAKEUP_MASK = 1U << ANDROID_ALARM_RTC_WAKEUP,
+ ANDROID_ALARM_RTC_MASK = 1U << ANDROID_ALARM_RTC,
+ ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME_WAKEUP,
+ ANDROID_ALARM_ELAPSED_REALTIME_MASK =
+ 1U << ANDROID_ALARM_ELAPSED_REALTIME,
+ ANDROID_ALARM_SYSTEMTIME_MASK = 1U << ANDROID_ALARM_SYSTEMTIME,
+ ANDROID_ALARM_TIME_CHANGE_MASK = 1U << 16
+};
+
+/* Disable alarm */
+#define ANDROID_ALARM_CLEAR(type) _IO('a', 0 | ((type) << 4))
+
+/* Ack last alarm and wait for next */
+#define ANDROID_ALARM_WAIT _IO('a', 1)
+
+#define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size)
+/* Set alarm */
+#define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec)
+#define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec)
+#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec)
+#define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec)
+#define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0)))
+#define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4)
+
+#endif
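
For illustration, the ioctl interface that alarm-dev.c exports through
/dev/alarm could be driven from user space roughly like this (a sketch; the
five-second expiry is arbitrary):

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <time.h>
    #include <linux/android_alarm.h>

    int fd = open("/dev/alarm", O_RDWR);
    struct timespec ts;

    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec += 5;         /* expiry is absolute on the selected clock */
    ioctl(fd, ANDROID_ALARM_SET(ANDROID_ALARM_RTC_WAKEUP), &ts);
    /* blocks until an alarm fires; returns a mask of *_MASK bits */
    int fired = ioctl(fd, ANDROID_ALARM_WAIT);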
diff --git a/include/linux/earlysuspend.h b/include/linux/earlysuspend.h
new file mode 100755
index 00000000000..e3d03fb5215
--- /dev/null
+++ b/include/linux/earlysuspend.h
@@ -0,0 +1,55 @@
+/* include/linux/earlysuspend.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_EARLYSUSPEND_H
+#define _LINUX_EARLYSUSPEND_H
+
+#include <linux/list.h>
+
+/* The early_suspend structure defines suspend and resume hooks to be called
+ * when the user visible sleep state of the system changes, and a level to
+ * control the order. They can be used to turn off the screen and input
+ * devices that are not used for wakeup.
+ * Suspend handlers are called in low to high level order, resume handlers are
+ * called in the opposite order. If, when calling register_early_suspend,
+ * the suspend handlers have already been called without a matching call to the
+ * resume handlers, the suspend handler will be called directly from
+ * register_early_suspend. This direct call can violate the normal level order.
+ */
+enum {
+ EARLY_SUSPEND_LEVEL_BLANK_SCREEN = 50,
+ EARLY_SUSPEND_LEVEL_STOP_INPUT = 75,
+ EARLY_SUSPEND_LEVEL_STOP_DRAWING = 100,
+ EARLY_SUSPEND_LEVEL_DISABLE_FB = 150,
+};
+struct early_suspend {
+#ifdef CONFIG_EARLYSUSPEND
+ struct list_head link;
+ int level;
+ void (*suspend)(struct early_suspend *h);
+ void (*resume)(struct early_suspend *h);
+#endif
+};
+
+#ifdef CONFIG_EARLYSUSPEND
+void register_early_suspend(struct early_suspend *handler);
+void unregister_early_suspend(struct early_suspend *handler);
+#else
+#define register_early_suspend(handler) do { } while (0)
+#define unregister_early_suspend(handler) do { } while (0)
+#endif
+
+#endif
+
diff --git a/include/linux/input.h b/include/linux/input.h
index 7ed2251b33f..b2d93b4f657 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -82,6 +82,9 @@ struct input_absinfo {
#define EVIOCGRAB _IOW('E', 0x90, int) /* Grab/Release device */
+#define EVIOCGSUSPENDBLOCK _IOR('E', 0x91, int) /* get suspend block enable */
+#define EVIOCSSUSPENDBLOCK _IOW('E', 0x91, int) /* set suspend block enable */
+
/*
* Event types
*/
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index ebd2b8fb00d..f6412c8ac5c 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -14,6 +14,7 @@
#define __LINUX_POWER_SUPPLY_H__
#include <linux/device.h>
+#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/leds.h>
@@ -152,7 +153,7 @@ struct power_supply {
/* private */
struct device *dev;
- struct work_struct changed_work;
+ struct suspend_blocking_work changed_work;
#ifdef CONFIG_LEDS_TRIGGERS
struct led_trigger *charging_full_trig;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 5e781d824e6..07023d373e4 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -6,6 +6,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/mm.h>
+#include <linux/suspend_blocker.h>
#include <asm/errno.h>
#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
diff --git a/include/linux/suspend_blocker.h b/include/linux/suspend_blocker.h
new file mode 100755
index 00000000000..351f50198b1
--- /dev/null
+++ b/include/linux/suspend_blocker.h
@@ -0,0 +1,173 @@
+/* include/linux/suspend_blocker.h
+ *
+ * Copyright (C) 2007-2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SUSPEND_BLOCKER_H
+#define _LINUX_SUSPEND_BLOCKER_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+#include <linux/workqueue.h>
+
+/**
+ * struct suspend_blocker_stats - statistics for a suspend blocker
+ *
+ * @count: Number of times this blocker has been deactivated.
+ * @expire_count: Number of times this blocker has been auto-deactivated.
+ * @wakeup_count: Number of times this blocker was the first to block suspend
+ * after resume.
+ * @total_time: Total time this suspend blocker has prevented suspend.
+ * @prevent_suspend_time: Time this suspend blocker has prevented suspend while
+ * user-space requested suspend.
+ * @max_time: Max time this suspend blocker has been continuously active.
+ * @last_time: Monotonic clock when the active state last changed.
+ */
+struct suspend_blocker_stats {
+#ifdef CONFIG_SUSPEND_BLOCKER_STATS
+ unsigned int count;
+ unsigned int expire_count;
+ unsigned int wakeup_count;
+ ktime_t total_time;
+ ktime_t prevent_suspend_time;
+ ktime_t max_time;
+ ktime_t last_time;
+#endif
+};
+
+/**
+ * struct suspend_blocker - the basic suspend_blocker structure
+ * @link: List entry for active or inactive list.
+ * @flags: Tracks initialized and active state and statistics.
+ * @expires: Time, in jiffies, to unblock suspend.
+ * @name: Suspend blocker name used for debugging.
+ *
+ * When a suspend_blocker is active it prevents the system from entering
+ * opportunistic suspend.
+ *
+ * The suspend_blocker structure must be initialized by suspend_blocker_init()
+ */
+struct suspend_blocker {
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+ struct list_head link;
+ int flags;
+ unsigned long expires;
+ const char *name;
+ struct suspend_blocker_stats stat;
+#endif
+};
+
+/**
+ * struct suspend_blocking_work - the basic suspend_blocking_work structure
+ * @work: Standard work struct.
+ * @suspend_blocker: Suspend blocker.
+ * @func: Callback.
+ * @lock: Spinlock protecting pending and running state.
+ * @active: Number of cpu workqueues where work is pending or
+ * callback is running.
+ *
+ * When suspend blocking work is pending or its callback is running it prevents
+ * the system from entering opportunistic suspend.
+ *
+ * The suspend_blocking_work structure must be initialized by
+ * suspend_blocking_work_init().
+ */
+
+struct suspend_blocking_work {
+ struct work_struct work;
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+ struct suspend_blocker suspend_blocker;
+ work_func_t func;
+ spinlock_t lock;
+ int active;
+#endif
+};
+
+static inline struct suspend_blocking_work *to_suspend_blocking_work(
+ struct work_struct *work)
+{
+ return container_of(work, struct suspend_blocking_work, work);
+}
+
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+#define __SUSPEND_BLOCKER_INITIALIZER(blocker_name) \
+ { .name = #blocker_name, }
+
+#define DEFINE_SUSPEND_BLOCKER(blocker, name) \
+ struct suspend_blocker blocker = __SUSPEND_BLOCKER_INITIALIZER(name)
+
+extern void suspend_blocker_register(struct suspend_blocker *blocker);
+extern void suspend_blocker_init(struct suspend_blocker *blocker,
+ const char *name);
+extern void suspend_blocker_unregister(struct suspend_blocker *blocker);
+extern void suspend_block(struct suspend_blocker *blocker);
+extern void suspend_block_timeout(struct suspend_blocker *blocker,
+ long timeout);
+extern void suspend_unblock(struct suspend_blocker *blocker);
+extern bool suspend_blocker_is_active(struct suspend_blocker *blocker);
+extern bool suspend_is_blocked(void);
+
+void suspend_blocking_work_init(struct suspend_blocking_work *work,
+ work_func_t func, const char *name);
+void suspend_blocking_work_destroy(struct suspend_blocking_work *work);
+int queue_suspend_blocking_work(struct workqueue_struct *wq,
+ struct suspend_blocking_work *work);
+int schedule_suspend_blocking_work(struct suspend_blocking_work *work);
+int cancel_suspend_blocking_work_sync(struct suspend_blocking_work *work);
+
+#else
+
+#define DEFINE_SUSPEND_BLOCKER(blocker, name) \
+ struct suspend_blocker blocker
+
+static inline void suspend_blocker_register(struct suspend_blocker *bl) {}
+static inline void suspend_blocker_init(struct suspend_blocker *bl,
+ const char *n) {}
+static inline void suspend_blocker_unregister(struct suspend_blocker *bl) {}
+static inline void suspend_block(struct suspend_blocker *bl) {}
+static inline void suspend_block_timeout(struct suspend_blocker *bl, long t) {}
+static inline void suspend_unblock(struct suspend_blocker *bl) {}
+static inline bool suspend_blocker_is_active(struct suspend_blocker *bl)
+{
+ return false;
+}
+static inline bool suspend_is_blocked(void) { return false; }
+
+static inline void suspend_blocking_work_init(
+ struct suspend_blocking_work *work, work_func_t func, const char *name)
+{
+ INIT_WORK(&work->work, func);
+}
+static inline void suspend_blocking_work_destroy(
+ struct suspend_blocking_work *work)
+{
+ cancel_work_sync(&work->work);
+}
+static inline int queue_suspend_blocking_work(
+ struct workqueue_struct *wq, struct suspend_blocking_work *work)
+{
+ return queue_work(wq, &work->work);
+}
+static inline int schedule_suspend_blocking_work(
+ struct suspend_blocking_work *work)
+{
+ return schedule_work(&work->work);
+}
+static inline int cancel_suspend_blocking_work_sync(
+ struct suspend_blocking_work *work)
+{
+ return cancel_work_sync(&work->work);
+}
+#endif
+
+#endif
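
As an illustration of the API above, here is a minimal sketch of a driver-side
user. The device, handler and function names are hypothetical, not part of
this patch:

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/suspend_blocker.h>

    /* Hypothetical driver: hold off suspend from IRQ time until the
     * received data has been consumed. */
    static DEFINE_SUSPEND_BLOCKER(example_blocker, example_rx);

    static int __init example_init(void)
    {
            /* DEFINE_SUSPEND_BLOCKER() only sets the name; the blocker
             * must still be registered before it can be used. */
            suspend_blocker_register(&example_blocker);
            return 0;
    }

    static irqreturn_t example_irq(int irq, void *dev_id)
    {
            suspend_block(&example_blocker);	/* safe in IRQ context */
            return IRQ_HANDLED;
    }

    static void example_rx_done(void)
    {
            /* Data consumed; opportunistic suspend may proceed again. */
            suspend_unblock(&example_blocker);
    }
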
diff --git a/include/linux/suspend_ioctls.h b/include/linux/suspend_ioctls.h
index 0b30382984f..0068058c94c 100644
--- a/include/linux/suspend_ioctls.h
+++ b/include/linux/suspend_ioctls.h
@@ -30,4 +30,9 @@ struct resume_swap_area {
#define SNAPSHOT_ALLOC_SWAP_PAGE _IOR(SNAPSHOT_IOC_MAGIC, 20, __kernel_loff_t)
#define SNAPSHOT_IOC_MAXNR 20
+#define SUSPEND_BLOCKER_IOCTL_SET_NAME(len) _IOC(_IOC_WRITE, 's', 0, len)
+#define SUSPEND_BLOCKER_IOCTL_BLOCK _IO('s', 1)
+#define SUSPEND_BLOCKER_IOCTL_UNBLOCK _IO('s', 2)
+#define SUSPEND_BLOCKER_IOCTL_BLOCK_TIMEOUT _IOW('s', 3, struct timespec)
+
#endif /* _LINUX_SUSPEND_IOCTLS_H */
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100755
index 00000000000..babe4f0f904
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,140 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/pm_qos_params.h>
+#include <linux/suspend.h>
+#include <linux/timer.h>
+
+enum {
+ WAKE_LOCK_SUSPEND, /* Prevent suspend */
+ WAKE_LOCK_IDLE, /* Prevent low power idle */
+ WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+ union {
+ struct suspend_blocker blocker;
+ struct {
+ char *name;
+ struct timer_list timer;
+ } qos;
+ };
+ int type;
+};
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+	switch (lock->type) {
+ case WAKE_LOCK_SUSPEND:
+ suspend_unblock(&lock->blocker);
+ break;
+ case WAKE_LOCK_IDLE:
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+ lock->qos.name, PM_QOS_DEFAULT_VALUE);
+ break;
+ }
+}
+
+static inline void idle_wake_lock_timeout(unsigned long data)
+{
+ wake_unlock((struct wake_lock *)data);
+}
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+ const char *name)
+{
+ lock->type = type;
+	switch (type) {
+ case WAKE_LOCK_SUSPEND:
+ suspend_blocker_init(&lock->blocker, name);
+ break;
+ case WAKE_LOCK_IDLE:
+ lock->qos.name = (char *)name;
+ setup_timer(&lock->qos.timer, idle_wake_lock_timeout,
+ (unsigned long)lock);
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, lock->qos.name,
+ PM_QOS_DEFAULT_VALUE);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+	switch (lock->type) {
+ case WAKE_LOCK_SUSPEND:
+ suspend_blocker_unregister(&lock->blocker);
+ break;
+ case WAKE_LOCK_IDLE:
+ del_timer(&lock->qos.timer);
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY,
+ lock->qos.name);
+ break;
+ }
+}
+static inline void wake_lock(struct wake_lock *lock)
+{
+	switch (lock->type) {
+ case WAKE_LOCK_SUSPEND:
+ suspend_block(&lock->blocker);
+ break;
+ case WAKE_LOCK_IDLE:
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+ lock->qos.name, 0);
+ break;
+ }
+}
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+	switch (lock->type) {
+ case WAKE_LOCK_SUSPEND:
+ suspend_block_timeout(&lock->blocker, timeout);
+ break;
+ case WAKE_LOCK_IDLE:
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY,
+ lock->qos.name, 0);
+ mod_timer(&lock->qos.timer, jiffies + timeout);
+ break;
+ }
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+	switch (lock->type) {
+ case WAKE_LOCK_SUSPEND:
+ return suspend_blocker_is_active(&lock->blocker);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+static inline long has_wake_lock(int type)
+{
+	switch (type) {
+ case WAKE_LOCK_SUSPEND:
+ return suspend_is_blocked();
+ case WAKE_LOCK_IDLE:
+ return !pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+#endif
+
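A short sketch of the wrapper in use, with illustrative names;
WAKE_LOCK_SUSPEND locks map directly onto the suspend blocker API above:

    #include <linux/jiffies.h>
    #include <linux/wakelock.h>

    static struct wake_lock example_lock;

    static void example_setup(void)
    {
            wake_lock_init(&example_lock, WAKE_LOCK_SUSPEND, "example");
    }

    static void example_input_event(void)
    {
            /* Equivalent to suspend_block_timeout(): blocks suspend for
             * about two seconds, then auto-expires. */
            wake_lock_timeout(&example_lock, 2 * HZ);
    }
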
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 5c36ea9d55d..608e2dedc0e 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -130,6 +130,85 @@ config SUSPEND_FREEZER
Turning OFF this setting is NOT recommended! If in doubt, say Y.
+config OPPORTUNISTIC_SUSPEND
+ bool "Opportunistic suspend"
+ depends on SUSPEND
+ select RTC_LIB
+ default n
+ ---help---
+ Opportunistic sleep support. Allows the system to be put into a sleep
+ state opportunistically, if it doesn't do any useful work at the
+ moment. The PM subsystem is switched into this mode of operation by
+ writing "opportunistic" into /sys/power/policy, while writing
+ "forced" to this file turns the opportunistic suspend feature off.
+ In the "opportunistic" mode suspend blockers are used to determine
+ when to suspend the system and the value written to /sys/power/state
+ determines the sleep state the system will be put into when there are
+ no active suspend blockers.
+
+config SUSPEND_BLOCKER_STATS
+ bool "Suspend blockers statistics"
+ depends on OPPORTUNISTIC_SUSPEND
+ default y
+ ---help---
+	  Report per-suspend-blocker usage statistics through
+	  /sys/kernel/debug/suspend_blockers.
+
+config USER_SUSPEND_BLOCKERS
+ bool "User space suspend blockers"
+ depends on OPPORTUNISTIC_SUSPEND
+ ---help---
+ User space suspend blockers API. Creates a misc device allowing user
+ space to create, use and destroy suspend blockers.
+
+config HAS_EARLYSUSPEND
+ bool
+
+config USER_WAKELOCK
+ bool "Userspace wake locks"
+ depends on OPPORTUNISTIC_SUSPEND
+ default y
+ ---help---
+	  User-space wake lock API. Write "lockname" or "lockname timeout"
+	  to /sys/power/wake_lock to acquire a wake lock, creating it
+	  first if needed. Write "lockname" to /sys/power/wake_unlock to
+	  release a user wake lock.
+
+config EARLYSUSPEND
+ bool "Early suspend"
+ depends on OPPORTUNISTIC_SUSPEND
+ select HAS_EARLYSUSPEND
+ default y
+ ---help---
+	  Call early suspend handlers when the user-requested sleep state
+	  changes.
+
+choice
+ prompt "User-space screen access"
+ default FB_EARLYSUSPEND if !FRAMEBUFFER_CONSOLE
+ default CONSOLE_EARLYSUSPEND
+ depends on EARLYSUSPEND
+
+ config NO_USER_SPACE_SCREEN_ACCESS_CONTROL
+ bool "None"
+
+ config CONSOLE_EARLYSUSPEND
+ bool "Console switch on early-suspend"
+ depends on EARLYSUSPEND && VT
+ ---help---
+	  Register an early suspend handler that performs a console
+	  switch when user-space should stop drawing to the screen, and
+	  switches back when it should resume.
+
+ config FB_EARLYSUSPEND
+ bool "Sysfs interface"
+ depends on EARLYSUSPEND
+ ---help---
+	  Register an early suspend handler that notifies user-space
+	  through sysfs when it should stop drawing to the screen, waits
+	  for it to do so, and notifies it again when it may resume.
+endchoice
+
config HIBERNATION_NVS
bool
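
The help texts above describe the /sys/power/policy and /sys/power/state
protocol. A user-space sketch, with error handling omitted; the paths follow
from the sysfs attributes added in kernel/power/main.c below:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    static void write_str(const char *path, const char *val)
    {
            int fd = open(path, O_WRONLY);

            if (fd >= 0) {
                    write(fd, val, strlen(val));
                    close(fd);
            }
    }

    int main(void)
    {
            /* Select opportunistic suspend, then request "mem". Under
             * this policy the write returns immediately; the kernel
             * suspends whenever no suspend blockers are active. */
            write_str("/sys/power/policy", "opportunistic");
            write_str("/sys/power/state", "mem");
            return 0;
    }
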
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 43191815f87..5d0d3487b12 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -7,6 +7,13 @@ obj-$(CONFIG_PM) += main.o
obj-$(CONFIG_PM_SLEEP) += console.o
obj-$(CONFIG_FREEZER) += process.o
obj-$(CONFIG_SUSPEND) += suspend.o
+obj-$(CONFIG_OPPORTUNISTIC_SUSPEND) += opportunistic_suspend.o
+obj-$(CONFIG_OPPORTUNISTIC_SUSPEND) += wakelocktest.o
+obj-$(CONFIG_USER_SUSPEND_BLOCKERS) += user_suspend_blocker.o
+obj-$(CONFIG_EARLYSUSPEND) += earlysuspend.o
+obj-$(CONFIG_CONSOLE_EARLYSUSPEND) += consoleearlysuspend.o
+obj-$(CONFIG_FB_EARLYSUSPEND) += fbearlysuspend.o
+obj-$(CONFIG_USER_WAKELOCK) += userwakelock.o
obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o
obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o
obj-$(CONFIG_HIBERNATION_NVS) += hibernate_nvs.o
diff --git a/kernel/power/consoleearlysuspend.c b/kernel/power/consoleearlysuspend.c
new file mode 100644
index 00000000000..a3edcb26738
--- /dev/null
+++ b/kernel/power/consoleearlysuspend.c
@@ -0,0 +1,78 @@
+/* kernel/power/consoleearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/earlysuspend.h>
+#include <linux/kbd_kern.h>
+#include <linux/module.h>
+#include <linux/vt_kern.h>
+#include <linux/wait.h>
+
+#define EARLY_SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
+
+static int orig_fgconsole;
+static void console_early_suspend(struct early_suspend *h)
+{
+ acquire_console_sem();
+ orig_fgconsole = fg_console;
+ if (vc_allocate(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ if (set_console(EARLY_SUSPEND_CONSOLE))
+ goto err;
+ release_console_sem();
+
+ if (vt_waitactive(EARLY_SUSPEND_CONSOLE + 1))
+ pr_warning("console_early_suspend: Can't switch VCs.\n");
+ return;
+err:
+ pr_warning("console_early_suspend: Can't set console\n");
+ release_console_sem();
+}
+
+static void console_late_resume(struct early_suspend *h)
+{
+ int ret;
+ acquire_console_sem();
+ ret = set_console(orig_fgconsole);
+ release_console_sem();
+ if (ret) {
+ pr_warning("console_late_resume: Can't set console.\n");
+ return;
+ }
+
+ if (vt_waitactive(orig_fgconsole + 1))
+ pr_warning("console_late_resume: Can't switch VCs.\n");
+}
+
+static struct early_suspend console_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = console_early_suspend,
+ .resume = console_late_resume,
+};
+
+static int __init console_early_suspend_init(void)
+{
+ register_early_suspend(&console_early_suspend_desc);
+ return 0;
+}
+
+static void __exit console_early_suspend_exit(void)
+{
+ unregister_early_suspend(&console_early_suspend_desc);
+}
+
+module_init(console_early_suspend_init);
+module_exit(console_early_suspend_exit);
+
diff --git a/kernel/power/earlysuspend.c b/kernel/power/earlysuspend.c
new file mode 100644
index 00000000000..927ae72c80e
--- /dev/null
+++ b/kernel/power/earlysuspend.c
@@ -0,0 +1,175 @@
+/* kernel/power/earlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rtc.h>
+#include <linux/syscalls.h> /* sys_sync */
+#include <linux/workqueue.h>
+
+#include "power.h"
+
+extern struct workqueue_struct *pm_wq;
+
+enum {
+ DEBUG_SUSPEND = 1U << 0,
+ DEBUG_HANDLERS = 1U << 1,
+};
+static int debug_mask;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(early_suspend_lock);
+static LIST_HEAD(early_suspend_handlers);
+static void early_suspend(struct work_struct *work);
+static void late_resume(struct work_struct *work);
+static DECLARE_WORK(early_suspend_work, early_suspend);
+static DECLARE_WORK(late_resume_work, late_resume);
+static DEFINE_SPINLOCK(state_lock);
+enum {
+ SUSPEND_REQUESTED = 0x1,
+ SUSPENDED = 0x2,
+ SUSPEND_REQUESTED_AND_SUSPENDED = SUSPEND_REQUESTED | SUSPENDED,
+};
+static int state;
+
+void register_early_suspend(struct early_suspend *handler)
+{
+ struct list_head *pos;
+
+ mutex_lock(&early_suspend_lock);
+ list_for_each(pos, &early_suspend_handlers) {
+ struct early_suspend *e;
+ e = list_entry(pos, struct early_suspend, link);
+ if (e->level > handler->level)
+ break;
+ }
+ list_add_tail(&handler->link, pos);
+ if ((state & SUSPENDED) && handler->suspend)
+ handler->suspend(handler);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(register_early_suspend);
+
+void unregister_early_suspend(struct early_suspend *handler)
+{
+ mutex_lock(&early_suspend_lock);
+ list_del(&handler->link);
+ mutex_unlock(&early_suspend_lock);
+}
+EXPORT_SYMBOL(unregister_early_suspend);
+
+static void early_suspend(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPEND_REQUESTED)
+ state |= SUSPENDED;
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: abort, state %d\n", state);
+ mutex_unlock(&early_suspend_lock);
+ goto abort;
+ }
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: call handlers\n");
+ list_for_each_entry(pos, &early_suspend_handlers, link) {
+ if (pos->suspend) {
+ if (debug_mask & DEBUG_HANDLERS)
+ pr_info("early_suspend: call %pF\n",
+ pos->suspend);
+ pos->suspend(pos);
+ if (debug_mask & DEBUG_HANDLERS)
+ pr_info("early_suspend: %pF returned\n",
+ pos->suspend);
+ }
+ }
+ mutex_unlock(&early_suspend_lock);
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("early_suspend: sync\n");
+
+ sys_sync();
+abort:
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPEND_REQUESTED_AND_SUSPENDED)
+ suspend_unblock(&main_suspend_blocker);
+ spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
+static void late_resume(struct work_struct *work)
+{
+ struct early_suspend *pos;
+ unsigned long irqflags;
+ int abort = 0;
+
+ mutex_lock(&early_suspend_lock);
+ spin_lock_irqsave(&state_lock, irqflags);
+ if (state == SUSPENDED)
+ state = 0; /* clear SUSPENDED */
+ else
+ abort = 1;
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ if (abort) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: abort, state %d\n", state);
+ goto abort;
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: call handlers\n");
+ list_for_each_entry_reverse(pos, &early_suspend_handlers, link)
+ if (pos->resume) {
+ if (debug_mask & DEBUG_HANDLERS)
+				pr_info("late_resume: call %pF\n",
+					pos->resume);
+ pos->resume(pos);
+ if (debug_mask & DEBUG_HANDLERS)
+				pr_info("late_resume: %pF returned\n",
+					pos->resume);
+ }
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("late_resume: done\n");
+abort:
+ mutex_unlock(&early_suspend_lock);
+}
+
+void request_early_suspend_state(bool on)
+{
+ unsigned long irqflags;
+ int old_sleep;
+
+ spin_lock_irqsave(&state_lock, irqflags);
+ old_sleep = state & SUSPEND_REQUESTED;
+ if (!old_sleep && !on) {
+ state |= SUSPEND_REQUESTED;
+ queue_work(pm_wq, &early_suspend_work);
+ } else if (old_sleep && on) {
+ state &= ~SUSPEND_REQUESTED;
+ suspend_block(&main_suspend_blocker);
+ queue_work(pm_wq, &late_resume_work);
+ }
+ spin_unlock_irqrestore(&state_lock, irqflags);
+}
+
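For reference, a sketch of a client of this interface. The handler is
hypothetical, and the EARLY_SUSPEND_LEVEL_BLANK_SCREEN level is assumed from
include/linux/earlysuspend.h (not shown in this hunk):

    #include <linux/earlysuspend.h>
    #include <linux/init.h>

    static void example_blank(struct early_suspend *h)
    {
            /* Turn the panel off; the framebuffer stays accessible. */
    }

    static void example_unblank(struct early_suspend *h)
    {
            /* Turn the panel back on. */
    }

    static struct early_suspend example_handler = {
            .level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN,
            .suspend = example_blank,
            .resume = example_unblank,
    };

    static int __init example_init(void)
    {
            /* Handlers are kept sorted by level: early_suspend() walks
             * the list forward, late_resume() walks it in reverse. */
            register_early_suspend(&example_handler);
            return 0;
    }
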
diff --git a/kernel/power/fbearlysuspend.c b/kernel/power/fbearlysuspend.c
new file mode 100644
index 00000000000..19f3e54f2ef
--- /dev/null
+++ b/kernel/power/fbearlysuspend.c
@@ -0,0 +1,153 @@
+/* kernel/power/fbearlysuspend.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/earlysuspend.h>
+#include <linux/module.h>
+#include <linux/wait.h>
+
+#include "power.h"
+
+static wait_queue_head_t fb_state_wq;
+static DEFINE_SPINLOCK(fb_state_lock);
+static enum {
+ FB_STATE_STOPPED_DRAWING,
+ FB_STATE_REQUEST_STOP_DRAWING,
+ FB_STATE_DRAWING_OK,
+} fb_state;
+
+/* tell userspace to stop drawing, wait for it to stop */
+static void stop_drawing_early_suspend(struct early_suspend *h)
+{
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_REQUEST_STOP_DRAWING;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ wake_up_all(&fb_state_wq);
+ ret = wait_event_timeout(fb_state_wq,
+ fb_state == FB_STATE_STOPPED_DRAWING,
+ HZ);
+ if (unlikely(fb_state != FB_STATE_STOPPED_DRAWING))
+ pr_warning("stop_drawing_early_suspend: timeout waiting for "
+ "userspace to stop drawing\n");
+}
+
+/* tell userspace to start drawing */
+static void start_drawing_late_resume(struct early_suspend *h)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ fb_state = FB_STATE_DRAWING_OK;
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+ wake_up(&fb_state_wq);
+}
+
+static struct early_suspend stop_drawing_early_suspend_desc = {
+ .level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
+ .suspend = stop_drawing_early_suspend,
+ .resume = start_drawing_late_resume,
+};
+
+static ssize_t wait_for_fb_sleep_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state != FB_STATE_DRAWING_OK);
+ if (ret && fb_state == FB_STATE_DRAWING_OK)
+ return ret;
+ else
+		s += sprintf(s, "sleeping");
+ return s - buf;
+}
+
+static ssize_t wait_for_fb_wake_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int ret;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&fb_state_lock, irq_flags);
+ if (fb_state == FB_STATE_REQUEST_STOP_DRAWING) {
+ fb_state = FB_STATE_STOPPED_DRAWING;
+ wake_up(&fb_state_wq);
+ }
+ spin_unlock_irqrestore(&fb_state_lock, irq_flags);
+
+ ret = wait_event_interruptible(fb_state_wq,
+ fb_state == FB_STATE_DRAWING_OK);
+ if (ret && fb_state != FB_STATE_DRAWING_OK)
+ return ret;
+ else
+		s += sprintf(s, "awake");
+
+ return s - buf;
+}
+
+#define power_ro_attr(_name) \
+static struct kobj_attribute _name##_attr = { \
+ .attr = { \
+ .name = __stringify(_name), \
+ .mode = 0444, \
+ }, \
+ .show = _name##_show, \
+ .store = NULL, \
+}
+
+power_ro_attr(wait_for_fb_sleep);
+power_ro_attr(wait_for_fb_wake);
+
+static struct attribute *g[] = {
+ &wait_for_fb_sleep_attr.attr,
+ &wait_for_fb_wake_attr.attr,
+ NULL,
+};
+
+static struct attribute_group attr_group = {
+ .attrs = g,
+};
+
+static int __init fb_earlysuspend_init(void)
+{
+ int ret;
+
+ init_waitqueue_head(&fb_state_wq);
+ fb_state = FB_STATE_DRAWING_OK;
+
+ ret = sysfs_create_group(power_kobj, &attr_group);
+ if (ret) {
+ pr_err("fb_earlysuspend_init: sysfs_create_group failed\n");
+ return ret;
+ }
+
+ register_early_suspend(&stop_drawing_early_suspend_desc);
+ return 0;
+}
+
+static void __exit fb_earlysuspend_exit(void)
+{
+ unregister_early_suspend(&stop_drawing_early_suspend_desc);
+ sysfs_remove_group(power_kobj, &attr_group);
+}
+
+module_init(fb_earlysuspend_init);
+module_exit(fb_earlysuspend_exit);
+
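A sketch of the intended user-space consumer of the two attributes above.
Since they are created on power_kobj, the paths are assumed to be under
/sys/power/:

    #include <fcntl.h>
    #include <unistd.h>

    static void wait_on(const char *path)
    {
            char buf[16];
            int fd = open(path, O_RDONLY);

            if (fd >= 0) {
                    read(fd, buf, sizeof(buf)); /* blocks until the state changes */
                    close(fd);
            }
    }

    int main(void)
    {
            for (;;) {
                    wait_on("/sys/power/wait_for_fb_sleep");
                    /* Stop drawing here. Opening wait_for_fb_wake also
                     * acknowledges the stop request to the kernel. */
                    wait_on("/sys/power/wait_for_fb_wake");
                    /* Resume drawing here. */
            }
            return 0;
    }
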
diff --git a/kernel/power/main.c b/kernel/power/main.c
index b58800b21fc..dff332e9a2f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -20,6 +20,62 @@ DEFINE_MUTEX(pm_mutex);
unsigned int pm_flags;
EXPORT_SYMBOL(pm_flags);
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+struct pm_policy {
+ const char *name;
+ bool (*valid_state)(suspend_state_t state);
+ int (*set_state)(suspend_state_t state);
+};
+
+static struct pm_policy policies[] = {
+ {
+ .name = "forced",
+ .valid_state = valid_state,
+ .set_state = enter_state,
+ },
+ {
+ .name = "opportunistic",
+ .valid_state = opportunistic_suspend_valid_state,
+ .set_state = opportunistic_suspend_state,
+ },
+};
+
+/* Index into policies[]; "opportunistic" is the default. */
+static int policy = 1;
+
+static inline bool hibernation_supported(void)
+{
+ return !strncmp(policies[policy].name, "forced", 6);
+}
+
+static inline bool pm_state_valid(int state_idx)
+{
+ return pm_states[state_idx] && policies[policy].valid_state(state_idx);
+}
+
+static inline int pm_enter_state(int state_idx)
+{
+ return policies[policy].set_state(state_idx);
+}
+
+#else
+
+static inline bool hibernation_supported(void) { return true; }
+
+static inline bool pm_state_valid(int state_idx)
+{
+ return pm_states[state_idx] && valid_state(state_idx);
+}
+
+static inline int pm_enter_state(int state_idx)
+{
+ return enter_state(state_idx);
+}
+#endif /* CONFIG_OPPORTUNISTIC_SUSPEND */
+
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
@@ -146,6 +202,15 @@ struct kobject *power_kobj;
*
* store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
+ *
+ * If policy is set to opportunistic, store() does not block until the
+ * system resumes, and it will try to re-enter the state until another
+ * state is requested. Suspend blockers are respected and the requested
+ * state will only be entered when no suspend blockers are active.
+ * Write "on" to cancel the request and stop re-entering the state.
+ *
+ * If CONFIG_EARLYSUSPEND is set, early_suspend hooks are called when
+ * the requested state changes to or from "on".
*/
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
char *buf)
@@ -155,12 +220,13 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
int i;
for (i = 0; i < PM_SUSPEND_MAX; i++) {
- if (pm_states[i] && valid_state(i))
+ if (pm_state_valid(i))
s += sprintf(s,"%s ", pm_states[i]);
}
#endif
#ifdef CONFIG_HIBERNATION
- s += sprintf(s, "%s\n", "disk");
+ if (hibernation_supported())
+ s += sprintf(s, "%s\n", "disk");
#else
if (s != buf)
/* convert the last space to a newline */
@@ -173,7 +239,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
- suspend_state_t state = PM_SUSPEND_STANDBY;
+ suspend_state_t state = PM_SUSPEND_ON;
const char * const *s;
#endif
char *p;
@@ -185,8 +251,9 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
/* First, check if we are requested to hibernate */
if (len == 4 && !strncmp(buf, "disk", len)) {
- error = hibernate();
- goto Exit;
+ if (hibernation_supported())
+ error = hibernate();
+ goto Exit;
}
#ifdef CONFIG_SUSPEND
@@ -195,7 +262,7 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
break;
}
if (state < PM_SUSPEND_MAX && *s)
- error = enter_state(state);
+ error = pm_enter_state(state);
#endif
Exit:
@@ -204,6 +271,56 @@ static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(state);
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+/**
+ * policy - Select the suspend policy ("forced" or "opportunistic").
+ */
+static ssize_t policy_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(policies); i++) {
+ if (i == policy)
+ s += sprintf(s, "[%s] ", policies[i].name);
+ else
+ s += sprintf(s, "%s ", policies[i].name);
+ }
+ if (s != buf)
+ /* convert the last space to a newline */
+ *(s-1) = '\n';
+ return (s - buf);
+}
+
+static ssize_t policy_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ const char *s;
+ char *p;
+ int len;
+ int i;
+
+ p = memchr(buf, '\n', n);
+ len = p ? p - buf : n;
+
+ for (i = 0; i < ARRAY_SIZE(policies); i++) {
+ s = policies[i].name;
+ if (s && len == strlen(s) && !strncmp(buf, s, len)) {
+ mutex_lock(&pm_mutex);
+ policies[policy].set_state(PM_SUSPEND_ON);
+ policy = i;
+ mutex_unlock(&pm_mutex);
+ return n;
+ }
+ }
+ return -EINVAL;
+}
+
+power_attr(policy);
+#endif /* CONFIG_OPPORTUNISTIC_SUSPEND */
+
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
@@ -229,6 +346,11 @@ pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_trace);
#endif /* CONFIG_PM_TRACE */
+#ifdef CONFIG_USER_WAKELOCK
+power_attr(wake_lock);
+power_attr(wake_unlock);
+#endif
+
static struct attribute * g[] = {
&state_attr.attr,
#ifdef CONFIG_PM_TRACE
@@ -236,10 +358,17 @@ static struct attribute * g[] = {
#endif
#ifdef CONFIG_PM_SLEEP
&pm_async_attr.attr,
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+ &policy_attr.attr,
+#endif
#ifdef CONFIG_PM_DEBUG
&pm_test_attr.attr,
#endif
#endif
+#ifdef CONFIG_USER_WAKELOCK
+ &wake_lock_attr.attr,
+ &wake_unlock_attr.attr,
+#endif
NULL,
};
@@ -247,7 +376,7 @@ static struct attribute_group attr_group = {
.attrs = g,
};
-#ifdef CONFIG_PM_RUNTIME
+#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_OPPORTUNISTIC_SUSPEND)
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
@@ -266,6 +395,7 @@ static int __init pm_init(void)
int error = pm_start_workqueue();
if (error)
return error;
+ opportunistic_suspend_init();
power_kobj = kobject_create_and_add("power", NULL);
if (!power_kobj)
return -ENOMEM;
diff --git a/kernel/power/opportunistic_suspend.c b/kernel/power/opportunistic_suspend.c
new file mode 100644
index 00000000000..e1a0d139600
--- /dev/null
+++ b/kernel/power/opportunistic_suspend.c
@@ -0,0 +1,810 @@
+/*
+ * kernel/power/opportunistic_suspend.c
+ *
+ * Copyright (C) 2005-2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/rtc.h>
+#include <linux/suspend.h>
+#include <linux/debugfs.h>
+
+#include "power.h"
+
+extern struct workqueue_struct *pm_wq;
+
+enum {
+ DEBUG_EXIT_SUSPEND = 1U << 0,
+ DEBUG_WAKEUP = 1U << 1,
+ DEBUG_USER_STATE = 1U << 2,
+ DEBUG_SUSPEND = 1U << 3,
+ DEBUG_SUSPEND_BLOCKER = 1U << 4,
+ DEBUG_EXPIRE = 1U << 5,
+};
+static int debug_mask = DEBUG_EXIT_SUSPEND | DEBUG_WAKEUP | DEBUG_USER_STATE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int unknown_wakeup_delay_msecs = 500;
+module_param_named(unknown_wakeup_delay_msecs, unknown_wakeup_delay_msecs, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define SB_INITIALIZED (1U << 8)
+#define SB_ACTIVE (1U << 9)
+#define SB_AUTO_EXPIRE (1U << 10)
+#define SB_PREVENTING_SUSPEND (1U << 11)
+
+DEFINE_SUSPEND_BLOCKER(main_suspend_blocker, main);
+
+static DEFINE_SPINLOCK(list_lock);
+static DEFINE_SPINLOCK(state_lock);
+static LIST_HEAD(inactive_blockers);
+static LIST_HEAD(active_blockers);
+static int current_event_num;
+static suspend_state_t requested_suspend_state = PM_SUSPEND_MEM;
+static bool enable_suspend_blockers;
+static DEFINE_SUSPEND_BLOCKER(unknown_wakeup, unknown_wakeups);
+
+#ifdef CONFIG_SUSPEND_BLOCKER_STATS
+static struct suspend_blocker_stats dropped_suspend_blockers;
+static ktime_t last_sleep_time_update;
+static bool wait_for_wakeup;
+
+static void suspend_blocker_stat_init(struct suspend_blocker_stats *stat)
+{
+ stat->count = 0;
+ stat->expire_count = 0;
+ stat->wakeup_count = 0;
+ stat->total_time = ktime_set(0, 0);
+ stat->prevent_suspend_time = ktime_set(0, 0);
+ stat->max_time = ktime_set(0, 0);
+ stat->last_time = ktime_set(0, 0);
+}
+
+static void init_dropped_suspend_blockers(void)
+{
+ suspend_blocker_stat_init(&dropped_suspend_blockers);
+}
+
+static void suspend_blocker_stat_drop(struct suspend_blocker_stats *stat)
+{
+ if (!stat->count)
+ return;
+
+ dropped_suspend_blockers.count += stat->count;
+ dropped_suspend_blockers.expire_count += stat->expire_count;
+ dropped_suspend_blockers.total_time = ktime_add(
+ dropped_suspend_blockers.total_time, stat->total_time);
+ dropped_suspend_blockers.prevent_suspend_time = ktime_add(
+ dropped_suspend_blockers.prevent_suspend_time,
+ stat->prevent_suspend_time);
+ dropped_suspend_blockers.max_time = ktime_add(
+ dropped_suspend_blockers.max_time, stat->max_time);
+}
+
+static bool stats_get_expired_time(struct suspend_blocker *blocker,
+ ktime_t *expire_time)
+{
+ struct timespec ts;
+ struct timespec kt;
+ struct timespec tomono;
+ struct timespec delta;
+ unsigned long seq;
+ long timeout;
+
+ if (!(blocker->flags & SB_AUTO_EXPIRE))
+ return false;
+ do {
+ seq = read_seqbegin(&xtime_lock);
+ timeout = blocker->expires - jiffies;
+ if (timeout > 0)
+ return false;
+ kt = current_kernel_time();
+ tomono = wall_to_monotonic;
+ } while (read_seqretry(&xtime_lock, seq));
+ jiffies_to_timespec(-timeout, &delta);
+ set_normalized_timespec(&ts, kt.tv_sec + tomono.tv_sec - delta.tv_sec,
+ kt.tv_nsec + tomono.tv_nsec - delta.tv_nsec);
+ *expire_time = timespec_to_ktime(ts);
+ return true;
+}
+
+static void suspend_unblock_stat(struct suspend_blocker *blocker, bool expired)
+{
+ struct suspend_blocker_stats *stat = &blocker->stat;
+ ktime_t duration;
+ ktime_t now;
+
+ if (!(blocker->flags & SB_ACTIVE))
+ return;
+
+ if (stats_get_expired_time(blocker, &now))
+ expired = true;
+ else
+ now = ktime_get();
+ stat->count++;
+ if (expired)
+ stat->expire_count++;
+ duration = ktime_sub(now, stat->last_time);
+ stat->total_time = ktime_add(stat->total_time, duration);
+ if (ktime_to_ns(duration) > ktime_to_ns(stat->max_time))
+ stat->max_time = duration;
+
+ stat->last_time = ktime_get();
+ if (blocker->flags & SB_PREVENTING_SUSPEND) {
+ duration = ktime_sub(now, last_sleep_time_update);
+ stat->prevent_suspend_time = ktime_add(
+ stat->prevent_suspend_time, duration);
+ blocker->flags &= ~SB_PREVENTING_SUSPEND;
+ }
+}
+
+static void suspend_block_stat(struct suspend_blocker *blocker)
+{
+ if (wait_for_wakeup) {
+ if (debug_mask & DEBUG_WAKEUP)
+ pr_info("wakeup suspend blocker: %s\n", blocker->name);
+
+ wait_for_wakeup = false;
+ blocker->stat.wakeup_count++;
+ }
+ if ((blocker->flags & SB_AUTO_EXPIRE) &&
+ time_is_before_eq_jiffies(blocker->expires)) {
+ suspend_unblock_stat(blocker, false);
+ blocker->stat.last_time = ktime_get();
+ }
+
+ if (!(blocker->flags & SB_ACTIVE))
+ blocker->stat.last_time = ktime_get();
+}
+
+static void update_sleep_wait_stats(bool done)
+{
+ struct suspend_blocker *blocker;
+ ktime_t now, etime, elapsed, add;
+ bool expired;
+
+ now = ktime_get();
+ elapsed = ktime_sub(now, last_sleep_time_update);
+ list_for_each_entry(blocker, &active_blockers, link) {
+ struct suspend_blocker_stats *stat = &blocker->stat;
+
+ expired = stats_get_expired_time(blocker, &etime);
+ if (blocker->flags & SB_PREVENTING_SUSPEND) {
+ if (expired)
+ add = ktime_sub(etime, last_sleep_time_update);
+ else
+ add = elapsed;
+ stat->prevent_suspend_time = ktime_add(
+ stat->prevent_suspend_time, add);
+ }
+ if (done || expired)
+ blocker->flags &= ~SB_PREVENTING_SUSPEND;
+ else
+ blocker->flags |= SB_PREVENTING_SUSPEND;
+ }
+ last_sleep_time_update = now;
+}
+
+void about_to_enter_suspend(void)
+{
+ wait_for_wakeup = true;
+}
+
+#else /* !CONFIG_SUSPEND_BLOCKER_STATS */
+
+static inline void init_dropped_suspend_blockers(void) {}
+static inline void suspend_blocker_stat_init(struct suspend_blocker_stats *s) {}
+static inline void suspend_blocker_stat_drop(struct suspend_blocker_stats *s) {}
+static inline void suspend_unblock_stat(struct suspend_blocker *blocker,
+ bool expired) {}
+static inline void suspend_block_stat(struct suspend_blocker *blocker) {}
+static inline void update_sleep_wait_stats(bool done) {}
+#endif /* !CONFIG_SUSPEND_BLOCKER_STATS */
+
+#define pr_info_time(fmt, args...) \
+ do { \
+ struct timespec ts; \
+ struct rtc_time tm; \
+ getnstimeofday(&ts); \
+ rtc_time_to_tm(ts.tv_sec, &tm); \
+ pr_info(fmt "(%d-%02d-%02d %02d:%02d:%02d.%09lu UTC)\n" , \
+ args, \
+ tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, \
+ tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec); \
+	} while (0)
+
+static void print_active_suspend_blockers(void)
+{
+ struct suspend_blocker *blocker;
+
+ list_for_each_entry(blocker, &active_blockers, link) {
+ if (blocker->flags & SB_AUTO_EXPIRE) {
+ long timeout = blocker->expires - jiffies;
+ if (timeout <= 0)
+ pr_info("PM: Suspend blocker %s, expired\n",
+ blocker->name);
+ else
+ pr_info("PM: Active suspend blocker %s, time "
+ "left %ld\n", blocker->name, timeout);
+ } else
+ pr_info("PM: Active suspend blocker %s\n",
+ blocker->name);
+ }
+}
+
+static void expire_suspend_blocker(struct suspend_blocker *blocker)
+{
+ suspend_unblock_stat(blocker, true);
+ blocker->flags &= ~(SB_ACTIVE | SB_AUTO_EXPIRE);
+ list_del(&blocker->link);
+ list_add(&blocker->link, &inactive_blockers);
+ if (debug_mask & (DEBUG_SUSPEND_BLOCKER | DEBUG_EXPIRE))
+ pr_info("expired suspend blocker %s\n", blocker->name);
+}
+
+static long max_suspend_blocker_timeout_locked(void)
+{
+ struct suspend_blocker *blocker, *n;
+ long max_timeout = 0;
+
+ list_for_each_entry_safe(blocker, n, &active_blockers, link) {
+ if (blocker->flags & SB_AUTO_EXPIRE) {
+ long timeout = blocker->expires - jiffies;
+ if (timeout <= 0)
+ expire_suspend_blocker(blocker);
+ else if (timeout > max_timeout)
+ max_timeout = timeout;
+ } else
+ return -1;
+ }
+ return max_timeout;
+}
+
+/**
+ * suspend_is_blocked - Check if there are active suspend blockers.
+ *
+ * Return true if suspend blockers are enabled and there are active suspend
+ * blockers, in which case the system cannot be put to sleep opportunistically.
+ */
+bool suspend_is_blocked(void)
+{
+	bool ret;
+	unsigned long irqflags;
+	if (!enable_suspend_blockers)
+		return false;
+ spin_lock_irqsave(&list_lock, irqflags);
+ ret = !!max_suspend_blocker_timeout_locked();
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return ret;
+}
+
+static void suspend_worker(struct work_struct *work)
+{
+ int ret;
+ int entry_event_num;
+
+ enable_suspend_blockers = true;
+
+ if (suspend_is_blocked()) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("PM: Automatic suspend aborted\n");
+ goto abort;
+ }
+
+ entry_event_num = current_event_num;
+
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("PM: Automatic suspend\n");
+
+ ret = pm_suspend(requested_suspend_state);
+
+ if (debug_mask & DEBUG_EXIT_SUSPEND)
+ pr_info_time("PM: Automatic suspend exit, ret = %d ", ret);
+
+ if (current_event_num == entry_event_num) {
+ if (debug_mask & DEBUG_SUSPEND)
+ pr_info("PM: pm_suspend() returned with no event\n");
+ suspend_block_timeout(&unknown_wakeup,
+ msecs_to_jiffies(unknown_wakeup_delay_msecs));
+ }
+
+abort:
+ enable_suspend_blockers = false;
+}
+static DECLARE_WORK(suspend_work, suspend_worker);
+
+static void expire_suspend_blockers(unsigned long data)
+{
+ long timeout;
+ unsigned long irqflags;
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_suspend_blockers: start\n");
+ spin_lock_irqsave(&list_lock, irqflags);
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_suspend_blockers();
+ timeout = max_suspend_blocker_timeout_locked();
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("expire_suspend_blockers: done, timeout %ld\n",
+ timeout);
+ if (timeout == 0)
+ queue_work(pm_wq, &suspend_work);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+static DEFINE_TIMER(expire_timer, expire_suspend_blockers, 0, 0);
+
+static void update_suspend(struct suspend_blocker *blocker, long max_timeout)
+{
+ if (max_timeout > 0) {
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("suspend_blocker: %s, start expire timer, "
+ "%ld\n", blocker->name, max_timeout);
+ mod_timer(&expire_timer, jiffies + max_timeout);
+ } else {
+ if (del_timer(&expire_timer))
+ if (debug_mask & DEBUG_EXPIRE)
+ pr_info("suspend_blocker: %s, stop expire "
+ "timer\n", blocker->name);
+ if (max_timeout == 0)
+ queue_work(pm_wq, &suspend_work);
+ }
+}
+
+/**
+ * suspend_blocker_register - Prepare a suspend blocker for being used.
+ * @blocker: Suspend blocker to handle.
+ *
+ * The suspend blocker struct and name must not be freed before calling
+ * suspend_blocker_unregister().
+ */
+void suspend_blocker_register(struct suspend_blocker *blocker)
+{
+ unsigned long irqflags = 0;
+
+ WARN_ON(!blocker->name);
+
+ if (debug_mask & DEBUG_SUSPEND_BLOCKER)
+ pr_info("%s: Registering %s\n", __func__, blocker->name);
+
+ suspend_blocker_stat_init(&blocker->stat);
+
+ blocker->flags = SB_INITIALIZED;
+ INIT_LIST_HEAD(&blocker->link);
+
+ spin_lock_irqsave(&list_lock, irqflags);
+ list_add(&blocker->link, &inactive_blockers);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(suspend_blocker_register);
+
+/**
+ * suspend_blocker_init - Initialize a suspend blocker's name and register it.
+ * @blocker: Suspend blocker to initialize.
+ * @name: The name of the suspend blocker to show in debug messages and
+ * /sys/kernel/debug/suspend_blockers.
+ *
+ * The suspend blocker struct and name must not be freed before calling
+ * suspend_blocker_unregister().
+ */
+void suspend_blocker_init(struct suspend_blocker *blocker, const char *name)
+{
+ blocker->name = name;
+ suspend_blocker_register(blocker);
+}
+EXPORT_SYMBOL(suspend_blocker_init);
+
+/**
+ * suspend_blocker_unregister - Unregister a suspend blocker.
+ * @blocker: Suspend blocker to handle.
+ */
+void suspend_blocker_unregister(struct suspend_blocker *blocker)
+{
+ unsigned long irqflags;
+
+ if (WARN_ON(!(blocker->flags & SB_INITIALIZED)))
+ return;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+
+ suspend_unblock_stat(blocker, false);
+ suspend_blocker_stat_drop(&blocker->stat);
+
+ blocker->flags &= ~SB_INITIALIZED;
+ list_del(&blocker->link);
+ if (blocker->flags & SB_ACTIVE)
+ update_suspend(blocker, max_suspend_blocker_timeout_locked());
+ spin_unlock_irqrestore(&list_lock, irqflags);
+
+ if (debug_mask & DEBUG_SUSPEND_BLOCKER)
+ pr_info("%s: Unregistered %s\n", __func__, blocker->name);
+}
+EXPORT_SYMBOL(suspend_blocker_unregister);
+
+static void __suspend_block(struct suspend_blocker *blocker, long timeout,
+ bool has_timeout)
+{
+ unsigned long irqflags;
+
+ if (WARN_ON(!(blocker->flags & SB_INITIALIZED)))
+ return;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+
+ suspend_block_stat(blocker);
+
+ blocker->flags |= SB_ACTIVE;
+ if (has_timeout) {
+ if (debug_mask & DEBUG_SUSPEND_BLOCKER)
+ pr_info("suspend_block: %s, timeout %ld.%03lu\n",
+ blocker->name, timeout / HZ,
+ (timeout % HZ) * MSEC_PER_SEC / HZ);
+
+ blocker->expires = jiffies + timeout;
+ blocker->flags |= SB_AUTO_EXPIRE;
+ list_move_tail(&blocker->link, &active_blockers);
+ } else {
+ if (debug_mask & DEBUG_SUSPEND_BLOCKER)
+ pr_info("suspend_block: %s\n", blocker->name);
+
+ blocker->expires = LONG_MAX;
+ blocker->flags &= ~SB_AUTO_EXPIRE;
+		/*
+		 * Add to the head of the list so suspend_is_blocked()
+		 * only has to examine one entry.
+		 */
+ list_move(&blocker->link, &active_blockers);
+ }
+
+ current_event_num++;
+
+ if (blocker == &main_suspend_blocker)
+ update_sleep_wait_stats(true);
+ else if (!suspend_blocker_is_active(&main_suspend_blocker))
+ update_sleep_wait_stats(false);
+ update_suspend(blocker, has_timeout ?
+ max_suspend_blocker_timeout_locked() : -1);
+
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+
+/**
+ * suspend_block - Block system suspend.
+ * @blocker: Suspend blocker to use.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void suspend_block(struct suspend_blocker *blocker)
+{
+ __suspend_block(blocker, 0, false);
+}
+EXPORT_SYMBOL(suspend_block);
+
+/**
+ * suspend_block_timeout - Block system suspend for a limited time.
+ * @blocker: Suspend blocker to use.
+ * @timeout: Timeout, in jiffies, after which the blocker auto-unblocks.
+ */
+void suspend_block_timeout(struct suspend_blocker *blocker, long timeout)
+{
+ __suspend_block(blocker, timeout, true);
+}
+EXPORT_SYMBOL(suspend_block_timeout);
+
+/**
+ * suspend_unblock - Allow system suspend to happen.
+ * @blocker: Suspend blocker to unblock.
+ *
+ * If no other suspend blockers are active, schedule suspend of the system.
+ *
+ * It is safe to call this function from interrupt context.
+ */
+void suspend_unblock(struct suspend_blocker *blocker)
+{
+ unsigned long irqflags;
+
+ if (WARN_ON(!(blocker->flags & SB_INITIALIZED)))
+ return;
+
+ spin_lock_irqsave(&list_lock, irqflags);
+
+ if (debug_mask & DEBUG_SUSPEND_BLOCKER)
+ pr_info("%s: %s\n", __func__, blocker->name);
+
+ suspend_unblock_stat(blocker, false);
+
+ blocker->flags &= ~(SB_ACTIVE | SB_AUTO_EXPIRE);
+ list_move(&blocker->link, &inactive_blockers);
+ update_suspend(blocker, max_suspend_blocker_timeout_locked());
+
+ if (blocker == &main_suspend_blocker) {
+ if (debug_mask & DEBUG_SUSPEND)
+ print_active_suspend_blockers();
+
+ update_sleep_wait_stats(false);
+ }
+
+ spin_unlock_irqrestore(&list_lock, irqflags);
+}
+EXPORT_SYMBOL(suspend_unblock);
+
+/**
+ * suspend_blocker_is_active - Test if a suspend blocker is blocking suspend
+ * @blocker: Suspend blocker to check.
+ *
+ * Returns true if the suspend_blocker is currently active.
+ *
+ * This function does not check the blocker's timeout itself; however, if
+ * the timeout was already seen to have expired when checked elsewhere,
+ * the blocker has been deactivated and false is returned.
+ */
+bool suspend_blocker_is_active(struct suspend_blocker *blocker)
+{
+ WARN_ON(!(blocker->flags & SB_INITIALIZED));
+
+ return !!(blocker->flags & SB_ACTIVE);
+}
+EXPORT_SYMBOL(suspend_blocker_is_active);
+
+bool opportunistic_suspend_valid_state(suspend_state_t state)
+{
+ return (state == PM_SUSPEND_ON) || valid_state(state);
+}
+
+int opportunistic_suspend_state(suspend_state_t state)
+{
+ unsigned long irqflags;
+
+ if (!opportunistic_suspend_valid_state(state))
+ return -ENODEV;
+
+ spin_lock_irqsave(&state_lock, irqflags);
+
+ if (debug_mask & DEBUG_USER_STATE)
+ pr_info_time("%s: %s (%d->%d) at %lld ", __func__,
+ state != PM_SUSPEND_ON ? "sleep" : "wakeup",
+ requested_suspend_state, state,
+ ktime_to_ns(ktime_get()));
+
+ requested_suspend_state = state;
+#ifdef CONFIG_EARLYSUSPEND
+ request_early_suspend_state(state == PM_SUSPEND_ON);
+#else
+ if (state == PM_SUSPEND_ON)
+ suspend_block(&main_suspend_blocker);
+ else
+ suspend_unblock(&main_suspend_blocker);
+#endif
+
+ spin_unlock_irqrestore(&state_lock, irqflags);
+
+ return 0;
+}
+
+void __init opportunistic_suspend_init(void)
+{
+ suspend_blocker_register(&main_suspend_blocker);
+ suspend_block(&main_suspend_blocker);
+ suspend_blocker_register(&unknown_wakeup);
+ init_dropped_suspend_blockers();
+}
+
+static struct dentry *suspend_blocker_stats_dentry;
+
+#ifdef CONFIG_SUSPEND_BLOCKER_STATS
+static int print_blocker_stats(struct seq_file *m, const char *name,
+ struct suspend_blocker_stats *stat,
+ struct suspend_blocker *blocker)
+{
+ int lock_count = stat->count;
+ int expire_count = stat->expire_count;
+ ktime_t active_time = ktime_set(0, 0);
+ ktime_t total_time = stat->total_time;
+ ktime_t max_time = stat->max_time;
+ ktime_t prevent_suspend_time = stat->prevent_suspend_time;
+
+ if (blocker && blocker->flags & SB_ACTIVE) {
+ ktime_t now, add_time;
+ bool expired = stats_get_expired_time(blocker, &now);
+ if (!expired)
+ now = ktime_get();
+ add_time = ktime_sub(now, stat->last_time);
+ lock_count++;
+ if (!expired)
+ active_time = add_time;
+ else
+ expire_count++;
+ total_time = ktime_add(total_time, add_time);
+ if (blocker->flags & SB_PREVENTING_SUSPEND)
+ prevent_suspend_time = ktime_add(prevent_suspend_time,
+ ktime_sub(now, last_sleep_time_update));
+ if (add_time.tv64 > max_time.tv64)
+ max_time = add_time;
+ }
+
+ return seq_printf(m, "\"%s\"\t%d\t%d\t%d\t%lld\t%lld\t%lld\t%lld\t"
+ "%lld\n", name, lock_count, expire_count,
+ stat->wakeup_count, ktime_to_ns(active_time),
+ ktime_to_ns(total_time),
+ ktime_to_ns(prevent_suspend_time),
+ ktime_to_ns(max_time),
+ ktime_to_ns(stat->last_time));
+}
+
+static int suspend_blocker_stats_show(struct seq_file *m, void *unused)
+{
+ unsigned long irqflags;
+ struct suspend_blocker *blocker;
+
+ seq_puts(m, "name\tcount\texpire_count\twake_count\tactive_since"
+ "\ttotal_time\tsleep_time\tmax_time\tlast_change\n");
+
+ spin_lock_irqsave(&list_lock, irqflags);
+ list_for_each_entry(blocker, &active_blockers, link)
+ print_blocker_stats(m,
+ blocker->name, &blocker->stat, blocker);
+
+ list_for_each_entry(blocker, &inactive_blockers, link)
+ print_blocker_stats(m,
+ blocker->name, &blocker->stat, blocker);
+
+	print_blocker_stats(m, "deleted", &dropped_suspend_blockers, NULL);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return 0;
+}
+
+#else
+
+static int suspend_blocker_stats_show(struct seq_file *m, void *unused)
+{
+ unsigned long irqflags;
+ struct suspend_blocker *blocker;
+
+ seq_puts(m, "name\tactive\n");
+ spin_lock_irqsave(&list_lock, irqflags);
+ list_for_each_entry(blocker, &inactive_blockers, link)
+ seq_printf(m, "\"%s\"\t0\n", blocker->name);
+ list_for_each_entry(blocker, &active_blockers, link)
+ seq_printf(m, "\"%s\"\t1\n", blocker->name);
+ spin_unlock_irqrestore(&list_lock, irqflags);
+ return 0;
+}
+
+#endif
+
+static int suspend_blocker_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, suspend_blocker_stats_show, NULL);
+}
+
+static const struct file_operations suspend_blocker_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = suspend_blocker_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init suspend_blocker_debugfs_init(void)
+{
+ suspend_blocker_stats_dentry = debugfs_create_file("suspend_blockers",
+ S_IRUGO, NULL, NULL, &suspend_blocker_stats_fops);
+ return 0;
+}
+
+postcore_initcall(suspend_blocker_debugfs_init);
+
+static void suspend_blocking_work_complete(struct suspend_blocking_work *work)
+{
+ unsigned long flags;
+
+ WARN_ON(!work->active);
+ spin_lock_irqsave(&work->lock, flags);
+ if (!--work->active)
+ suspend_unblock(&work->suspend_blocker);
+ spin_unlock_irqrestore(&work->lock, flags);
+}
+
+static void suspend_blocking_work_func(struct work_struct *work)
+{
+ struct suspend_blocking_work *sbwork = to_suspend_blocking_work(work);
+
+ sbwork->func(work);
+ suspend_blocking_work_complete(sbwork);
+}
+
+/**
+ * suspend_blocking_work_init - Initialize a suspend-blocking work item.
+ * @work: Work item to initialize.
+ * @func: Callback.
+ * @name: Name for suspend blocker.
+ *
+ */
+void suspend_blocking_work_init(struct suspend_blocking_work *work,
+ work_func_t func, const char *name)
+{
+ INIT_WORK(&work->work, suspend_blocking_work_func);
+ suspend_blocker_init(&work->suspend_blocker, name);
+ work->func = func;
+ spin_lock_init(&work->lock);
+ work->active = 0;
+}
+EXPORT_SYMBOL_GPL(suspend_blocking_work_init);
+
+/**
+ * cancel_suspend_blocking_work_sync - Cancel a suspend-blocking work item.
+ * @work: Work item to handle.
+ */
+int cancel_suspend_blocking_work_sync(struct suspend_blocking_work *work)
+{
+ int ret;
+
+ ret = cancel_work_sync(&work->work);
+ if (ret)
+ suspend_blocking_work_complete(work);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cancel_suspend_blocking_work_sync);
+
+/**
+ * suspend_blocking_work_destroy - Destroy a suspend-blocking work item.
+ * @work: The work item in question.
+ *
+ * If the work item was ever queued on more than one workqueue, all but
+ * the last workqueue must be flushed before calling
+ * suspend_blocking_work_destroy().
+ */
+void suspend_blocking_work_destroy(struct suspend_blocking_work *work)
+{
+ cancel_suspend_blocking_work_sync(work);
+ WARN_ON(work->active);
+ suspend_blocker_unregister(&work->suspend_blocker);
+}
+EXPORT_SYMBOL_GPL(suspend_blocking_work_destroy);
+
+/**
+ * queue_suspend_blocking_work - Queue a suspend-blocking work item.
+ * @wq: Workqueue to queue the work on.
+ * @work: Work item to queue.
+ */
+int queue_suspend_blocking_work(struct workqueue_struct *wq,
+ struct suspend_blocking_work *work)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ ret = queue_work(wq, &work->work);
+ if (ret) {
+ suspend_block(&work->suspend_blocker);
+ work->active++;
+ }
+ spin_unlock_irqrestore(&work->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(queue_suspend_blocking_work);
+
+/**
+ * schedule_suspend_blocking_work - Schedule a suspend-blocking work item.
+ * @work: Work item to schedule.
+ */
+int schedule_suspend_blocking_work(struct suspend_blocking_work *work)
+{
+ int ret;
+ unsigned long flags;
+
+ spin_lock_irqsave(&work->lock, flags);
+ ret = schedule_work(&work->work);
+ if (ret) {
+ suspend_block(&work->suspend_blocker);
+ work->active++;
+ }
+ spin_unlock_irqrestore(&work->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(schedule_suspend_blocking_work);
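
A short sketch of the suspend-blocking work API defined above, with
illustrative names:

    #include <linux/init.h>
    #include <linux/interrupt.h>
    #include <linux/suspend_blocker.h>

    static struct suspend_blocking_work example_work;

    static void example_work_func(struct work_struct *work)
    {
            /* Suspend is blocked from the moment the work is queued
             * until this callback returns. */
    }

    static int __init example_init(void)
    {
            suspend_blocking_work_init(&example_work, example_work_func,
                                       "example_work");
            return 0;
    }

    static irqreturn_t example_irq(int irq, void *dev_id)
    {
            /* Safe in IRQ context; queue_suspend_blocking_work() and
             * schedule_suspend_blocking_work() take a spinlock with
             * interrupts disabled. */
            schedule_suspend_blocking_work(&example_work);
            return IRQ_HANDLED;
    }
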
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 46c5a26630a..9cc06c469b9 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -236,3 +236,32 @@ static inline void suspend_thaw_processes(void)
{
}
#endif
+
+#ifdef CONFIG_OPPORTUNISTIC_SUSPEND
+/* kernel/power/opportunistic_suspend.c */
+extern struct suspend_blocker main_suspend_blocker;
+extern int opportunistic_suspend_state(suspend_state_t state);
+extern bool opportunistic_suspend_valid_state(suspend_state_t state);
+extern void __init opportunistic_suspend_init(void);
+#else
+static inline void opportunistic_suspend_init(void) {}
+#endif
+#ifdef CONFIG_SUSPEND_BLOCKER_STATS
+void about_to_enter_suspend(void);
+#else
+static inline void about_to_enter_suspend(void) {}
+#endif
+
+#ifdef CONFIG_USER_WAKELOCK
+ssize_t wake_lock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_lock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+ssize_t wake_unlock_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf);
+ssize_t wake_unlock_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n);
+#endif
+
+/* kernel/power/earlysuspend.c */
+void request_early_suspend_state(bool on);
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 71ae29052ab..27d26d3eb74 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -38,6 +38,7 @@ static int try_to_freeze_tasks(bool sig_only)
struct timeval start, end;
u64 elapsed_csecs64;
unsigned int elapsed_csecs;
+ bool wakeup = false;
do_gettimeofday(&start);
@@ -63,6 +64,10 @@ static int try_to_freeze_tasks(bool sig_only)
todo++;
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
+ if (todo && suspend_is_blocked()) {
+ wakeup = true;
+ break;
+ }
if (!todo || time_after(jiffies, end_time))
break;
@@ -85,13 +90,15 @@ static int try_to_freeze_tasks(bool sig_only)
* but it cleans up leftover PF_FREEZE requests.
*/
printk("\n");
- printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
+ printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
"(%d tasks refusing to freeze):\n",
+ wakeup ? "aborted" : "failed",
elapsed_csecs / 100, elapsed_csecs % 100, todo);
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
- if (freezing(p) && !freezer_should_skip(p))
+ if (freezing(p) && !freezer_should_skip(p)
+ && elapsed_csecs > 100)
sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 56e7dbb8b99..df694e7efa4 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -20,6 +20,7 @@
#include "power.h"
const char *const pm_states[PM_SUSPEND_MAX] = {
+ [PM_SUSPEND_ON] = "on",
[PM_SUSPEND_STANDBY] = "standby",
[PM_SUSPEND_MEM] = "mem",
};
@@ -157,8 +158,10 @@ static int suspend_enter(suspend_state_t state)
error = sysdev_suspend(PMSG_SUSPEND);
if (!error) {
- if (!suspend_test(TEST_CORE))
+ if (!suspend_is_blocked() && !suspend_test(TEST_CORE)) {
+ about_to_enter_suspend();
error = suspend_ops->enter(state);
+ }
sysdev_resume();
}
diff --git a/kernel/power/user_suspend_blocker.c b/kernel/power/user_suspend_blocker.c
new file mode 100644
index 00000000000..5d3c7b81f36
--- /dev/null
+++ b/kernel/power/user_suspend_blocker.c
@@ -0,0 +1,168 @@
+/*
+ * kernel/power/user_suspend_blocker.c
+ *
+ * Copyright (C) 2009-2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/suspend_ioctls.h>
+
+enum {
+ DEBUG_FAILURE = BIT(0),
+};
+static int debug_mask = DEBUG_FAILURE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static int unclean_exit_grace_period;
+module_param_named(unclean_exit_grace_period, unclean_exit_grace_period, int,
+ S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(ioctl_lock);
+static struct suspend_blocker unclean_exit_suspend_blocker;
+
+#define USER_SUSPEND_BLOCKER_NAME_LEN 31
+
+struct user_suspend_blocker {
+ struct suspend_blocker blocker;
+ char name[USER_SUSPEND_BLOCKER_NAME_LEN + 1];
+ bool registered;
+};
+
+static int user_suspend_blocker_open(struct inode *inode, struct file *filp)
+{
+ struct user_suspend_blocker *blocker;
+
+ blocker = kzalloc(sizeof(*blocker), GFP_KERNEL);
+ if (!blocker)
+ return -ENOMEM;
+
+ nonseekable_open(inode, filp);
+ strcpy(blocker->name, "(userspace)");
+ blocker->blocker.name = blocker->name;
+ filp->private_data = blocker;
+
+ return 0;
+}
+
+static int suspend_blocker_set_name(struct user_suspend_blocker *blocker,
+ void __user *name, size_t name_len)
+{
+ if (blocker->registered)
+ return -EBUSY;
+
+ if (name_len > USER_SUSPEND_BLOCKER_NAME_LEN)
+ name_len = USER_SUSPEND_BLOCKER_NAME_LEN;
+
+ if (copy_from_user(blocker->name, name, name_len))
+ return -EFAULT;
+ blocker->name[name_len] = '\0';
+
+ return 0;
+}
+
+static long user_suspend_blocker_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long _arg)
+{
+ void __user *arg = (void __user *)_arg;
+ struct user_suspend_blocker *blocker = filp->private_data;
+ struct timespec ts;
+ unsigned long timeout;
+ long ret = 0;
+
+ mutex_lock(&ioctl_lock);
+ if ((cmd & ~IOCSIZE_MASK) == SUSPEND_BLOCKER_IOCTL_SET_NAME(0)) {
+ ret = suspend_blocker_set_name(blocker, arg, _IOC_SIZE(cmd));
+ goto done;
+ }
+ if (!blocker->registered) {
+ suspend_blocker_register(&blocker->blocker);
+ blocker->registered = true;
+ }
+ switch (cmd) {
+ case SUSPEND_BLOCKER_IOCTL_BLOCK:
+ suspend_block(&blocker->blocker);
+ break;
+
+ case SUSPEND_BLOCKER_IOCTL_BLOCK_TIMEOUT:
+ if (copy_from_user(&ts, arg, sizeof(ts))) {
+ ret = -EFAULT;
+ goto done;
+ }
+ timeout = timespec_to_jiffies(&ts);
+ suspend_block_timeout(&blocker->blocker, timeout);
+ break;
+
+ case SUSPEND_BLOCKER_IOCTL_UNBLOCK:
+ suspend_unblock(&blocker->blocker);
+ break;
+
+ default:
+ ret = -ENOTTY;
+ }
+done:
+ if (ret && (debug_mask & DEBUG_FAILURE))
+ pr_err("user_suspend_blocker_ioctl: cmd %x failed, %ld\n",
+ cmd, ret);
+ mutex_unlock(&ioctl_lock);
+ return ret;
+}
+
+static int user_suspend_blocker_release(struct inode *inode, struct file *filp)
+{
+ struct user_suspend_blocker *blocker = filp->private_data;
+
+ if (suspend_blocker_is_active(&blocker->blocker) &&
+ unclean_exit_grace_period)
+ suspend_block_timeout(&unclean_exit_suspend_blocker,
+ unclean_exit_grace_period * HZ);
+
+ if (blocker->registered)
+ suspend_blocker_unregister(&blocker->blocker);
+ kfree(blocker);
+
+ return 0;
+}
+
+static const struct file_operations user_suspend_blocker_fops = {
+ .open = user_suspend_blocker_open,
+ .release = user_suspend_blocker_release,
+ .unlocked_ioctl = user_suspend_blocker_ioctl,
+};
+
+static struct miscdevice user_suspend_blocker_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "suspend_blocker",
+ .fops = &user_suspend_blocker_fops,
+};
+
+static int __init user_suspend_blocker_init(void)
+{
+ suspend_blocker_init(&unclean_exit_suspend_blocker,
+ "user-unclean-exit");
+ return misc_register(&user_suspend_blocker_device);
+}
+
+static void __exit user_suspend_blocker_exit(void)
+{
+ misc_deregister(&user_suspend_blocker_device);
+ suspend_blocker_unregister(&unclean_exit_suspend_blocker);
+}
+
+module_init(user_suspend_blocker_init);
+module_exit(user_suspend_blocker_exit);
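For reference, a minimal user-space client of the device registered above might look like the following (illustrative only: it assumes udev creates /dev/suspend_blocker for the dynamic misc minor, and that the SUSPEND_BLOCKER_IOCTL_* macros come from the linux/suspend_ioctls.h additions in this merge):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
	const char name[] = "example";
	int fd = open("/dev/suspend_blocker", O_RDWR);

	if (fd < 0)
		return 1;
	/* The name can only be set before the blocker is registered. */
	ioctl(fd, SUSPEND_BLOCKER_IOCTL_SET_NAME(strlen(name)), name);
	ioctl(fd, SUSPEND_BLOCKER_IOCTL_BLOCK);	/* hold off suspend */
	/* ... do work that must not be interrupted by suspend ... */
	ioctl(fd, SUSPEND_BLOCKER_IOCTL_UNBLOCK);
	/* Closing the fd unregisters the blocker; if it is still active,
	 * the unclean-exit grace period above may keep suspend blocked. */
	close(fd);
	return 0;
}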
diff --git a/kernel/power/userwakelock.c b/kernel/power/userwakelock.c
new file mode 100644
index 00000000000..34af809c7e5
--- /dev/null
+++ b/kernel/power/userwakelock.c
@@ -0,0 +1,219 @@
+/* kernel/power/userwakelock.c
+ *
+ * Copyright (C) 2005-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#include "power.h"
+
+enum {
+ DEBUG_FAILURE = BIT(0),
+ DEBUG_ERROR = BIT(1),
+ DEBUG_NEW = BIT(2),
+ DEBUG_ACCESS = BIT(3),
+ DEBUG_LOOKUP = BIT(4),
+};
+static int debug_mask = DEBUG_FAILURE;
+module_param_named(debug_mask, debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static DEFINE_MUTEX(tree_lock);
+
+struct user_wake_lock {
+ struct rb_node node;
+ struct wake_lock wake_lock;
+ char name[0];
+};
+static struct rb_root user_wake_locks;
+
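+/*
+ * Parse "<name>[ <timeout_ns>]" from buf and look the lock up in the
+ * rbtree, allocating a new entry when @allocate is set.  The optional
+ * timeout is given in nanoseconds and rounded up to at least one
+ * jiffy in *timeoutptr; *timeoutptr is set to 0 when no timeout is
+ * supplied.  Caller must hold tree_lock.
+ */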
+static struct user_wake_lock *lookup_wake_lock_name(
+ const char *buf, int allocate, long *timeoutptr)
+{
+ struct rb_node **p = &user_wake_locks.rb_node;
+ struct rb_node *parent = NULL;
+ struct user_wake_lock *l;
+ int diff;
+ u64 timeout;
+ int name_len;
+ const char *arg;
+
+ /* Find length of lock name and start of optional timeout string */
+ arg = buf;
+ while (*arg && !isspace(*arg))
+ arg++;
+ name_len = arg - buf;
+ if (!name_len)
+ goto bad_arg;
+ while (isspace(*arg))
+ arg++;
+
+ /* Process timeout string */
+ if (timeoutptr && *arg) {
+ timeout = simple_strtoull(arg, (char **)&arg, 0);
+ while (isspace(*arg))
+ arg++;
+ if (*arg)
+ goto bad_arg;
+ /* convert timeout from nanoseconds to jiffies > 0 */
+ timeout += (NSEC_PER_SEC / HZ) - 1;
+ do_div(timeout, (NSEC_PER_SEC / HZ));
+		if (timeout == 0)
+ timeout = 1;
+ *timeoutptr = timeout;
+	} else if (*arg) {
+		goto bad_arg;
+	} else if (timeoutptr) {
+		*timeoutptr = 0;
+	}
+
+ /* Lookup wake lock in rbtree */
+ while (*p) {
+ parent = *p;
+ l = rb_entry(parent, struct user_wake_lock, node);
+ diff = strncmp(buf, l->name, name_len);
+ if (!diff && l->name[name_len])
+ diff = -1;
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: compare %.*s %s %d\n",
+ name_len, buf, l->name, diff);
+
+ if (diff < 0)
+ p = &(*p)->rb_left;
+ else if (diff > 0)
+ p = &(*p)->rb_right;
+ else
+ return l;
+ }
+
+ /* Allocate and add new wakelock to rbtree */
+ if (!allocate) {
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: %.*s not found\n",
+ name_len, buf);
+ return ERR_PTR(-EINVAL);
+ }
+ l = kzalloc(sizeof(*l) + name_len + 1, GFP_KERNEL);
+ if (l == NULL) {
+ if (debug_mask & DEBUG_FAILURE)
+ pr_err("lookup_wake_lock_name: failed to allocate "
+ "memory for %.*s\n", name_len, buf);
+ return ERR_PTR(-ENOMEM);
+ }
+ memcpy(l->name, buf, name_len);
+ if (debug_mask & DEBUG_NEW)
+ pr_info("lookup_wake_lock_name: new wake lock %s\n", l->name);
+ wake_lock_init(&l->wake_lock, WAKE_LOCK_SUSPEND, l->name);
+ rb_link_node(&l->node, parent, p);
+ rb_insert_color(&l->node, &user_wake_locks);
+ return l;
+
+bad_arg:
+ if (debug_mask & DEBUG_ERROR)
+ pr_info("lookup_wake_lock_name: wake lock, %.*s, bad arg, %s\n",
+ name_len, buf, arg);
+ return ERR_PTR(-EINVAL);
+}
+
+ssize_t wake_lock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_lock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ long timeout;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 1, &timeout);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto bad_name;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_lock_store: %s, timeout %ld\n", l->name, timeout);
+
+ if (timeout)
+ wake_lock_timeout(&l->wake_lock, timeout);
+ else
+ wake_lock(&l->wake_lock);
+bad_name:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
+ssize_t wake_unlock_show(
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ char *s = buf;
+ char *end = buf + PAGE_SIZE;
+ struct rb_node *n;
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+
+ for (n = rb_first(&user_wake_locks); n != NULL; n = rb_next(n)) {
+ l = rb_entry(n, struct user_wake_lock, node);
+ if (!wake_lock_active(&l->wake_lock))
+ s += scnprintf(s, end - s, "%s ", l->name);
+ }
+ s += scnprintf(s, end - s, "\n");
+
+ mutex_unlock(&tree_lock);
+ return (s - buf);
+}
+
+ssize_t wake_unlock_store(
+ struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ struct user_wake_lock *l;
+
+ mutex_lock(&tree_lock);
+ l = lookup_wake_lock_name(buf, 0, NULL);
+ if (IS_ERR(l)) {
+ n = PTR_ERR(l);
+ goto not_found;
+ }
+
+ if (debug_mask & DEBUG_ACCESS)
+ pr_info("wake_unlock_store: %s\n", l->name);
+
+ wake_unlock(&l->wake_lock);
+not_found:
+ mutex_unlock(&tree_lock);
+ return n;
+}
+
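The two store handlers give user space a name-keyed wake-lock interface: writing "<name>" to wake_lock grabs the lock indefinitely, writing "<name> <timeout_ns>" grabs it with a timeout, and writing "<name>" to wake_unlock releases it. A small illustrative client (assuming kernel/power/main.c in this merge exposes these handlers as /sys/power/wake_lock and /sys/power/wake_unlock):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static void sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd >= 0) {
		write(fd, val, strlen(val));
		close(fd);
	}
}

int main(void)
{
	/* Grab "mylock" with a 500 ms (500000000 ns) timeout... */
	sysfs_write("/sys/power/wake_lock", "mylock 500000000");
	/* ...or release it early by name. */
	sysfs_write("/sys/power/wake_unlock", "mylock");
	return 0;
}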
diff --git a/kernel/power/wakelocktest.c b/kernel/power/wakelocktest.c
new file mode 100644
index 00000000000..d806d2501e0
--- /dev/null
+++ b/kernel/power/wakelocktest.c
@@ -0,0 +1,239 @@
+/* kernel/power/wakelocktest.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/pm_qos_params.h>
+#include <linux/uaccess.h>
+#include <linux/wakelock.h>
+
+static DEFINE_MUTEX(ioctl_lock);
+static struct wake_lock test_wake_lock;
+static DEFINE_SPINLOCK(test_spin_lock);
+static atomic_t test_atomic;
+static atomic_t test_atomic2;
+
+static void test_wake_lock1(int count, void *arg)
+{
+ while (count-- > 0)
+ wake_lock(&test_wake_lock);
+}
+
+static void test_wake_unlock(int count, void *arg)
+{
+ while (count-- > 0)
+ wake_unlock(&test_wake_lock);
+}
+
+static void test_wake_lock_unlock(int count, void *arg)
+{
+ while (count-- > 0) {
+ wake_lock(&test_wake_lock);
+ wake_unlock(&test_wake_lock);
+ }
+}
+
+static void test_has_wake_lock(int count, void *arg)
+{
+ while (count-- > 0)
+ has_wake_lock(WAKE_LOCK_IDLE);
+}
+
+static void test_atomic_inc(int count, void *arg)
+{
+ while (count-- > 0)
+ atomic_inc(&test_atomic);
+}
+
+static void atomic_lock(void)
+{
+ if (atomic_cmpxchg(&test_atomic2, 0, 1) == 0)
+ atomic_inc(&test_atomic);
+}
+
+static void atomic_unlock(void)
+{
+ if (atomic_cmpxchg(&test_atomic2, 1, 0) == 1)
+ if (atomic_dec_return(&test_atomic) == -1)
+ pr_info("atomic_unlock schedule sleep\n");
+}
+
+static void test_atomic_lock(int count, void *arg)
+{
+ while (count-- > 0)
+ atomic_lock();
+}
+
+static void test_atomic_unlock(int count, void *arg)
+{
+ while (count-- > 0)
+ atomic_unlock();
+}
+
+static void test_atomic_lock_unlock(int count, void *arg)
+{
+ while (count-- > 0) {
+ atomic_lock();
+ atomic_unlock();
+ }
+}
+
+static void test_irq_save_restore(int count, void *arg)
+{
+ unsigned long flags;
+ while (count-- > 0) {
+ local_irq_save(flags);
+ local_irq_restore(flags);
+ }
+}
+
+static void test_preempt_disable_enable(int count, void *arg)
+{
+ while (count-- > 0) {
+ preempt_disable();
+ preempt_enable();
+ }
+}
+
+static void test_spin_lock_unlock_irqsave(int count, void *arg)
+{
+ unsigned long irqflags;
+ while (count-- > 0) {
+ spin_lock_irqsave(&test_spin_lock, irqflags);
+ spin_unlock_irqrestore(&test_spin_lock, irqflags);
+ }
+}
+
+static void test_pm_qos_update_no_change(int count, void *arg)
+{
+ char *qos_name = arg;
+ while (count-- > 0)
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, qos_name, 0);
+}
+
+static void test_pm_qos_update_pair(int count, void *arg)
+{
+ char *qos_name = arg;
+ while (count-- > 0) {
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, qos_name, 1);
+ pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, qos_name, 0);
+ }
+}
+
+static struct {
+ const char *name;
+ void (*func)(int count, void *arg);
+ void *arg;
+} test_func[] = {
+ { "test_wake_lock", test_wake_lock1 },
+ { "test_wake_unlock", test_wake_unlock },
+ { "test_wake_lock_unlock", test_wake_lock_unlock },
+ { "test_has_wake_lock", test_has_wake_lock },
+ { "test_atomic_inc", test_atomic_inc },
+ { "test_atomic_lock", test_atomic_lock },
+ { "test_atomic_unlock", test_atomic_unlock },
+ { "test_atomic_lock_unlock", test_atomic_lock_unlock },
+ { "test_irq_save_restore", test_irq_save_restore },
+ { "test_preempt_disable_enable", test_preempt_disable_enable },
+ { "test_spin_lock_unlock_irqsave", test_spin_lock_unlock_irqsave },
+	{ "test_pm_qos_update_no_change test",
+	  test_pm_qos_update_no_change, "test" },
+	{ "test_pm_qos_update_pair test", test_pm_qos_update_pair, "test" },
+	{ "test_pm_qos_update_no_change test0",
+	  test_pm_qos_update_no_change, "test0" },
+	{ "test_pm_qos_update_pair test0", test_pm_qos_update_pair, "test0" },
+	{ "test_pm_qos_update_no_change test9",
+	  test_pm_qos_update_no_change, "test9" },
+	{ "test_pm_qos_update_pair test9", test_pm_qos_update_pair, "test9" },
+};
+
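+/*
+ * Writing an iteration count to the device runs every entry in
+ * test_func[] twice, once with interrupts enabled and once with them
+ * disabled, logging the total and per-iteration time via pr_info().
+ */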
+static ssize_t test_wake_lock_write(
+ struct file *file, const char __user *buf, size_t len, loff_t *off)
+{
+ int i;
+ int j;
+ int count;
+ ktime_t t1, t2, td;
+ u64 ti;
+ char string[256];
+
+ if (len >= sizeof(string))
+ len = sizeof(string) - 1;
+	if (copy_from_user(string, buf, len))
+ return -EFAULT;
+ string[len] = '\0';
+	count = simple_strtol(string, NULL, 0);
+	if (count <= 0)	/* avoid a zero divisor in ktime_divns() below */
+		return -EINVAL;
+
+	for (j = 0; j < 2; j++) {
+ if (j == 0)
+ pr_info("running test with interrupts enabled\n");
+ else if (j == 1) {
+ pr_info("running test with interrupts disabled\n");
+ local_irq_disable();
+ }
+ for (i = 0; i < ARRAY_SIZE(test_func); i++) {
+ t1 = ktime_get();
+ test_func[i].func(count, test_func[i].arg);
+ t2 = ktime_get();
+ td = ktime_sub(t2, t1);
+ ti = ktime_divns(td, count);
+ pr_info("%-35s: %7d iterations in %11lld ns, "
+ "%6lld ns per iteration\n",
+ test_func[i].name, count, ktime_to_ns(td), ti);
+ }
+ if (j == 1)
+ local_irq_enable();
+ }
+ return len;
+}
+
+static const struct file_operations test_wakelock_fops = {
+	.write = test_wake_lock_write,
+};
+
+static struct miscdevice test_wakelock_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "wakelocktest",
+ .fops = &test_wakelock_fops,
+};
+
+static int __init test_wakelock_init(void)
+{
+ int i;
+ char qos_name[] = "test?";
+ wake_lock_init(&test_wake_lock, WAKE_LOCK_SUSPEND, "test-wake-lock");
+ for (i = 0; i < 10; i++) {
+ qos_name[4] = '0' + i;
+ pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, qos_name, 0);
+ }
+ return misc_register(&test_wakelock_device);
+}
+
+static void __exit test_wakelock_exit(void)
+{
+ int i;
+ char qos_name[] = "test?";
+ misc_deregister(&test_wakelock_device);
+ for (i = 0; i < 10; i++) {
+ qos_name[4] = '0' + i;
+ pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, qos_name);
+ }
+ wake_lock_destroy(&test_wake_lock);
+}
+
+module_init(test_wakelock_init);
+module_exit(test_wakelock_exit);
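Usage of the test device is a single write of an iteration count (illustrative only; assumes udev creates /dev/wakelocktest for the dynamic misc minor):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/wakelocktest", O_WRONLY);

	if (fd < 0)
		return 1;
	write(fd, "100000", 6);	/* 100000 iterations per benchmark */
	close(fd);
	return 0;	/* per-test timings land in the kernel log */
}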