summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKelly Rossmoyer <krossmo@google.com>2020-04-07 12:25:33 -0700
committerAlistair Delva <adelva@google.com>2020-04-09 15:28:36 +0000
commit8c29afa6013897f201e7188d27eb2c60564070f9 (patch)
treeb4de19880bd3addccecebddb99471b6cfb75ad6e
parent3463750c5ba30e0fd1d2f10805568e569a6cb244 (diff)
downloadgoldfish-android-4.14.tar.gz
ANDROID: power: wakeup_reason: wake reason enhancementsb120914098android-4.14
These changes build upon the existing Android kernel wakeup reason code to: * improve the positioning of suspend abort logging calls in suspend flow * add logging of abnormal wakeup reasons like unexpected HW IRQs and IRQs configured as both wake-enabled and no-suspend * add support for capturing deferred-processing threaded nested IRQs as wakeup reasons rather than their synchronously-processed parents Bug: 150970830 Bug: 140217217 Signed-off-by: Kelly Rossmoyer <krossmo@google.com> Change-Id: I903b811a0fe11a605a25815c3a341668a23de700
-rw-r--r--drivers/base/power/main.c24
-rw-r--r--drivers/base/power/wakeup.c23
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--include/linux/wakeup_reason.h11
-rw-r--r--kernel/irq/chip.c17
-rw-r--r--kernel/power/process.c9
-rw-r--r--kernel/power/suspend.c20
-rw-r--r--kernel/power/wakeup_reason.c417
8 files changed, 374 insertions, 151 deletions
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index df97fb7cbfb5..f3b6afaf09c7 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1173,10 +1173,13 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a
}
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
+ if (!error) {
dev->power.is_noirq_suspended = true;
- else
+ } else {
async_error = error;
+ log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+ dev_name(dev), callback, error);
+ }
Complete:
complete_all(&dev->power.completion);
@@ -1334,10 +1337,13 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as
}
error = dpm_run_callback(callback, dev, state, info);
- if (!error)
+ if (!error) {
dev->power.is_late_suspended = true;
- else
+ } else {
async_error = error;
+ log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+ dev_name(dev), callback, error);
+ }
Complete:
TRACE_SUSPEND(error);
@@ -1495,7 +1501,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_callback_t callback = NULL;
const char *info = NULL;
int error = 0;
- char suspend_abort[MAX_SUSPEND_ABORT_LEN];
DECLARE_DPM_WATCHDOG_ON_STACK(wd);
TRACE_DEVICE(dev);
@@ -1518,9 +1523,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
pm_wakeup_event(dev, 0);
if (pm_wakeup_pending()) {
- pm_get_active_wakeup_sources(suspend_abort,
- MAX_SUSPEND_ABORT_LEN);
- log_suspend_abort_reason(suspend_abort);
dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
@@ -1599,7 +1601,6 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
dev->power.is_suspended = true;
if (parent) {
spin_lock_irq(&parent->power.lock);
-
dev->parent->power.direct_complete = false;
if (dev->power.wakeup_path
&& !dev->parent->power.ignore_children)
@@ -1608,6 +1609,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
spin_unlock_irq(&parent->power.lock);
}
dpm_clear_suppliers_direct_complete(dev);
+ } else {
+ log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+ dev_name(dev), callback, error);
}
device_unlock(dev);
@@ -1817,6 +1821,8 @@ int dpm_prepare(pm_message_t state)
printk(KERN_INFO "PM: Device %s not prepared "
"for power transition: code %d\n",
dev_name(dev), error);
+ log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
+ dev_name(dev), error);
dpm_save_failed_dev(dev_name(dev));
put_device(dev);
break;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 6f7e8f19c0a2..cf5ac5668ea6 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -15,7 +15,9 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
-#include <linux/types.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/wakeup_reason.h>
#include <trace/events/power.h>
#include "power.h"
@@ -925,6 +927,7 @@ bool pm_wakeup_pending(void)
{
unsigned long flags;
bool ret = false;
+ char suspend_abort[MAX_SUSPEND_ABORT_LEN];
spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
@@ -937,8 +940,10 @@ bool pm_wakeup_pending(void)
spin_unlock_irqrestore(&events_lock, flags);
if (ret) {
- pr_info("PM: Wakeup pending, aborting suspend\n");
- pm_print_active_wakeup_sources();
+ pm_get_active_wakeup_sources(suspend_abort,
+ MAX_SUSPEND_ABORT_LEN);
+ log_suspend_abort_reason(suspend_abort);
+ pr_info("PM: %s\n", suspend_abort);
}
return ret || atomic_read(&pm_abort_suspend) > 0;
@@ -966,6 +971,18 @@ void pm_wakeup_clear(bool reset)
void pm_system_irq_wakeup(unsigned int irq_number)
{
if (pm_wakeup_irq == 0) {
+ struct irq_desc *desc;
+ const char *name = "null";
+
+ desc = irq_to_desc(irq_number);
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ log_irq_wakeup_reason(irq_number);
+ pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
+
pm_wakeup_irq = irq_number;
pm_system_wakeup();
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 7d34ab83bb7f..483f8ac27eea 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -28,6 +28,8 @@
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/wakeup_reason.h>
+
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
@@ -359,6 +361,8 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
+ log_abnormal_wakeup_reason(
+ "unexpected HW IRQ %u", irqnr);
if (static_key_true(&supports_deactivate)) {
if (irqnr < 8192)
gic_write_dir(irqnr);
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
index d84d8c301546..54f5caaa5cde 100644
--- a/include/linux/wakeup_reason.h
+++ b/include/linux/wakeup_reason.h
@@ -20,13 +20,18 @@
#define MAX_SUSPEND_ABORT_LEN 256
-void log_wakeup_reason(int irq);
-int check_wakeup_reason(int irq);
-
#ifdef CONFIG_SUSPEND
+void log_irq_wakeup_reason(int irq);
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
void log_suspend_abort_reason(const char *fmt, ...);
+void log_abnormal_wakeup_reason(const char *fmt, ...);
+void clear_wakeup_reasons(void);
#else
+static inline void log_irq_wakeup_reason(int irq) { }
+static inline void log_threaded_irq_wakeup_reason(int irq, int parent_irq) { }
static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+static inline void log_abnormal_wakeup_reason(const char *fmt, ...) { }
+static inline void clear_wakeup_reasons(void) { }
#endif
#endif /* _LINUX_WAKEUP_REASON_H */
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 317fc759de76..9da08b53d06a 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -16,6 +16,7 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
#include <trace/events/irq.h>
@@ -480,8 +481,22 @@ static bool irq_may_run(struct irq_desc *desc)
* If the interrupt is not in progress and is not an armed
* wakeup interrupt, proceed.
*/
- if (!irqd_has_set(&desc->irq_data, mask))
+ if (!irqd_has_set(&desc->irq_data, mask)) {
+#ifdef CONFIG_PM_SLEEP
+ if (unlikely(desc->no_suspend_depth &&
+ irqd_is_wakeup_set(&desc->irq_data))) {
+ unsigned int irq = irq_desc_get_irq(desc);
+ const char *name = "(unnamed)";
+
+ if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
+ irq, name);
+ }
+#endif
return true;
+ }
/*
* If the interrupt is an armed wakeup source, mark it pending
diff --git a/kernel/power/process.c b/kernel/power/process.c
index c366e3d34a07..d76e61606f51 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,6 @@
#include <linux/kmod.h>
#include <trace/events/power.h>
#include <linux/cpuset.h>
-#include <linux/wakeup_reason.h>
/*
* Timeout for stopping processes
@@ -39,9 +38,6 @@ static int try_to_freeze_tasks(bool user_only)
unsigned int elapsed_msecs;
bool wakeup = false;
int sleep_usecs = USEC_PER_MSEC;
-#ifdef CONFIG_PM_SLEEP
- char suspend_abort[MAX_SUSPEND_ABORT_LEN];
-#endif
start = ktime_get_boottime();
@@ -71,11 +67,6 @@ static int try_to_freeze_tasks(bool user_only)
break;
if (pm_wakeup_pending()) {
-#ifdef CONFIG_PM_SLEEP
- pm_get_active_wakeup_sources(suspend_abort,
- MAX_SUSPEND_ABORT_LEN);
- log_suspend_abort_reason(suspend_abort);
-#endif
wakeup = true;
break;
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 053b3014a220..dad1b8127560 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -148,6 +148,7 @@ static void s2idle_loop(void)
break;
pm_wakeup_clear(false);
+ clear_wakeup_reasons();
}
pm_pr_dbg("resume from suspend-to-idle\n");
@@ -361,6 +362,7 @@ static int suspend_prepare(suspend_state_t state)
if (!error)
return 0;
+ log_suspend_abort_reason("One or more tasks refusing to freeze");
suspend_stats.failed_freeze++;
dpm_save_failed_step(SUSPEND_FREEZE);
Finish:
@@ -390,7 +392,6 @@ void __weak arch_suspend_enable_irqs(void)
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
- char suspend_abort[MAX_SUSPEND_ABORT_LEN];
int error, last_dev;
error = platform_suspend_prepare(state);
@@ -402,8 +403,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
last_dev %= REC_FAILED_NUM;
pr_err("late suspend of devices failed\n");
- log_suspend_abort_reason("%s device failed to power down",
- suspend_stats.failed_devs[last_dev]);
+ log_suspend_abort_reason("late suspend of %s device failed",
+ suspend_stats.failed_devs[last_dev]);
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
@@ -421,7 +422,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
last_dev %= REC_FAILED_NUM;
pr_err("noirq suspend of devices failed\n");
log_suspend_abort_reason("noirq suspend of %s device failed",
- suspend_stats.failed_devs[last_dev]);
+ suspend_stats.failed_devs[last_dev]);
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
@@ -450,9 +451,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
trace_suspend_resume(TPS("machine_suspend"),
state, false);
} else if (*wakeup) {
- pm_get_active_wakeup_sources(suspend_abort,
- MAX_SUSPEND_ABORT_LEN);
- log_suspend_abort_reason(suspend_abort);
error = -EBUSY;
}
syscore_resume();
@@ -485,7 +483,7 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
*/
int suspend_devices_and_enter(suspend_state_t state)
{
- int error, last_dev;
+ int error;
bool wakeup = false;
if (!sleep_state_supported(state))
@@ -501,11 +499,9 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
- last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
- last_dev %= REC_FAILED_NUM;
pr_err("Some devices failed to suspend, or early wake event detected\n");
- log_suspend_abort_reason("%s device failed to suspend, or early wake event detected",
- suspend_stats.failed_devs[last_dev]);
+ log_suspend_abort_reason(
+ "Some devices failed to suspend, or early wake event detected");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
index 252611fad2fe..503a71fc49fc 100644
--- a/kernel/power/wakeup_reason.c
+++ b/kernel/power/wakeup_reason.c
@@ -4,7 +4,7 @@
* Logs the reasons which caused the kernel to resume from
* the suspend mode.
*
- * Copyright (C) 2014 Google, Inc.
+ * Copyright (C) 2020 Google, Inc.
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
@@ -26,70 +26,312 @@
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
+#include <linux/slab.h>
+/*
+ * struct wakeup_irq_node - stores data and relationships for IRQs logged as
+ * either base or nested wakeup reasons during suspend/resume flow.
+ * @siblings - for membership on leaf or parent IRQ lists
+ * @irq - the IRQ number
+ * @irq_name - the name associated with the IRQ, or a default if none
+ */
+struct wakeup_irq_node {
+ struct list_head siblings;
+ int irq;
+ const char *irq_name;
+};
+
+static DEFINE_SPINLOCK(wakeup_reason_lock);
+
+static LIST_HEAD(leaf_irqs); /* kept in ascending IRQ sorted order */
+static LIST_HEAD(parent_irqs); /* unordered */
-#define MAX_WAKEUP_REASON_IRQS 32
-static int irq_list[MAX_WAKEUP_REASON_IRQS];
-static int irqcount;
+static struct kmem_cache *wakeup_irq_nodes_cache;
+
+static const char *default_irq_name = "(unnamed)";
+
+static struct kobject *kobj;
+
+static bool capture_reasons;
static bool suspend_abort;
-static char abort_reason[MAX_SUSPEND_ABORT_LEN];
-static struct kobject *wakeup_reason;
-static DEFINE_SPINLOCK(resume_reason_lock);
+static bool abnormal_wake;
+static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];
static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
-static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
- char *buf)
+static void init_node(struct wakeup_irq_node *p, int irq)
{
- int irq_no, buf_offset = 0;
struct irq_desc *desc;
- spin_lock(&resume_reason_lock);
- if (suspend_abort) {
- buf_offset = sprintf(buf, "Abort: %s", abort_reason);
- } else {
- for (irq_no = 0; irq_no < irqcount; irq_no++) {
- desc = irq_to_desc(irq_list[irq_no]);
- if (desc && desc->action && desc->action->name)
- buf_offset += sprintf(buf + buf_offset, "%d %s\n",
- irq_list[irq_no], desc->action->name);
+
+ INIT_LIST_HEAD(&p->siblings);
+
+ p->irq = irq;
+ desc = irq_to_desc(irq);
+ if (desc && desc->action && desc->action->name)
+ p->irq_name = desc->action->name;
+ else
+ p->irq_name = default_irq_name;
+}
+
+static struct wakeup_irq_node *create_node(int irq)
+{
+ struct wakeup_irq_node *result;
+
+ result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+ if (unlikely(!result))
+ pr_warn("Failed to log wakeup IRQ %d\n", irq);
+ else
+ init_node(result, irq);
+
+ return result;
+}
+
+static void delete_list(struct list_head *head)
+{
+ struct wakeup_irq_node *n;
+
+ while (!list_empty(head)) {
+ n = list_first_entry(head, struct wakeup_irq_node, siblings);
+ list_del(&n->siblings);
+ kmem_cache_free(wakeup_irq_nodes_cache, n);
+ }
+}
+
+static bool add_sibling_node_sorted(struct list_head *head, int irq)
+{
+ struct wakeup_irq_node *n;
+ struct list_head *predecessor = head;
+
+ if (unlikely(WARN_ON(!head)))
+ return NULL;
+
+ if (!list_empty(head))
+ list_for_each_entry(n, head, siblings) {
+ if (n->irq < irq)
+ predecessor = &n->siblings;
+ else if (n->irq == irq)
+ return true;
else
- buf_offset += sprintf(buf + buf_offset, "%d\n",
- irq_list[irq_no]);
+ break;
+ }
+
+ n = create_node(irq);
+ if (n) {
+ list_add(&n->siblings, predecessor);
+ return true;
+ }
+
+ return false;
+}
+
+static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
+ int irq)
+{
+ struct wakeup_irq_node *n;
+
+ if (unlikely(WARN_ON(!head)))
+ return NULL;
+
+ list_for_each_entry(n, head, siblings)
+ if (n->irq == irq)
+ return n;
+
+ return NULL;
+}
+
+void log_irq_wakeup_reason(int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ if (!capture_reasons) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (find_node_in_list(&parent_irqs, irq) == NULL)
+ add_sibling_node_sorted(&leaf_irqs, irq);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
+{
+ struct wakeup_irq_node *parent;
+ unsigned long flags;
+
+ /*
+ * Intentionally unsynchronized. Calls that come in after we have
+ * resumed should have a fast exit path since there's no work to be
+ * done, and any coherence issue that could cause a wrong value here is
+ * both highly improbable - given the set/clear timing - and very low
+ * impact (parent IRQ gets logged instead of the specific child).
+ */
+ if (!capture_reasons)
+ return;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ parent = find_node_in_list(&parent_irqs, parent_irq);
+ if (parent != NULL)
+ add_sibling_node_sorted(&leaf_irqs, irq);
+ else {
+ parent = find_node_in_list(&leaf_irqs, parent_irq);
+ if (parent != NULL) {
+ list_del_init(&parent->siblings);
+ list_add_tail(&parent->siblings, &parent_irqs);
+ add_sibling_node_sorted(&leaf_irqs, irq);
}
}
- spin_unlock(&resume_reason_lock);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void __log_abort_or_abnormal_wake(bool abort, const char *fmt, va_list args)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ /* Suspend abort or abnormal wake reason has already been logged. */
+ if (suspend_abort || abnormal_wake) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ suspend_abort = abort;
+ abnormal_wake = !abort;
+ vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __log_abort_or_abnormal_wake(true, fmt, args);
+ va_end(args);
+}
+
+void log_abnormal_wakeup_reason(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __log_abort_or_abnormal_wake(false, fmt, args);
+ va_end(args);
+}
+
+void clear_wakeup_reasons(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ delete_list(&leaf_irqs);
+ delete_list(&parent_irqs);
+ suspend_abort = false;
+ abnormal_wake = false;
+ capture_reasons = true;
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static void print_wakeup_sources(void)
+{
+ struct wakeup_irq_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ capture_reasons = false;
+
+ if (suspend_abort) {
+ pr_info("Abort: %s\n", non_irq_wake_reason);
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (!list_empty(&leaf_irqs))
+ list_for_each_entry(n, &leaf_irqs, siblings)
+ pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+ n->irq_name);
+ else if (abnormal_wake)
+ pr_info("Resume caused by %s\n", non_irq_wake_reason);
+ else
+ pr_info("Resume cause unknown\n");
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t buf_offset = 0;
+ struct wakeup_irq_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ if (suspend_abort) {
+ buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
+ non_irq_wake_reason);
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return buf_offset;
+ }
+
+ if (!list_empty(&leaf_irqs))
+ list_for_each_entry(n, &leaf_irqs, siblings)
+ buf_offset += scnprintf(buf + buf_offset,
+ PAGE_SIZE - buf_offset,
+ "%d %s\n", n->irq, n->irq_name);
+ else if (abnormal_wake)
+ buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
+ non_irq_wake_reason);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+
return buf_offset;
}
static ssize_t last_suspend_time_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- struct timespec sleep_time;
- struct timespec total_time;
- struct timespec suspend_resume_time;
+ struct timespec64 sleep_time;
+ struct timespec64 total_time;
+ struct timespec64 suspend_resume_time;
/*
* total_time is calculated from monotonic bootoffsets because
* unlike CLOCK_MONOTONIC it include the time spent in suspend state.
*/
- total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
+ total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));
/*
* suspend_resume_time is calculated as monotonic (CLOCK_MONOTONIC)
* time interval before entering suspend and post suspend.
*/
- suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
+ suspend_resume_time =
+ ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));
/* sleep_time = total_time - suspend_resume_time */
- sleep_time = timespec_sub(total_time, suspend_resume_time);
+ sleep_time = timespec64_sub(total_time, suspend_resume_time);
/* Export suspend_resume_time and sleep_time in pair here. */
- return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
- suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
- sleep_time.tv_sec, sleep_time.tv_nsec);
+ return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
+ suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
+ sleep_time.tv_sec, sleep_time.tv_nsec);
}
static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
@@ -104,86 +346,24 @@ static struct attribute_group attr_group = {
.attrs = attrs,
};
-/*
- * logs all the wake up reasons to the kernel
- * stores the irqs to expose them to the userspace via sysfs
- */
-void log_wakeup_reason(int irq)
-{
- struct irq_desc *desc;
- desc = irq_to_desc(irq);
- if (desc && desc->action && desc->action->name)
- printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
- desc->action->name);
- else
- printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
-
- spin_lock(&resume_reason_lock);
- if (irqcount == MAX_WAKEUP_REASON_IRQS) {
- spin_unlock(&resume_reason_lock);
- printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
- MAX_WAKEUP_REASON_IRQS);
- return;
- }
-
- irq_list[irqcount++] = irq;
- spin_unlock(&resume_reason_lock);
-}
-
-int check_wakeup_reason(int irq)
-{
- int irq_no;
- int ret = false;
-
- spin_lock(&resume_reason_lock);
- for (irq_no = 0; irq_no < irqcount; irq_no++)
- if (irq_list[irq_no] == irq) {
- ret = true;
- break;
- }
- spin_unlock(&resume_reason_lock);
- return ret;
-}
-
-void log_suspend_abort_reason(const char *fmt, ...)
-{
- va_list args;
-
- spin_lock(&resume_reason_lock);
-
- //Suspend abort reason has already been logged.
- if (suspend_abort) {
- spin_unlock(&resume_reason_lock);
- return;
- }
-
- suspend_abort = true;
- va_start(args, fmt);
- vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
- va_end(args);
- spin_unlock(&resume_reason_lock);
-}
-
/* Detects a suspend and clears all the previous wake up reasons*/
static int wakeup_reason_pm_event(struct notifier_block *notifier,
unsigned long pm_event, void *unused)
{
switch (pm_event) {
case PM_SUSPEND_PREPARE:
- spin_lock(&resume_reason_lock);
- irqcount = 0;
- suspend_abort = false;
- spin_unlock(&resume_reason_lock);
/* monotonic time since boot */
last_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
last_stime = ktime_get_boottime();
+ clear_wakeup_reasons();
break;
case PM_POST_SUSPEND:
/* monotonic time since boot */
curr_monotime = ktime_get();
/* monotonic time since boot including the time spent in suspend */
curr_stime = ktime_get_boottime();
+ print_wakeup_sources();
break;
default:
break;
@@ -195,31 +375,40 @@ static struct notifier_block wakeup_reason_pm_notifier_block = {
.notifier_call = wakeup_reason_pm_event,
};
-/* Initializes the sysfs parameter
- * registers the pm_event notifier
- */
int __init wakeup_reason_init(void)
{
- int retval;
-
- retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
- if (retval)
- printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
- __func__, retval);
+ if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+ pr_warn("[%s] failed to register PM notifier\n", __func__);
+ goto fail;
+ }
- wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
- if (!wakeup_reason) {
- printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
- __func__);
- return 1;
+ kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+ if (!kobj) {
+ pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
+ goto fail_unregister_pm_notifier;
}
- retval = sysfs_create_group(wakeup_reason, &attr_group);
- if (retval) {
- kobject_put(wakeup_reason);
- printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
- __func__, retval);
+
+ if (sysfs_create_group(kobj, &attr_group)) {
+ pr_warn("[%s] failed to create a sysfs group\n", __func__);
+ goto fail_kobject_put;
}
+
+ wakeup_irq_nodes_cache =
+ kmem_cache_create("wakeup_irq_node_cache",
+ sizeof(struct wakeup_irq_node), 0, 0, NULL);
+ if (!wakeup_irq_nodes_cache)
+ goto fail_remove_group;
+
return 0;
+
+fail_remove_group:
+ sysfs_remove_group(kobj, &attr_group);
+fail_kobject_put:
+ kobject_put(kobj);
+fail_unregister_pm_notifier:
+ unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+ return 1;
}
late_initcall(wakeup_reason_init);