2 * kernel/power/wakeup_reason.c
4 * Logs the reasons which caused the kernel to resume from
7 * Copyright (C) 2014 Google, Inc.
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
18 #include <linux/wakeup_reason.h>
19 #include <linux/kernel.h>
20 #include <linux/irq.h>
21 #include <linux/interrupt.h>
23 #include <linux/kobject.h>
24 #include <linux/sysfs.h>
25 #include <linux/init.h>
26 #include <linux/spinlock.h>
27 #include <linux/notifier.h>
28 #include <linux/suspend.h>
29 #include <linux/slab.h>
/*
 * Module-wide state.
 *
 * NOTE(review): the numeric prefix on every line of this file is an
 * extraction artifact (original line numbers); gaps in the numbering
 * show that lines are missing from this excerpt.  Treat this text as a
 * damaged copy, not compilable source.
 */
31 #define MAX_WAKEUP_REASON_IRQS 32
/* Set when a suspend attempt was aborted; abort_reason holds the cause. */
32 static bool suspend_abort;
33 static char abort_reason[MAX_SUSPEND_ABORT_LEN];
/* Root of the per-resume tree of wakeup IRQ nodes. */
35 static struct wakeup_irq_node *base_irq_nodes;
/* Cursor and depth into that tree while chained IRQ dispatch is traced. */
36 static struct wakeup_irq_node *cur_irq_tree;
37 static int cur_irq_tree_depth;
/* Flat list of nodes, rebuilt by get_wakeup_reasons_nosync(). */
38 static LIST_HEAD(wakeup_irqs);
/* Slab cache backing wakeup_irq_node allocations (GFP_ATOMIC callers). */
40 static struct kmem_cache *wakeup_irq_nodes_cache;
/* /sys/kernel/wakeup_reasons kobject, created in wakeup_reason_init(). */
41 static struct kobject *wakeup_reason;
/* Guards suspend_abort/abort_reason and the irq-node tree. */
42 static spinlock_t resume_reason_lock;
/* True while wakeup interrupts are being logged; cleared by
 * stop_logging_wakeup_reasons(). */
43 bool log_wakeups __read_mostly;
/* Signalled once all base wakeup IRQs have been handled (deduce path). */
44 struct completion wakeups_completion;
/* Timestamps bracketing the last suspend; consumed by
 * last_suspend_time_show(). */
46 static ktime_t last_monotime; /* monotonic time before last suspend */
47 static ktime_t curr_monotime; /* monotonic time after last suspend */
48 static ktime_t last_stime; /* monotonic boottime offset before last suspend */
49 static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
/*
 * Initialise a freshly allocated wakeup_irq_node for @irq.
 * NOTE(review): the numbering jumps 51->54->58, so the opening brace
 * and some field initialisations are missing from this excerpt.
 */
51 static void init_wakeup_irq_node(struct wakeup_irq_node *p, int irq)
54 p->desc = irq_to_desc(irq);
58 INIT_LIST_HEAD(&p->siblings);
59 INIT_LIST_HEAD(&p->next);
/*
 * Allocate and initialise a wakeup_irq_node for @irq from the slab
 * cache.  Uses GFP_ATOMIC because callers run in IRQ/atomic context.
 * NOTE(review): the allocation-failure branch and return statements are
 * missing from this excerpt.
 */
62 static struct wakeup_irq_node* alloc_irq_node(int irq)
64 struct wakeup_irq_node *n;
66 n = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
68 pr_warning("Failed to log chained wakeup IRQ %d\n",
73 init_wakeup_irq_node(n, irq);
/*
 * Linear search of @root's sibling list for the node logging @irq.
 * Returns the matching node or NULL.  NOTE(review): the loop body that
 * sets `found` is in lines missing from this excerpt.
 */
77 static struct wakeup_irq_node *
78 search_siblings(struct wakeup_irq_node *root, int irq)
81 struct wakeup_irq_node *n = NULL;
87 list_for_each_entry(n, &root->siblings, siblings) {
94 return found ? n : NULL;
/*
 * Return the sibling of @root that logs @irq, allocating and linking a
 * new node when none exists yet.  NOTE(review): the handling of a NULL
 * @root and of allocation failure is not visible in this excerpt.
 */
97 static struct wakeup_irq_node *
98 add_to_siblings(struct wakeup_irq_node *root, int irq)
100 struct wakeup_irq_node *n;
102 n = search_siblings(root, irq);
106 n = alloc_irq_node(irq);
109 list_add(&n->siblings, &root->siblings);
113 #ifdef CONFIG_DEDUCE_WAKEUP_REASONS
/* Attach a child level under @root for @irq (chained dispatch one level
 * deeper), then reuse add_to_siblings() on the new child list. */
114 static struct wakeup_irq_node* add_child(struct wakeup_irq_node *root, int irq)
117 root->child = alloc_irq_node(irq);
120 root->child->parent = root;
124 return add_to_siblings(root->child, irq);
/* Walk @node's sibling list; the selection criterion is in lines
 * missing from this excerpt. */
127 static struct wakeup_irq_node *find_first_sibling(struct wakeup_irq_node *node)
129 struct wakeup_irq_node *n;
132 list_for_each_entry(n, &node->siblings, siblings) {
/* Presumably climbs back up @depth levels from @node toward the base of
 * the current dispatch chain — loop body missing from this excerpt. */
139 static struct wakeup_irq_node *
140 get_base_node(struct wakeup_irq_node *node, unsigned depth)
146 node = find_first_sibling(node);
154 #endif /* CONFIG_DEDUCE_WAKEUP_REASONS */
156 static const struct list_head* get_wakeup_reasons_nosync(void);
/*
 * Log the resume cause(s) to the kernel log: either the stored abort
 * reason, or one "Resume caused by IRQ ..." line per logged wakeup
 * node, naming the irqaction when one is registered.
 */
158 static void print_wakeup_sources(void)
160 struct wakeup_irq_node *n;
161 const struct list_head *wakeups;
164 pr_info("Abort: %s\n", abort_reason);
168 wakeups = get_wakeup_reasons_nosync();
169 list_for_each_entry(n, wakeups, next) {
170 if (n->desc && n->desc->action && n->desc->action->name)
171 pr_info("Resume caused by IRQ %d, %s\n", n->irq,
172 n->desc->action->name);
/* No irqaction name available — report the IRQ number alone. */
174 pr_info("Resume caused by IRQ %d\n", n->irq);
/*
 * Depth-first traversal of the irq-node tree rooted at @root, invoking
 * @visit(node, cookie) on each node (children before their parent,
 * siblings before @root itself).  A false return from @visit aborts the
 * walk; the final @visit result is returned.  The _safe sibling
 * iterator allows @visit to delete nodes (see delete_node()).
 * NOTE(review): the NULL-@root guard is in lines missing from this
 * excerpt.
 */
178 static bool walk_irq_node_tree(struct wakeup_irq_node *root,
179 bool (*visit)(struct wakeup_irq_node *, void *),
182 struct wakeup_irq_node *n, *t;
187 list_for_each_entry_safe(n, t, &root->siblings, siblings) {
188 if (!walk_irq_node_tree(n->child, visit, cookie))
190 if (!visit(n, cookie))
194 if (!walk_irq_node_tree(root->child, visit, cookie))
196 return visit(root, cookie);
199 #ifdef CONFIG_DEDUCE_WAKEUP_REASONS
/* Tree-walk visitor; body not visible in this excerpt.  By its use in
 * base_irq_nodes_done() it must report whether @n has been handled. */
200 static bool is_node_handled(struct wakeup_irq_node *n, void *_p)
/* True once every logged wakeup IRQ node is marked handled. */
205 static bool base_irq_nodes_done(void)
207 return walk_irq_node_tree(base_irq_nodes, is_node_handled, NULL);
/*
 * Tree-walk visitor for last_resume_reason_show(): append one line per
 * node into the sysfs buffer carried in the buf_cookie @_p, including
 * the irqaction name when one is registered.
 * NOTE(review): buf_offset is not advanced in the visible lines — the
 * update presumably sits in the lines missing from this excerpt.
 */
216 static bool print_leaf_node(struct wakeup_irq_node *n, void *_p)
218 struct buf_cookie *b = _p;
220 if (n->desc && n->desc->action && n->desc->action->name)
222 snprintf(b->buf + b->buf_offset,
223 PAGE_SIZE - b->buf_offset,
225 n->irq, n->desc->action->name);
228 snprintf(b->buf + b->buf_offset,
229 PAGE_SIZE - b->buf_offset,
/*
 * sysfs show() for /sys/kernel/wakeup_reasons/last_resume_reason.
 * Under resume_reason_lock, emits either "Abort: <reason>" (aborted
 * suspend) or one line per logged wakeup IRQ via print_leaf_node().
 */
236 static ssize_t last_resume_reason_show(struct kobject *kobj,
237 struct kobj_attribute *attr,
242 struct buf_cookie b = {
247 spin_lock_irqsave(&resume_reason_lock, flags);
/* Aborted suspend: report the stored abort reason instead of IRQs. */
249 b.buf_offset = snprintf(buf, PAGE_SIZE, "Abort: %s", abort_reason);
251 walk_irq_node_tree(base_irq_nodes, print_leaf_node, &b);
252 spin_unlock_irqrestore(&resume_reason_lock, flags);
/*
 * sysfs show() for /sys/kernel/wakeup_reasons/last_suspend_time.
 * Prints "<suspend/resume path time> <time actually asleep>" as
 * seconds.nanoseconds pairs, derived from the timestamps captured
 * around the last suspend by wakeup_reason_pm_event().
 */
257 static ssize_t last_suspend_time_show(struct kobject *kobj,
258 struct kobj_attribute *attr, char *buf)
260 struct timespec sleep_time;
261 struct timespec total_time;
262 struct timespec suspend_resume_time;
265 * total_time is calculated from monotonic bootoffsets because
266 * unlike CLOCK_MONOTONIC it includes the time spent in suspend state.
268 total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
271 * suspend_resume_time is calculated as monotonic (CLOCK_MONOTONIC)
272 * time interval before entering suspend and post suspend.
274 suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
276 /* sleep_time = total_time - suspend_resume_time */
277 sleep_time = timespec_sub(total_time, suspend_resume_time);
279 /* Export suspend_resume_time and sleep_time in pair here. */
280 return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
281 suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
282 sleep_time.tv_sec, sleep_time.tv_nsec);
/* sysfs plumbing: two read-only attributes, grouped for
 * sysfs_create_group() in wakeup_reason_init().  The initialiser bodies
 * of attrs[] / attr_group are missing from this excerpt. */
285 static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
286 static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
288 static struct attribute *attrs[] = {
293 static struct attribute_group attr_group = {
/* Stop logging wakeup interrupts.  Readers observe the flag through
 * logging_wakeup_reasons() (declared elsewhere, presumably in the
 * header — not visible in this excerpt). */
297 static inline void stop_logging_wakeup_reasons(void)
299 ACCESS_ONCE(log_wakeups) = false;
304 * stores the immediate wakeup irqs; these often aren't the ones seen by
305 * the drivers that registered them, due to chained interrupt controllers,
306 * and multiple-interrupt dispatch.
/* Record @irq as a base (immediate) wakeup source for this resume. */
308 void log_base_wakeup_reason(int irq)
310 /* No locking is needed, since this function is called within
311 * syscore_resume, with both nonboot CPUs and interrupts disabled.
313 base_irq_nodes = add_to_siblings(base_irq_nodes, irq);
314 BUG_ON(!base_irq_nodes);
/* Without wakeup-reason deduction there is no later dispatch tracing,
 * so the node is considered handled immediately. */
315 #ifndef CONFIG_DEDUCE_WAKEUP_REASONS
316 base_irq_nodes->handled = true;
320 #ifdef CONFIG_DEDUCE_WAKEUP_REASONS
322 /* This function is called by generic_handle_irq, which may call itself
323 * recursively. This happens with interrupts disabled. Using
324 * log_possible_wakeup_reason, we build a tree of interrupts, tracing the call
325 * stack of generic_handle_irq, for each wakeup source containing the
326 * interrupts actually handled.
328 * Most of these "trees" would either have a single node (in the event that the
329 * wakeup source is the final interrupt), or consist of a list of two
330 * interrupts, with the wakeup source at the root, and the final dispatched
331 * interrupt at the leaf.
333 * When *all* wakeup sources have been thusly spoken for, this function will
334 * clear the log_wakeups flag, and print the wakeup reasons.
/* Position the tree cursor for @irq handled at recursion @depth and
 * return the node logged for it.  Runs with interrupts disabled. */
340 static struct wakeup_irq_node *
341 log_possible_wakeup_reason_start(int irq, struct irq_desc *desc, unsigned depth)
343 BUG_ON(!irqs_disabled());
344 BUG_ON((signed)depth < 0);
346 /* This function can race with a call to stop_logging_wakeup_reasons()
347 * from a thread context. If this happens, just exit silently, as we are no
348 * longer interested in logging interrupts.
350 if (!logging_wakeup_reasons())
353 /* If suspend was aborted, the base IRQ nodes are missing, and we stop
354 * logging interrupts immediately.
356 if (!base_irq_nodes) {
357 stop_logging_wakeup_reasons();
361 /* We assume wakeup interrupts are handled only by the first core. */
362 /* TODO: relax this by having percpu versions of the irq tree */
363 if (smp_processor_id() != 0) {
/* depth == 0 (presumably — the condition line is missing): a new
 * base-level dispatch, so restart the cursor at this IRQ's base node. */
368 cur_irq_tree_depth = 0;
369 cur_irq_tree = search_siblings(base_irq_nodes, irq);
371 else if (cur_irq_tree) {
/* Descending exactly one level deeper than the cursor: add a child. */
372 if (depth > cur_irq_tree_depth) {
373 BUG_ON(depth - cur_irq_tree_depth > 1);
374 cur_irq_tree = add_child(cur_irq_tree, irq);
376 cur_irq_tree_depth++;
/* Same or shallower level: climb back up, then add as a sibling. */
379 cur_irq_tree = get_base_node(cur_irq_tree,
380 cur_irq_tree_depth - depth);
381 cur_irq_tree_depth = depth;
382 cur_irq_tree = add_to_siblings(cur_irq_tree, irq);
/*
 * Finish logging for node @n after its handler ran: record whether the
 * interrupt was handled and, once every base wakeup IRQ is accounted
 * for, stop logging, wake waiters on wakeups_completion, and print the
 * deduced wakeup sources.
 */
389 static void log_possible_wakeup_reason_complete(struct wakeup_irq_node *n,
395 n->handled = handled;
397 if (base_irq_nodes_done()) {
398 stop_logging_wakeup_reasons();
399 complete(&wakeups_completion);
400 print_wakeup_sources();
/*
 * Wrap an IRQ @handler invocation for @irq/@desc, tracing it into the
 * wakeup-reason tree.  The per-cpu `depth` counter tracks the
 * generic_handle_irq recursion level around the handler call.
 * Presumably returns `handled` — the return statement is in lines
 * missing from this excerpt.
 */
405 bool log_possible_wakeup_reason(int irq,
406 struct irq_desc *desc,
407 bool (*handler)(struct irq_desc *))
409 static DEFINE_PER_CPU(unsigned int, depth);
411 struct wakeup_irq_node *n;
415 d = get_cpu_var(depth)++;
418 n = log_possible_wakeup_reason_start(irq, desc, d);
420 handled = handler(desc);
422 d = --get_cpu_var(depth);
425 if (!handled && desc && desc->action)
426 pr_debug("%s: irq %d action %pF not handled\n", __func__,
427 irq, desc->action->handler);
429 log_possible_wakeup_reason_complete(n, d, handled);
434 #endif /* CONFIG_DEDUCE_WAKEUP_REASONS */
/*
 * Record a printf-style reason for aborting suspend.  Only the first
 * reason per cycle is kept; later calls return without overwriting it
 * (the early-return condition is in lines missing from this excerpt).
 */
436 void log_suspend_abort_reason(const char *fmt, ...)
441 spin_lock_irqsave(&resume_reason_lock, flags);
443 // Suspend abort reason has already been logged; keep the first one.
445 spin_unlock_irqrestore(&resume_reason_lock, flags);
449 suspend_abort = true;
451 vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
454 spin_unlock_irqrestore(&resume_reason_lock, flags);
/* Tree-walk visitor for check_wakeup_reason(): returns false — which
 * aborts the walk — exactly when @n logs the searched-for IRQ. */
457 static bool match_node(struct wakeup_irq_node *n, void *_p)
459 int irq = *((int *)_p);
460 return n->irq != irq;
/* Report whether @irq was among the logged wakeup IRQs: the tree walk
 * returns false (aborted) precisely when match_node() found it. */
463 int check_wakeup_reason(int irq)
467 spin_lock_irqsave(&resume_reason_lock, flags);
468 found = !walk_irq_node_tree(base_irq_nodes, match_node, &irq);
469 spin_unlock_irqrestore(&resume_reason_lock, flags);
/* Tree-walk visitor: link @n onto the list carried in @_p.
 * NOTE(review): despite the name, no leaf-only test is visible here —
 * it may sit in the lines missing from this excerpt. */
473 static bool build_leaf_nodes(struct wakeup_irq_node *n, void *_p)
475 struct list_head *wakeups = _p;
477 list_add(&n->next, wakeups);
/*
 * Rebuild and return the flat wakeup_irqs list from the irq-node tree.
 * Must not run while interrupt logging is in progress (BUG_ON enforces
 * this); callers supply their own locking — hence "_nosync".
 */
481 static const struct list_head* get_wakeup_reasons_nosync(void)
483 BUG_ON(logging_wakeup_reasons());
484 INIT_LIST_HEAD(&wakeup_irqs);
485 walk_irq_node_tree(base_irq_nodes, build_leaf_nodes, &wakeup_irqs);
/* Tree-walk visitor: collect never-handled wakeup IRQ nodes onto the
 * list in @_p, warning for each.  The handled-test itself is in lines
 * missing from this excerpt. */
489 static bool build_unfinished_nodes(struct wakeup_irq_node *n, void *_p)
491 struct list_head *unfinished = _p;
493 pr_warning("%s: wakeup irq %d was not handled\n",
495 list_add(&n->next, unfinished);
/*
 * Public accessor for the wakeup-reason list.  If interrupt logging is
 * still in progress, wait up to @timeout jiffies for wakeups_completion;
 * on timeout, force logging off and collect the unhandled IRQ nodes
 * into @unfinished.  Returns the flat list built by
 * get_wakeup_reasons_nosync().
 */
500 const struct list_head* get_wakeup_reasons(unsigned long timeout,
501 struct list_head *unfinished)
503 INIT_LIST_HEAD(unfinished);
505 if (logging_wakeup_reasons()) {
506 unsigned long signalled = 0;
508 signalled = wait_for_completion_timeout(&wakeups_completion, timeout);
509 if (WARN_ON(!signalled)) {
510 stop_logging_wakeup_reasons();
511 walk_irq_node_tree(base_irq_nodes, build_unfinished_nodes, unfinished);
/* Completion arrived in time: report how long we actually waited. */
514 pr_info("%s: waited for %u ms\n",
516 jiffies_to_msecs(timeout - signalled));
519 return get_wakeup_reasons_nosync();
/* Tree-walk visitor: unlink @n and return it to the slab cache.  Safe
 * because walk_irq_node_tree() iterates siblings with the _safe
 * variant. */
522 static bool delete_node(struct wakeup_irq_node *n, void *unused)
524 list_del(&n->siblings);
525 kmem_cache_free(wakeup_irq_nodes_cache, n);
/*
 * Free all recorded wakeup reasons and reset module state ahead of a
 * new suspend cycle (called from the PM_SUSPEND_PREPARE notifier).
 * Logging must already have stopped (BUG_ON).
 */
529 void clear_wakeup_reasons(void)
532 spin_lock_irqsave(&resume_reason_lock, flags);
534 BUG_ON(logging_wakeup_reasons());
535 walk_irq_node_tree(base_irq_nodes, delete_node, NULL);
536 base_irq_nodes = NULL;
538 cur_irq_tree_depth = 0;
539 INIT_LIST_HEAD(&wakeup_irqs);
540 suspend_abort = false;
542 spin_unlock_irqrestore(&resume_reason_lock, flags);
545 /* Detects a suspend and clears all the previous wake up reasons */
/*
 * PM notifier: snapshots CLOCK_MONOTONIC and boottime around each
 * suspend (consumed by last_suspend_time_show()) and clears previously
 * logged reasons on PM_SUSPEND_PREPARE.
 */
546 static int wakeup_reason_pm_event(struct notifier_block *notifier,
547 unsigned long pm_event, void *unused)
551 case PM_SUSPEND_PREPARE:
552 spin_lock_irqsave(&resume_reason_lock, flags);
553 suspend_abort = false;
554 spin_unlock_irqrestore(&resume_reason_lock, flags);
555 /* monotonic time since boot */
556 last_monotime = ktime_get();
557 /* monotonic time since boot including the time spent in suspend */
558 last_stime = ktime_get_boottime();
559 clear_wakeup_reasons();
561 case PM_POST_SUSPEND:
562 /* monotonic time since boot */
563 curr_monotime = ktime_get();
564 /* monotonic time since boot including the time spent in suspend */
565 curr_stime = ktime_get_boottime();
566 #ifdef CONFIG_DEDUCE_WAKEUP_REASONS
567 /* log_wakeups should have been cleared by now. */
568 if (WARN_ON(logging_wakeup_reasons())) {
569 stop_logging_wakeup_reasons();
570 print_wakeup_sources();
/* Non-deduce build: print directly from the base nodes. */
573 print_wakeup_sources();
/* PM notifier hooked up by wakeup_reason_init(). */
582 static struct notifier_block wakeup_reason_pm_notifier_block = {
583 .notifier_call = wakeup_reason_pm_event,
/*
 * Late-initcall setup: register the PM notifier, create the
 * /sys/kernel/wakeup_reasons kobject and its attribute group, and set
 * up the irq-node slab cache — unwinding each step in reverse order on
 * failure via the goto cleanup chain below.
 */
586 int __init wakeup_reason_init(void)
588 spin_lock_init(&resume_reason_lock);
590 if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
591 pr_warning("[%s] failed to register PM notifier\n",
596 wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
597 if (!wakeup_reason) {
598 pr_warning("[%s] failed to create a sysfs kobject\n",
600 goto fail_unregister_pm_notifier;
603 if (sysfs_create_group(wakeup_reason, &attr_group)) {
604 pr_warning("[%s] failed to create a sysfs group\n",
606 goto fail_kobject_put;
609 wakeup_irq_nodes_cache =
610 kmem_cache_create("wakeup_irq_node_cache",
611 sizeof(struct wakeup_irq_node), 0,
613 if (!wakeup_irq_nodes_cache)
614 goto fail_remove_group;
/* Error unwind: release resources in reverse acquisition order. */
619 sysfs_remove_group(wakeup_reason, &attr_group);
621 kobject_put(wakeup_reason);
622 fail_unregister_pm_notifier:
623 unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
628 late_initcall(wakeup_reason_init);