/*
 * kernel/power/wakeup_reason.c
 *
 * Logs the reasons which caused the kernel to resume from
 * the suspend mode.
 *
 * Copyright (C) 2014 Google, Inc.
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/wakeup_reason.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/suspend.h>
#include <linux/slab.h>

#define MAX_WAKEUP_REASON_IRQS 32
static bool suspend_abort;
static char abort_reason[MAX_SUSPEND_ABORT_LEN];

static struct wakeup_irq_node *base_irq_nodes;
static struct wakeup_irq_node *cur_irq_tree;
static int cur_irq_tree_depth;
static LIST_HEAD(wakeup_irqs);

static struct kmem_cache *wakeup_irq_nodes_cache;
static struct kobject *wakeup_reason;
static spinlock_t resume_reason_lock;
bool log_wakeups __read_mostly;
struct completion wakeups_completion;

static ktime_t last_monotime; /* monotonic time before last suspend */
static ktime_t curr_monotime; /* monotonic time after last suspend */
static ktime_t last_stime; /* monotonic boottime offset before last suspend */
static ktime_t curr_stime; /* monotonic boottime offset after last suspend */

static void init_wakeup_irq_node(struct wakeup_irq_node *p, int irq)
{
        p->irq = irq;
        p->desc = irq_to_desc(irq);
        p->child = NULL;
        p->parent = NULL;
        p->handled = false;
        INIT_LIST_HEAD(&p->siblings);
        INIT_LIST_HEAD(&p->next);
}

static struct wakeup_irq_node *alloc_irq_node(int irq)
{
        struct wakeup_irq_node *n;

        n = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
        if (!n) {
                pr_warning("Failed to log chained wakeup IRQ %d\n", irq);
                return NULL;
        }

        init_wakeup_irq_node(n, irq);
        return n;
}

static struct wakeup_irq_node *
search_siblings(struct wakeup_irq_node *root, int irq)
{
        bool found = false;
        struct wakeup_irq_node *n = NULL;

        BUG_ON(!root);

        if (root->irq == irq)
                return root;

        list_for_each_entry(n, &root->siblings, siblings) {
                if (n->irq == irq) {
                        found = true;
                        break;
                }
        }

        return found ? n : NULL;
}

static struct wakeup_irq_node *
add_to_siblings(struct wakeup_irq_node *root, int irq)
{
        struct wakeup_irq_node *n;

        if (root) {
                n = search_siblings(root, irq);
                if (n)
                        return n;
        }
        n = alloc_irq_node(irq);

        if (n && root)
                list_add(&n->siblings, &root->siblings);
        return n;
}

112
113 #ifdef CONFIG_DEDUCE_WAKEUP_REASONS
114 static struct wakeup_irq_node* add_child(struct wakeup_irq_node *root, int irq)
115 {
116         if (!root->child) {
117                 root->child = alloc_irq_node(irq);
118                 if (!root->child)
119                         return NULL;
120                 root->child->parent = root;
121                 return root->child;
122         }
123
124         return add_to_siblings(root->child, irq);
125 }
126
127 static struct wakeup_irq_node *find_first_sibling(struct wakeup_irq_node *node)
128 {
129         struct wakeup_irq_node *n;
130         if (node->parent)
131                 return node;
132         list_for_each_entry(n, &node->siblings, siblings) {
133                 if (n->parent)
134                         return n;
135         }
136         return NULL;
137 }
138
139 static struct wakeup_irq_node *
140 get_base_node(struct wakeup_irq_node *node, unsigned depth)
141 {
142         if (!node)
143                 return NULL;
144
145         while (depth) {
146                 node = find_first_sibling(node);
147                 BUG_ON(!node);
148                 node = node->parent;
149                 depth--;
150         }
151
152         return node;
153 }
154 #endif /* CONFIG_DEDUCE_WAKEUP_REASONS */

static const struct list_head *get_wakeup_reasons_nosync(void);

static void print_wakeup_sources(void)
{
        struct wakeup_irq_node *n;
        const struct list_head *wakeups;

        if (suspend_abort) {
                pr_info("Abort: %s\n", abort_reason);
                return;
        }

        wakeups = get_wakeup_reasons_nosync();
        list_for_each_entry(n, wakeups, next) {
                if (n->desc && n->desc->action && n->desc->action->name)
                        pr_info("Resume caused by IRQ %d, %s\n", n->irq,
                                n->desc->action->name);
                else
                        pr_info("Resume caused by IRQ %d\n", n->irq);
        }
}

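/* Post-order walk of the IRQ forest rooted at @root: each node's children
 * are visited before the node itself, and the walk stops early as soon as
 * @visit returns false.
 */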
static bool walk_irq_node_tree(struct wakeup_irq_node *root,
                bool (*visit)(struct wakeup_irq_node *, void *),
                void *cookie)
{
        struct wakeup_irq_node *n, *t;

        if (!root)
                return true;

        list_for_each_entry_safe(n, t, &root->siblings, siblings) {
                if (!walk_irq_node_tree(n->child, visit, cookie))
                        return false;
                if (!visit(n, cookie))
                        return false;
        }

        if (!walk_irq_node_tree(root->child, visit, cookie))
                return false;
        return visit(root, cookie);
}

#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
static bool is_node_handled(struct wakeup_irq_node *n, void *_p)
{
        return n->handled;
}

static bool base_irq_nodes_done(void)
{
        return walk_irq_node_tree(base_irq_nodes, is_node_handled, NULL);
}
#endif

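/* Cookie threaded through walk_irq_node_tree() by last_resume_reason_show():
 * accumulates one formatted line per leaf wakeup IRQ into the sysfs page
 * buffer.
 */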
struct buf_cookie {
        char *buf;
        int buf_offset;
};

static bool print_leaf_node(struct wakeup_irq_node *n, void *_p)
{
        struct buf_cookie *b = _p;

        if (!n->child) {
                if (n->desc && n->desc->action && n->desc->action->name)
                        b->buf_offset +=
                                snprintf(b->buf + b->buf_offset,
                                        PAGE_SIZE - b->buf_offset,
                                        "%d %s\n",
                                        n->irq, n->desc->action->name);
                else
                        b->buf_offset +=
                                snprintf(b->buf + b->buf_offset,
                                        PAGE_SIZE - b->buf_offset,
                                        "%d\n",
                                        n->irq);
        }
        return true;
}

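/* Backs /sys/kernel/wakeup_reasons/last_resume_reason: prints either the
 * abort reason, or one "<irq> <action name>" line per leaf wakeup IRQ.
 */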
static ssize_t last_resume_reason_show(struct kobject *kobj,
                                        struct kobj_attribute *attr,
                                        char *buf)
{
        unsigned long flags;

        struct buf_cookie b = {
                .buf = buf,
                .buf_offset = 0
        };

        spin_lock_irqsave(&resume_reason_lock, flags);
        if (suspend_abort)
                b.buf_offset = snprintf(buf, PAGE_SIZE, "Abort: %s", abort_reason);
        else
                walk_irq_node_tree(base_irq_nodes, print_leaf_node, &b);
        spin_unlock_irqrestore(&resume_reason_lock, flags);

        return b.buf_offset;
}

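/* Backs /sys/kernel/wakeup_reasons/last_suspend_time: reports how long the
 * last cycle spent in the suspend/resume code path and how long it actually
 * slept, as two "<sec>.<nsec>" values.
 */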
static ssize_t last_suspend_time_show(struct kobject *kobj,
                        struct kobj_attribute *attr, char *buf)
{
        struct timespec sleep_time;
        struct timespec total_time;
        struct timespec suspend_resume_time;

        /*
         * total_time is calculated from monotonic boottime offsets because,
         * unlike CLOCK_MONOTONIC, they include the time spent in the suspend
         * state.
         */
        total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));

        /*
         * suspend_resume_time is the CLOCK_MONOTONIC interval between the
         * point just before entering suspend and the point just after
         * resuming, so it excludes the time actually spent asleep.
         */
        suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));

        /* sleep_time = total_time - suspend_resume_time */
        sleep_time = timespec_sub(total_time, suspend_resume_time);

        /* Export suspend_resume_time and sleep_time as a pair. */
        return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
                                suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
                                sleep_time.tv_sec, sleep_time.tv_nsec);
}

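/* Example of reading the two attributes from userspace (values below are
 * illustrative only, not taken from a real trace):
 *
 *   $ cat /sys/kernel/wakeup_reasons/last_resume_reason
 *   170 qcom,smd-rpm
 *   $ cat /sys/kernel/wakeup_reasons/last_suspend_time
 *   0.063178421 2.995876045
 */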
static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);

static struct attribute *attrs[] = {
        &resume_reason.attr,
        &suspend_time.attr,
        NULL,
};

static struct attribute_group attr_group = {
        .attrs = attrs,
};

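/* Clears log_wakeups and issues a write barrier so that CPUs still
 * dispatching interrupts observe the cleared flag and stop logging.
 */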
static inline void stop_logging_wakeup_reasons(void)
{
        ACCESS_ONCE(log_wakeups) = false;
        smp_wmb();
}

/*
 * Stores the immediate wakeup IRQs; these often aren't the ones seen by
 * the drivers that registered them, due to chained interrupt controllers
 * and multiple-interrupt dispatch.
 */
void log_base_wakeup_reason(int irq)
{
        /*
         * No locking is needed, since this function is called within
         * syscore_resume, with both nonboot CPUs and interrupts disabled.
         */
        base_irq_nodes = add_to_siblings(base_irq_nodes, irq);
        BUG_ON(!base_irq_nodes);
#ifndef CONFIG_DEDUCE_WAKEUP_REASONS
        base_irq_nodes->handled = true;
#endif
}

#ifdef CONFIG_DEDUCE_WAKEUP_REASONS

/*
 * This function is called by generic_handle_irq, which may call itself
 * recursively.  This happens with interrupts disabled.  Using
 * log_possible_wakeup_reason, we build a tree of interrupts, tracing the call
 * stack of generic_handle_irq, for each wakeup source containing the
 * interrupts actually handled.
 *
 * Most of these "trees" would either have a single node (in the event that the
 * wakeup source is the final interrupt), or consist of a list of two
 * interrupts, with the wakeup source at the root, and the final dispatched
 * interrupt at the leaf.
 *
 * When *all* wakeup sources have been thusly spoken for, this function will
 * clear the log_wakeups flag, and print the wakeup reasons.
 *
 * TODO: percpu
 */

static struct wakeup_irq_node *
log_possible_wakeup_reason_start(int irq, struct irq_desc *desc, unsigned depth)
{
        BUG_ON(!irqs_disabled());
        BUG_ON((signed)depth < 0);

        /*
         * This function can race with a call to stop_logging_wakeup_reasons()
         * from a thread context.  If this happens, just exit silently, as we
         * are no longer interested in logging interrupts.
         */
        if (!logging_wakeup_reasons())
                return NULL;

        /*
         * If suspend was aborted, the base IRQ nodes are missing, and we stop
         * logging interrupts immediately.
         */
        if (!base_irq_nodes) {
                stop_logging_wakeup_reasons();
                return NULL;
        }

        /* We assume wakeup interrupts are handled only by the first core. */
        /* TODO: relax this by having percpu versions of the irq tree */
        if (smp_processor_id() != 0)
                return NULL;

        if (depth == 0) {
                cur_irq_tree_depth = 0;
                cur_irq_tree = search_siblings(base_irq_nodes, irq);
        } else if (cur_irq_tree) {
                if (depth > cur_irq_tree_depth) {
                        BUG_ON(depth - cur_irq_tree_depth > 1);
                        cur_irq_tree = add_child(cur_irq_tree, irq);
                        if (cur_irq_tree)
                                cur_irq_tree_depth++;
                } else {
                        cur_irq_tree = get_base_node(cur_irq_tree,
                                        cur_irq_tree_depth - depth);
                        cur_irq_tree_depth = depth;
                        cur_irq_tree = add_to_siblings(cur_irq_tree, irq);
                }
        }

        return cur_irq_tree;
}

static void log_possible_wakeup_reason_complete(struct wakeup_irq_node *n,
                                        unsigned depth,
                                        bool handled)
{
        if (!n)
                return;
        n->handled = handled;
        if (depth == 0) {
                if (base_irq_nodes_done()) {
                        stop_logging_wakeup_reasons();
                        complete(&wakeups_completion);
                        print_wakeup_sources();
                }
        }
}

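/* Wraps the flow handler for one IRQ dispatch.  A per-cpu depth counter
 * tracks how deeply generic_handle_irq has recursed through chained
 * controllers; the depth lets _start() attach this IRQ at the right level
 * of the wakeup tree, and a depth of zero on the way out marks the end of
 * one top-level dispatch.
 */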
bool log_possible_wakeup_reason(int irq,
                        struct irq_desc *desc,
                        bool (*handler)(struct irq_desc *))
{
        static DEFINE_PER_CPU(unsigned int, depth);

        struct wakeup_irq_node *n;
        bool handled;
        unsigned d;

        d = get_cpu_var(depth)++;
        put_cpu_var(depth);

        n = log_possible_wakeup_reason_start(irq, desc, d);

        handled = handler(desc);

        d = --get_cpu_var(depth);
        put_cpu_var(depth);

        if (!handled && desc && desc->action)
                pr_debug("%s: irq %d action %pF not handled\n", __func__,
                        irq, desc->action->handler);

        log_possible_wakeup_reason_complete(n, d, handled);

        return handled;
}

#endif /* CONFIG_DEDUCE_WAKEUP_REASONS */

void log_suspend_abort_reason(const char *fmt, ...)
{
        va_list args;
        unsigned long flags;

        spin_lock_irqsave(&resume_reason_lock, flags);

        /* Suspend abort reason has already been logged. */
        if (suspend_abort) {
                spin_unlock_irqrestore(&resume_reason_lock, flags);
                return;
        }

        suspend_abort = true;
        va_start(args, fmt);
        vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
        va_end(args);

        spin_unlock_irqrestore(&resume_reason_lock, flags);
}

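/* Visitor for check_wakeup_reason(): returns false (which stops the walk)
 * exactly when the node matches @irq, so a walk that fails means the IRQ
 * was found among the logged wakeup reasons.
 */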
static bool match_node(struct wakeup_irq_node *n, void *_p)
{
        int irq = *((int *)_p);

        return n->irq != irq;
}

int check_wakeup_reason(int irq)
{
        bool found;
        unsigned long flags;

        spin_lock_irqsave(&resume_reason_lock, flags);
        found = !walk_irq_node_tree(base_irq_nodes, match_node, &irq);
        spin_unlock_irqrestore(&resume_reason_lock, flags);
        return found;
}

static bool build_leaf_nodes(struct wakeup_irq_node *n, void *_p)
{
        struct list_head *wakeups = _p;

        if (!n->child)
                list_add(&n->next, wakeups);
        return true;
}

static const struct list_head *get_wakeup_reasons_nosync(void)
{
        BUG_ON(logging_wakeup_reasons());
        INIT_LIST_HEAD(&wakeup_irqs);
        walk_irq_node_tree(base_irq_nodes, build_leaf_nodes, &wakeup_irqs);
        return &wakeup_irqs;
}

static bool build_unfinished_nodes(struct wakeup_irq_node *n, void *_p)
{
        struct list_head *unfinished = _p;

        if (!n->handled) {
                pr_warning("%s: wakeup irq %d was not handled\n",
                           __func__, n->irq);
                list_add(&n->next, unfinished);
        }
        return true;
}

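/* Returns the list of leaf wakeup IRQs, waiting up to @timeout jiffies for
 * wakeup-reason deduction to finish.  On timeout, logging is stopped, the
 * not-yet-handled nodes are collected on @unfinished, and NULL is returned.
 */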
const struct list_head *get_wakeup_reasons(unsigned long timeout,
                                        struct list_head *unfinished)
{
        INIT_LIST_HEAD(unfinished);

        if (logging_wakeup_reasons()) {
                unsigned long signalled = 0;

                if (timeout)
                        signalled = wait_for_completion_timeout(&wakeups_completion, timeout);
                if (WARN_ON(!signalled)) {
                        stop_logging_wakeup_reasons();
                        walk_irq_node_tree(base_irq_nodes, build_unfinished_nodes, unfinished);
                        return NULL;
                }
                pr_info("%s: waited for %u ms\n", __func__,
                                jiffies_to_msecs(timeout - signalled));
        }

        return get_wakeup_reasons_nosync();
}

static bool delete_node(struct wakeup_irq_node *n, void *unused)
{
        list_del(&n->siblings);
        kmem_cache_free(wakeup_irq_nodes_cache, n);
        return true;
}

void clear_wakeup_reasons(void)
{
        unsigned long flags;

        spin_lock_irqsave(&resume_reason_lock, flags);

        BUG_ON(logging_wakeup_reasons());
        walk_irq_node_tree(base_irq_nodes, delete_node, NULL);
        base_irq_nodes = NULL;
        cur_irq_tree = NULL;
        cur_irq_tree_depth = 0;
        INIT_LIST_HEAD(&wakeup_irqs);
        suspend_abort = false;

        spin_unlock_irqrestore(&resume_reason_lock, flags);
}

/* Detects a suspend and clears all the previous wakeup reasons. */
static int wakeup_reason_pm_event(struct notifier_block *notifier,
                unsigned long pm_event, void *unused)
{
        unsigned long flags;

        switch (pm_event) {
        case PM_SUSPEND_PREPARE:
                spin_lock_irqsave(&resume_reason_lock, flags);
                suspend_abort = false;
                spin_unlock_irqrestore(&resume_reason_lock, flags);
                /* monotonic time since boot */
                last_monotime = ktime_get();
                /* monotonic time since boot including the time spent in suspend */
                last_stime = ktime_get_boottime();
                clear_wakeup_reasons();
                break;
        case PM_POST_SUSPEND:
                /* monotonic time since boot */
                curr_monotime = ktime_get();
                /* monotonic time since boot including the time spent in suspend */
                curr_stime = ktime_get_boottime();
#ifdef CONFIG_DEDUCE_WAKEUP_REASONS
                /* log_wakeups should have been cleared by now. */
                if (WARN_ON(logging_wakeup_reasons())) {
                        stop_logging_wakeup_reasons();
                        print_wakeup_sources();
                }
#else
                print_wakeup_sources();
#endif
                break;
        default:
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block wakeup_reason_pm_notifier_block = {
        .notifier_call = wakeup_reason_pm_event,
};

int __init wakeup_reason_init(void)
{
        spin_lock_init(&resume_reason_lock);

        if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
                pr_warning("[%s] failed to register PM notifier\n", __func__);
                goto fail;
        }

        wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
        if (!wakeup_reason) {
                pr_warning("[%s] failed to create a sysfs kobject\n", __func__);
                goto fail_unregister_pm_notifier;
        }

        if (sysfs_create_group(wakeup_reason, &attr_group)) {
                pr_warning("[%s] failed to create a sysfs group\n", __func__);
                goto fail_kobject_put;
        }

        wakeup_irq_nodes_cache =
                kmem_cache_create("wakeup_irq_node_cache",
                                        sizeof(struct wakeup_irq_node), 0,
                                        0, NULL);
        if (!wakeup_irq_nodes_cache)
                goto fail_remove_group;

        return 0;

fail_remove_group:
        sysfs_remove_group(wakeup_reason, &attr_group);
fail_kobject_put:
        kobject_put(wakeup_reason);
fail_unregister_pm_notifier:
        unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
fail:
        return 1;
}

late_initcall(wakeup_reason_init);