/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm-trace.h>
#include <linux/pm_wakeirq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>

#include "../base.h"
#include "power.h"

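/* Common signature of the dev_pm_ops callbacks invoked via dpm_run_callback(). */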
typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */

LIST_HEAD(dpm_list);
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

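/*
 * First error reported by an async suspend/resume thread; checked by the
 * synchronous dpm_* loops so that a failure aborts the whole transition.
 */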
static int async_error;

static char *pm_verb(int event)
{
        switch (event) {
        case PM_EVENT_SUSPEND:
                return "suspend";
        case PM_EVENT_RESUME:
                return "resume";
        case PM_EVENT_FREEZE:
                return "freeze";
        case PM_EVENT_QUIESCE:
                return "quiesce";
        case PM_EVENT_HIBERNATE:
                return "hibernate";
        case PM_EVENT_THAW:
                return "thaw";
        case PM_EVENT_RESTORE:
                return "restore";
        case PM_EVENT_RECOVER:
                return "recover";
        default:
                return "(unknown PM event)";
        }
}

/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 */
void device_pm_sleep_init(struct device *dev)
{
        dev->power.is_prepared = false;
        dev->power.is_suspended = false;
        dev->power.is_noirq_suspended = false;
        dev->power.is_late_suspended = false;
        init_completion(&dev->power.completion);
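        /*
         * Mark the completion as done up front so that dpm_wait() on a
         * device that has never been through a suspend phase does not block.
         */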
        complete_all(&dev->power.completion);
        dev->power.wakeup = NULL;
        INIT_LIST_HEAD(&dev->power.entry);
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
        mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
        pr_debug("PM: Adding info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        device_pm_check_callbacks(dev);
        mutex_lock(&dpm_list_mtx);
        if (dev->parent && dev->parent->power.is_prepared)
                dev_warn(dev, "parent %s should not be sleeping\n",
                        dev_name(dev->parent));
        list_add_tail(&dev->power.entry, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
        pr_debug("PM: Removing info for %s:%s\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        complete_all(&dev->power.completion);
        mutex_lock(&dpm_list_mtx);
        list_del_init(&dev->power.entry);
        mutex_unlock(&dpm_list_mtx);
        device_wakeup_disable(dev);
        pm_runtime_remove(dev);
        device_pm_check_callbacks(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s before %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert before devb. */
        list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
        pr_debug("PM: Moving %s:%s after %s:%s\n",
                 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
                 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
        /* Delete deva from dpm_list and reinsert after devb. */
        list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
        pr_debug("PM: Moving %s:%s to end of list\n",
                 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
        list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
        ktime_t calltime = ktime_set(0, 0);

        if (pm_print_times_enabled) {
                pr_info("calling  %s+ @ %i, parent: %s\n",
                        dev_name(dev), task_pid_nr(current),
                        dev->parent ? dev_name(dev->parent) : "none");
                calltime = ktime_get();
        }

        return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
                                  int error, pm_message_t state, char *info)
{
        ktime_t rettime;
        s64 nsecs;

        rettime = ktime_get();
        nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));

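        /*
         * nsecs >> 10 approximates a nanoseconds-to-microseconds conversion
         * (divides by 1024 rather than 1000); close enough for a debug print.
         */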
        if (pm_print_times_enabled) {
                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
                        error, (unsigned long long)nsecs >> 10);
        }
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
        if (!dev)
                return;

        if (async || (pm_async_enabled && dev->power.async_suspend))
                wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
        dpm_wait(dev, *((bool *)async_ptr));
        return 0;
}

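/*
 * A parent's suspend callbacks must not run before all of its children have
 * finished, so the suspend paths wait on every child's completion here.
 */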
static void dpm_wait_for_children(struct device *dev, bool async)
{
        device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend;
        case PM_EVENT_RESUME:
                return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw;
        case PM_EVENT_RESTORE:
                return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the callback returned by this
 * function is being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
                                      pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_late;
        case PM_EVENT_RESUME:
                return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_late;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_late;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_early;
        case PM_EVENT_RESTORE:
                return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the callback returned
 * by this function is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
        switch (state.event) {
#ifdef CONFIG_SUSPEND
        case PM_EVENT_SUSPEND:
                return ops->suspend_noirq;
        case PM_EVENT_RESUME:
                return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return ops->freeze_noirq;
        case PM_EVENT_HIBERNATE:
                return ops->poweroff_noirq;
        case PM_EVENT_THAW:
        case PM_EVENT_RECOVER:
                return ops->thaw_noirq;
        case PM_EVENT_RESTORE:
                return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
        }

        return NULL;
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
        dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
                ((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
                ", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                        int error)
{
        printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
                dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
        ktime_t calltime;
        u64 usecs64;
        int usecs;

        calltime = ktime_get();
        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
        do_div(usecs64, NSEC_PER_USEC);
        usecs = usecs64;
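        /* Report at least one microsecond so very fast phases do not show 0. */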
        if (usecs == 0)
                usecs = 1;
        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
                info ?: "", info ? " " : "", pm_verb(state.event),
                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
                            pm_message_t state, char *info)
{
        ktime_t calltime;
        int error;

        if (!cb)
                return 0;

        calltime = initcall_debug_start(dev);

        pm_dev_dbg(dev, state, info);
        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

#ifdef CONFIG_DPM_WATCHDOG
struct dpm_watchdog {
        struct device           *dev;
        struct task_struct      *tsk;
        struct timer_list       timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
        struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
        struct dpm_watchdog *wd = (void *)data;

        dev_emerg(wd->dev, "**** DPM device timeout ****\n");
        show_stack(wd->tsk, NULL);
        panic("%s %s: unrecoverable failure\n",
                dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
        struct timer_list *timer = &wd->timer;

        wd->dev = dev;
        wd->tsk = current;

        init_timer_on_stack(timer);
        /* use same timeout value for both suspend and resume */
        timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
        timer->function = dpm_watchdog_handler;
        timer->data = (unsigned long)wd;
        add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
        struct timer_list *timer = &wd->timer;

        del_timer_sync(timer);
        destroy_timer_on_stack(timer);
}
#else
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_noirq_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

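        /*
         * Callback lookup order: PM domain, then device type, class and bus,
         * with the driver's own dev_pm_ops as the fallback.
         */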
        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_noirq_suspended = false;

 Out:
        complete_all(&dev->power.completion);
        TRACE_RESUME(error);
        return error;
}

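/*
 * A device is handled asynchronously only when it asked for it, the feature
 * is enabled globally, and pm_trace is off (pm_trace relies on the
 * deterministic ordering of fully synchronous suspend/resume).
 */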
static bool is_async(struct device *dev)
{
        return dev->power.async_suspend && pm_async_enabled
                && !pm_trace_is_enabled();
}

static void async_resume_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_noirq(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
void dpm_resume_noirq(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads up front, so that they are not
         * delayed behind devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_noirq, dev);
                }
        }

        while (!list_empty(&dpm_noirq_list)) {
                dev = to_device(dpm_noirq_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_late_early_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_noirq(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_noirq++;
                                dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " noirq", error);
                        }
                }

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "noirq");
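        /*
         * All "noirq" callbacks have run: re-enable the interrupt lines that
         * suspend_device_irqs() disabled and let cpuidle run again.
         */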
        resume_device_irqs();
        device_wakeup_disarm_wake_irqs();
        cpuidle_resume();
        trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore || dev->power.direct_complete)
                goto Out;

        if (!dev->power.is_late_suspended)
                goto Out;

        dpm_wait(dev->parent, async);

        if (dev->pm_domain) {
                info = "early power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "early type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "early class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "early bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "early driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_late_suspended = false;

 Out:
        TRACE_RESUME(error);

        pm_runtime_enable(dev);
        complete_all(&dev->power.completion);
        return error;
}

static void async_resume_early(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume_early(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);

        put_device(dev);
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_early(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;

        /*
         * Kick off the async threads up front, so that they are not
         * delayed behind devices that resume synchronously.
         */
        list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume_early, dev);
                }
        }

        while (!list_empty(&dpm_late_early_list)) {
                dev = to_device(dpm_late_early_list.next);
                get_device(dev);
                list_move_tail(&dev->power.entry, &dpm_suspended_list);
                mutex_unlock(&dpm_list_mtx);

                if (!is_async(dev)) {
                        int error;

                        error = device_resume_early(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume_early++;
                                dpm_save_failed_step(SUSPEND_RESUME_EARLY);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, " early", error);
                        }
                }
                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, "early");
        trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
        dpm_resume_noirq(state);
        dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_RESUME(0);

        if (dev->power.syscore)
                goto Complete;

        if (dev->power.direct_complete) {
                /* Match the pm_runtime_disable() in __device_suspend(). */
                pm_runtime_enable(dev);
                goto Complete;
        }

        dpm_wait(dev->parent, async);
        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        /*
         * This is a fib.  But we'll allow new children to be added below
         * a resumed device, even if the device hasn't been completed yet.
         */
        dev->power.is_prepared = false;

        if (!dev->power.is_suspended)
                goto Unlock;

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Driver;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Driver;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Driver;
                } else if (dev->class->resume) {
                        info = "legacy class ";
                        callback = dev->class->resume;
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->resume) {
                        info = "legacy bus ";
                        callback = dev->bus->resume;
                        goto End;
                }
        }

 Driver:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

 End:
        error = dpm_run_callback(callback, dev, state, info);
        dev->power.is_suspended = false;

 Unlock:
        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);

        TRACE_RESUME(error);

        return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = device_resume(dev, pm_transition, true);
        if (error)
                pm_dev_err(dev, pm_transition, " async", error);
        put_device(dev);
}

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
        struct device *dev;
        ktime_t starttime = ktime_get();

        trace_suspend_resume(TPS("dpm_resume"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
                reinit_completion(&dev->power.completion);
                if (is_async(dev)) {
                        get_device(dev);
                        async_schedule(async_resume, dev);
                }
        }

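        /*
         * A reference is held across each callback so the device cannot go
         * away while dpm_list_mtx is dropped around it.
         */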
        while (!list_empty(&dpm_suspended_list)) {
                dev = to_device(dpm_suspended_list.next);
                get_device(dev);
                if (!is_async(dev)) {
                        int error;

                        mutex_unlock(&dpm_list_mtx);

                        error = device_resume(dev, state, false);
                        if (error) {
                                suspend_stats.failed_resume++;
                                dpm_save_failed_step(SUSPEND_RESUME);
                                dpm_save_failed_dev(dev_name(dev));
                                pm_dev_err(dev, state, "", error);
                        }

                        mutex_lock(&dpm_list_mtx);
                }
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        dpm_show_time(starttime, state, NULL);

        cpufreq_resume();
        trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
        void (*callback)(struct device *) = NULL;
        char *info = NULL;

        if (dev->power.syscore)
                return;

        device_lock(dev);

        if (dev->pm_domain) {
                info = "completing power domain ";
                callback = dev->pm_domain->ops.complete;
        } else if (dev->type && dev->type->pm) {
                info = "completing type ";
                callback = dev->type->pm->complete;
        } else if (dev->class && dev->class->pm) {
                info = "completing class ";
                callback = dev->class->pm->complete;
        } else if (dev->bus && dev->bus->pm) {
                info = "completing bus ";
                callback = dev->bus->pm->complete;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "completing driver ";
                callback = dev->driver->pm->complete;
        }

        if (callback) {
                pm_dev_dbg(dev, state, info);
                callback(dev);
        }

        device_unlock(dev);

        pm_runtime_put(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
        struct list_head list;

        trace_suspend_resume(TPS("dpm_complete"), state.event, true);
        might_sleep();

        INIT_LIST_HEAD(&list);
        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                dev->power.is_prepared = false;
                list_move(&dev->power.entry, &list);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                device_complete(dev, state);
                trace_device_pm_callback_end(dev, 0);

                mutex_lock(&dpm_list_mtx);
                put_device(dev);
        }
        list_splice(&list, &dpm_list);
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
        dpm_resume(state);
        dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
        switch (sleep_state.event) {
        case PM_EVENT_SUSPEND:
                return PMSG_RESUME;
        case PM_EVENT_FREEZE:
        case PM_EVENT_QUIESCE:
                return PMSG_RECOVER;
        case PM_EVENT_HIBERNATE:
                return PMSG_RESTORE;
        }
        return PMSG_ON;
}

/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

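        /* Abort if a wakeup event arrived while devices were being suspended. */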
        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "noirq power domain ";
                callback = pm_noirq_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "noirq type ";
                callback = pm_noirq_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "noirq class ";
                callback = pm_noirq_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "noirq bus ";
                callback = pm_noirq_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "noirq driver ";
                callback = pm_noirq_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_noirq_suspended = true;
        else
                async_error = error;

Complete:
        complete_all(&dev->power.completion);
        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend_noirq(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_noirq(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend_noirq(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_noirq, dev);
                return 0;
        }
        return __device_suspend_noirq(dev, pm_transition, false);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
int dpm_suspend_noirq(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
        cpuidle_pause();
        device_wakeup_arm_wake_irqs();
        suspend_device_irqs();
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_late_early_list)) {
                struct device *dev = to_device(dpm_late_early_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_noirq(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, " noirq", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_noirq_list);
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;

        if (error) {
                suspend_stats.failed_suspend_noirq++;
                dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
                dpm_resume_noirq(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "noirq");
        }
        trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
        return error;
}

/**
 * __device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        __pm_runtime_disable(dev, false);

        dpm_wait_for_children(dev, async);

        if (async_error)
                goto Complete;

        if (pm_wakeup_pending()) {
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore || dev->power.direct_complete)
                goto Complete;

        if (dev->pm_domain) {
                info = "late power domain ";
                callback = pm_late_early_op(&dev->pm_domain->ops, state);
        } else if (dev->type && dev->type->pm) {
                info = "late type ";
                callback = pm_late_early_op(dev->type->pm, state);
        } else if (dev->class && dev->class->pm) {
                info = "late class ";
                callback = pm_late_early_op(dev->class->pm, state);
        } else if (dev->bus && dev->bus->pm) {
                info = "late bus ";
                callback = pm_late_early_op(dev->bus->pm, state);
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "late driver ";
                callback = pm_late_early_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);
        if (!error)
                dev->power.is_late_suspended = true;
        else
                async_error = error;

Complete:
        TRACE_SUSPEND(error);
        complete_all(&dev->power.completion);
        return error;
}

static void async_suspend_late(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend_late(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }
        put_device(dev);
}

static int device_suspend_late(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend_late, dev);
                return 0;
        }

        return __device_suspend_late(dev, pm_transition, false);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_late(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;

        while (!list_empty(&dpm_suspended_list)) {
                struct device *dev = to_device(dpm_suspended_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend_late(dev);

                mutex_lock(&dpm_list_mtx);
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_late_early_list);

                if (error) {
                        pm_dev_err(dev, state, " late", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                put_device(dev);

                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend_late++;
                dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
                dpm_resume_early(resume_event(state));
        } else {
                dpm_show_time(starttime, state, "late");
        }
        trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
        return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
        int error = dpm_suspend_late(state);
        if (error)
                return error;

        error = dpm_suspend_noirq(state);
        if (error) {
                dpm_resume_early(resume_event(state));
                return error;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 * @info: string description of caller.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
                          int (*cb)(struct device *dev, pm_message_t state),
                          char *info)
{
        int error;
        ktime_t calltime;

        calltime = initcall_debug_start(dev);

        trace_device_pm_callback_start(dev, info, state.event);
        error = cb(dev, state);
        trace_device_pm_callback_end(dev, error);
        suspend_report_result(cb, error);

        initcall_debug_report(dev, calltime, error, state, info);

        return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
        pm_callback_t callback = NULL;
        char *info = NULL;
        int error = 0;
        char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        DECLARE_DPM_WATCHDOG_ON_STACK(wd);

        TRACE_DEVICE(dev);
        TRACE_SUSPEND(0);

        dpm_wait_for_children(dev, async);

        if (async_error) {
                dev->power.direct_complete = false;
                goto Complete;
        }

        /*
         * If a device configured to wake up the system from sleep states
         * has been suspended at run time and there's a resume request pending
         * for it, this is equivalent to the device signaling wakeup, so the
         * system suspend operation should be aborted.
         */
        if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
                pm_wakeup_event(dev, 0);

        if (pm_wakeup_pending()) {
                pm_get_active_wakeup_sources(suspend_abort,
                        MAX_SUSPEND_ABORT_LEN);
                log_suspend_abort_reason(suspend_abort);
                dev->power.direct_complete = false;
                async_error = -EBUSY;
                goto Complete;
        }

        if (dev->power.syscore)
                goto Complete;

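        /*
         * For direct_complete, runtime PM must stay disabled across the
         * transition: re-checking after pm_runtime_disable() closes the race
         * with a concurrent runtime resume. If the device turns out not to be
         * runtime-suspended after all, fall back to the full suspend path.
         */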
        if (dev->power.direct_complete) {
                if (pm_runtime_status_suspended(dev)) {
                        pm_runtime_disable(dev);
                        if (pm_runtime_status_suspended(dev))
                                goto Complete;

                        pm_runtime_enable(dev);
                }
                dev->power.direct_complete = false;
        }

        dpm_watchdog_set(&wd, dev);
        device_lock(dev);

        if (dev->pm_domain) {
                info = "power domain ";
                callback = pm_op(&dev->pm_domain->ops, state);
                goto Run;
        }

        if (dev->type && dev->type->pm) {
                info = "type ";
                callback = pm_op(dev->type->pm, state);
                goto Run;
        }

        if (dev->class) {
                if (dev->class->pm) {
                        info = "class ";
                        callback = pm_op(dev->class->pm, state);
                        goto Run;
                } else if (dev->class->suspend) {
                        pm_dev_dbg(dev, state, "legacy class ");
                        error = legacy_suspend(dev, state, dev->class->suspend,
                                                "legacy class ");
                        goto End;
                }
        }

        if (dev->bus) {
                if (dev->bus->pm) {
                        info = "bus ";
                        callback = pm_op(dev->bus->pm, state);
                } else if (dev->bus->suspend) {
                        pm_dev_dbg(dev, state, "legacy bus ");
                        error = legacy_suspend(dev, state, dev->bus->suspend,
                                                "legacy bus ");
                        goto End;
                }
        }

 Run:
        if (!callback && dev->driver && dev->driver->pm) {
                info = "driver ";
                callback = pm_op(dev->driver->pm, state);
        }

        error = dpm_run_callback(callback, dev, state, info);

 End:
        if (!error) {
                struct device *parent = dev->parent;

                dev->power.is_suspended = true;
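                /*
                 * Propagate wakeup_path to the parent and make sure the
                 * parent can no longer take the direct_complete shortcut now
                 * that a child has gone through full suspend.
                 */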
                if (parent) {
                        spin_lock_irq(&parent->power.lock);

                        dev->parent->power.direct_complete = false;
                        if (dev->power.wakeup_path
                            && !dev->parent->power.ignore_children)
                                dev->parent->power.wakeup_path = true;

                        spin_unlock_irq(&parent->power.lock);
                }
        }

        device_unlock(dev);
        dpm_watchdog_clear(&wd);

 Complete:
        complete_all(&dev->power.completion);
        if (error)
                async_error = error;

        TRACE_SUSPEND(error);
        return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
        struct device *dev = (struct device *)data;
        int error;

        error = __device_suspend(dev, pm_transition, true);
        if (error) {
                dpm_save_failed_dev(dev_name(dev));
                pm_dev_err(dev, pm_transition, " async", error);
        }

        put_device(dev);
}

static int device_suspend(struct device *dev)
{
        reinit_completion(&dev->power.completion);

        if (is_async(dev)) {
                get_device(dev);
                async_schedule(async_suspend, dev);
                return 0;
        }

        return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
        ktime_t starttime = ktime_get();
        int error = 0;

        trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
        might_sleep();

        cpufreq_suspend();

        mutex_lock(&dpm_list_mtx);
        pm_transition = state;
        async_error = 0;
        while (!list_empty(&dpm_prepared_list)) {
                struct device *dev = to_device(dpm_prepared_list.prev);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                error = device_suspend(dev);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        pm_dev_err(dev, state, "", error);
                        dpm_save_failed_dev(dev_name(dev));
                        put_device(dev);
                        break;
                }
                if (!list_empty(&dev->power.entry))
                        list_move(&dev->power.entry, &dpm_suspended_list);
                put_device(dev);
                if (async_error)
                        break;
        }
        mutex_unlock(&dpm_list_mtx);
        async_synchronize_full();
        if (!error)
                error = async_error;
        if (error) {
                suspend_stats.failed_suspend++;
                dpm_save_failed_step(SUSPEND_SUSPEND);
        } else
                dpm_show_time(starttime, state, NULL);
        trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
        return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device.  No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
        int (*callback)(struct device *) = NULL;
        char *info = NULL;
        int ret = 0;

        if (dev->power.syscore)
                return 0;

        /*
         * If a device's parent goes into runtime suspend at the wrong time,
         * it won't be possible to resume the device.  To prevent this we
         * block runtime suspend here, during the prepare phase, and allow
         * it again during the complete phase.
         */
        pm_runtime_get_noresume(dev);

        device_lock(dev);

        dev->power.wakeup_path = device_may_wakeup(dev);

        if (dev->power.no_pm_callbacks) {
                ret = 1;        /* Let device go direct_complete */
                goto unlock;
        }

        if (dev->pm_domain) {
                info = "preparing power domain ";
                callback = dev->pm_domain->ops.prepare;
        } else if (dev->type && dev->type->pm) {
                info = "preparing type ";
                callback = dev->type->pm->prepare;
        } else if (dev->class && dev->class->pm) {
                info = "preparing class ";
                callback = dev->class->pm->prepare;
        } else if (dev->bus && dev->bus->pm) {
                info = "preparing bus ";
                callback = dev->bus->pm->prepare;
        }

        if (!callback && dev->driver && dev->driver->pm) {
                info = "preparing driver ";
                callback = dev->driver->pm->prepare;
        }

        if (callback)
                ret = callback(dev);

unlock:
        device_unlock(dev);

        if (ret < 0) {
                suspend_report_result(callback, ret);
                pm_runtime_put(dev);
                return ret;
        }
        /*
         * A positive return value from ->prepare() means "this device appears
         * to be runtime-suspended and its state is fine, so if it really is
         * runtime-suspended, you can leave it in that state provided that you
         * will do the same thing with all of its descendants".  This only
         * applies to suspend transitions, however.
         */
        spin_lock_irq(&dev->power.lock);
        dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
        spin_unlock_irq(&dev->power.lock);
        return 0;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
        int error = 0;

        trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
        might_sleep();

        mutex_lock(&dpm_list_mtx);
        while (!list_empty(&dpm_list)) {
                struct device *dev = to_device(dpm_list.next);

                get_device(dev);
                mutex_unlock(&dpm_list_mtx);

                trace_device_pm_callback_start(dev, "", state.event);
                error = device_prepare(dev, state);
                trace_device_pm_callback_end(dev, error);

                mutex_lock(&dpm_list_mtx);
                if (error) {
                        if (error == -EAGAIN) {
                                put_device(dev);
                                error = 0;
                                continue;
                        }
                        printk(KERN_INFO "PM: Device %s not prepared for power transition: code %d\n",
                                dev_name(dev), error);
                        put_device(dev);
                        break;
                }
                dev->power.is_prepared = true;
                if (!list_empty(&dev->power.entry))
                        list_move_tail(&dev->power.entry, &dpm_prepared_list);
                put_device(dev);
        }
        mutex_unlock(&dpm_list_mtx);
        trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
        return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
        int error;

        error = dpm_prepare(state);
        if (error) {
                suspend_stats.failed_prepare++;
                dpm_save_failed_step(SUSPEND_PREPARE);
        } else
                error = dpm_suspend(state);
        return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);

void __suspend_report_result(const char *function, void *fn, int ret)
{
        if (ret)
                printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @dev: Device to wait for.
 * @subordinate: Device that needs to wait for @dev.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
        dpm_wait(dev, subordinate->power.async_suspend);
        return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);

/**
 * dpm_for_each_dev - device iterator.
 * @data: data for the callback.
 * @fn: function to be called for each device.
 *
 * Iterate over devices in dpm_list, and call @fn for each device,
 * passing it @data.
 */
void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
{
        struct device *dev;

        if (!fn)
                return;

        device_pm_lock();
        list_for_each_entry(dev, &dpm_list, power.entry)
                fn(dev, data);
        device_pm_unlock();
}
EXPORT_SYMBOL_GPL(dpm_for_each_dev);

static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
{
        if (!ops)
                return true;

        return !ops->prepare &&
               !ops->suspend &&
               !ops->suspend_late &&
               !ops->suspend_noirq &&
               !ops->resume_noirq &&
               !ops->resume_early &&
               !ops->resume &&
               !ops->complete;
}

void device_pm_check_callbacks(struct device *dev)
{
        spin_lock_irq(&dev->power.lock);
        dev->power.no_pm_callbacks =
                (!dev->bus || pm_ops_is_empty(dev->bus->pm)) &&
                (!dev->class || pm_ops_is_empty(dev->class->pm)) &&
                (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
                (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
                (!dev->driver || pm_ops_is_empty(dev->driver->pm));
        spin_unlock_irq(&dev->power.lock);
}