/*
 * drivers/base/power/main.c - Where the driver meets power management.
 *
 * Copyright (c) 2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 *
 * This file is released under the GPLv2
 *
 *
 * The driver model core calls device_pm_add() when a device is registered.
 * This will initialize the embedded device_pm_info object in the device
 * and add it to the list of power-controlled devices. sysfs entries for
 * controlling device power management will also be added.
 *
 * A separate list is used for keeping track of power info, because the power
 * domain dependencies may differ from the ancestral dependencies that the
 * subsystem list maintains.
 */

#include <linux/device.h>
#include <linux/kallsyms.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/resume-trace.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/async.h>
#include <linux/suspend.h>
#include <linux/timer.h>

#include "../base.h"
#include "power.h"

typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mtx.
 */
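
/*
 * Example (illustrative sketch): the list walkers below honor this rule
 * by pinning each device with get_device() and dropping dpm_list_mtx
 * before doing any per-device work that may take the device lock:
 *
 *	mutex_lock(&dpm_list_mtx);
 *	while (!list_empty(&dpm_list)) {
 *		struct device *dev = to_device(dpm_list.next);
 *
 *		get_device(dev);
 *		mutex_unlock(&dpm_list_mtx);
 *
 *		error = device_prepare(dev, state);	(may lock the device)
 *
 *		mutex_lock(&dpm_list_mtx);
 *		put_device(dev);
 *	}
 *	mutex_unlock(&dpm_list_mtx);
 *
 * This is the shape of dpm_prepare() and the other dpm_* loops below.
 */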

LIST_HEAD(dpm_list);
LIST_HEAD(dpm_prepared_list);
LIST_HEAD(dpm_suspended_list);
LIST_HEAD(dpm_late_early_list);
LIST_HEAD(dpm_noirq_list);

struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;

struct dpm_watchdog {
	struct device *dev;
	struct task_struct *tsk;
	struct timer_list timer;
};

static int async_error;

/**
 * device_pm_init - Initialize the PM-related part of a device object.
 * @dev: Device object being initialized.
 */
void device_pm_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	init_completion(&dev->power.completion);
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	spin_lock_init(&dev->power.lock);
	pm_runtime_init(dev);
	INIT_LIST_HEAD(&dev->power.entry);
	dev->power.power_state = PMSG_INVALID;
}

/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}

/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	mutex_lock(&dpm_list_mtx);
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev_pm_qos_constraints_init(dev);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	dev_pm_qos_constraints_destroy(dev);
	list_del_init(&dev->power.entry);
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
}

/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}

/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}

static ktime_t initcall_debug_start(struct device *dev)
{
	ktime_t calltime = ktime_set(0, 0);

	if (initcall_debug) {
		pr_info("calling %s+ @ %i, parent: %s\n",
			dev_name(dev), task_pid_nr(current),
			dev->parent ? dev_name(dev->parent) : "none");
		calltime = ktime_get();
	}

	return calltime;
}

static void initcall_debug_report(struct device *dev, ktime_t calltime,
				  int error)
{
	ktime_t delta, rettime;

	if (initcall_debug) {
		rettime = ktime_get();
		delta = ktime_sub(rettime, calltime);
		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
			error, (unsigned long long)ktime_to_ns(delta) >> 10);
	}
}

/**
 * dpm_wait - Wait for a PM operation to complete.
 * @dev: Device to wait for.
 * @async: If unset, wait only if the device's power.async_suspend flag is set.
 */
static void dpm_wait(struct device *dev, bool async)
{
	if (!dev)
		return;

	if (async || (pm_async_enabled && dev->power.async_suspend))
		wait_for_completion(&dev->power.completion);
}

static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}

static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}

/**
 * pm_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 */
static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend;
	case PM_EVENT_RESUME:
		return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw;
	case PM_EVENT_RESTORE:
		return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
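
/*
 * Example (illustrative sketch): pm_op() and its _late_early/_noirq
 * variants simply select one member of a subsystem's or driver's
 * struct dev_pm_ops.  A hypothetical driver (the foo_* names are made
 * up for illustration) could provide system sleep callbacks with:
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return 0;
 *	}
 *
 *	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
 *
 * where foo_suspend() would quiesce the hardware and foo_resume() would
 * re-initialize it.  SIMPLE_DEV_PM_OPS() fills in both the suspend/resume
 * and the hibernation members, so pm_op(&foo_pm_ops, PMSG_SUSPEND)
 * yields foo_suspend and pm_op(&foo_pm_ops, PMSG_RESUME) yields
 * foo_resume.
 */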

/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for the device while the returned callback is
 * being executed.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * The device's driver will not receive interrupts while the returned
 * callback is being executed.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}

static char *pm_verb(int event)
{
	switch (event) {
	case PM_EVENT_SUSPEND:
		return "suspend";
	case PM_EVENT_RESUME:
		return "resume";
	case PM_EVENT_FREEZE:
		return "freeze";
	case PM_EVENT_QUIESCE:
		return "quiesce";
	case PM_EVENT_HIBERNATE:
		return "hibernate";
	case PM_EVENT_THAW:
		return "thaw";
	case PM_EVENT_RESTORE:
		return "restore";
	case PM_EVENT_RECOVER:
		return "recover";
	default:
		return "(unknown PM event)";
	}
}

static void pm_dev_dbg(struct device *dev, pm_message_t state, char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}

static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
			int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
		dev_name(dev), pm_verb(state.event), info, error);
}

static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	if (usecs == 0)
		usecs = 1;
	pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
		info ?: "", info ? " " : "", pm_verb(state.event),
		usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}

static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	error = cb(dev);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * dpm_wd_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog reference (a struct dpm_watchdog pointer) cast to
 *	unsigned long.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover, so BUG() out for
 * a crash dump.
 */
static void dpm_wd_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;
	struct device *dev = wd->dev;
	struct task_struct *tsk = wd->tsk;

	dev_emerg(dev, "**** DPM device timeout ****\n");
	show_stack(tsk, NULL);

	BUG();
}

/**
 * dpm_wd_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_wd_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = get_current();

	init_timer_on_stack(timer);
	timer->expires = jiffies + HZ * 12;
	timer->function = dpm_wd_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_wd_clear - Disable pm watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_wd_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
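
/*
 * Example (illustrative sketch): the watchdog above is meant to live on
 * the stack and be armed around the potentially long-running part of a
 * transition, so a driver that hangs hits dpm_wd_handler() after the
 * 12-second timeout instead of stalling suspend/resume forever:
 *
 *	struct dpm_watchdog wd;
 *
 *	dpm_wd_set(&wd, dev);
 *	error = dpm_run_callback(callback, dev, state, info);
 *	dpm_wd_clear(&wd);
 *
 * device_resume() and __device_suspend() below follow this pattern.
 */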

/*------------------------- Resume routines -------------------------*/

/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Call the "noirq" resume handlers for all devices in dpm_noirq_list and
 * enable device drivers to receive interrupts.
 */
static void dpm_resume_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_noirq_list)) {
		struct device *dev = to_device(dpm_noirq_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_noirq(dev, state);
		if (error) {
			suspend_stats.failed_resume_noirq++;
			dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " noirq", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "noirq");
	resume_device_irqs();
}

/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_resume_early(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

	TRACE_RESUME(error);
	return error;
}

/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static void dpm_resume_early(pm_message_t state)
{
	ktime_t starttime = ktime_get();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.next);
		int error;

		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		mutex_unlock(&dpm_list_mtx);

		error = device_resume_early(dev, state);
		if (error) {
			suspend_stats.failed_resume_early++;
			dpm_save_failed_step(SUSPEND_RESUME_EARLY);
			dpm_save_failed_dev(dev_name(dev));
			pm_dev_err(dev, state, " early", error);
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	dpm_show_time(starttime, state, "early");
}

/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);

/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	bool put = false;
	struct dpm_watchdog wd;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	dpm_wait(dev->parent, async);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;
	dpm_wd_set(&wd, dev);

	if (!dev->power.is_suspended)
		goto Unlock;

	pm_runtime_enable(dev);
	put = true;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_wd_clear(&wd);
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	if (put)
		pm_runtime_put_sync(dev);

	return error;
}

static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}

static bool is_async(struct device *dev)
{
	return dev->power.async_suspend && pm_async_enabled
		&& !pm_trace_is_enabled();
}
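
/*
 * Example (illustrative sketch): a device opts in to the asynchronous
 * path tested by is_async() with the helper from <linux/pm.h>, typically
 * at probe time:
 *
 *	device_enable_async_suspend(dev);
 *
 * Provided pm_async_enabled is set (/sys/power/pm_async) and PM tracing
 * is off, such a device is then suspended and resumed from an async
 * thread, ordered only by the parent/child waits in dpm_wait() and
 * dpm_wait_for_children().
 */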

/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		INIT_COMPLETION(dev->power.completion);
		if (is_async(dev)) {
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	dpm_show_time(starttime, state, NULL);
}

/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	char *info = NULL;

	device_lock(dev);

	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);
}

/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		mutex_unlock(&dpm_list_mtx);

		device_complete(dev, state);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);
}

/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);


/*------------------------- Suspend routines -------------------------*/

/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}

/**
 * device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Prevent device drivers from receiving interrupts and call the "noirq" suspend
 * handlers for all non-sysdev devices.
 */
static int dpm_suspend_noirq(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	suspend_device_irqs();
	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			suspend_stats.failed_suspend_noirq++;
			dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_noirq(resume_event(state));
	else
		dpm_show_time(starttime, state, "noirq");
	return error;
}

/**
 * device_suspend_late - Execute a "late suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runtime PM is disabled for @dev while this function is being executed.
 */
static int device_suspend_late(struct device *dev, pm_message_t state)
{
	pm_callback_t callback = NULL;
	char *info = NULL;

	if (dev->pm_domain) {
		info = "late power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "late type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "late class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "late bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "late driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	return dpm_run_callback(callback, dev, state, info);
}

/**
 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
static int dpm_suspend_late(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_suspended_list)) {
		struct device *dev = to_device(dpm_suspended_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_late(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " late", error);
			suspend_stats.failed_suspend_late++;
			dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_late_early_list);
		put_device(dev);

		if (pm_wakeup_pending()) {
			error = -EBUSY;
			break;
		}
	}
	mutex_unlock(&dpm_list_mtx);
	if (error)
		dpm_resume_early(resume_event(state));
	else
		dpm_show_time(starttime, state, "late");

	return error;
}

/**
 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend_end(pm_message_t state)
{
	int error = dpm_suspend_late(state);
	if (error)
		return error;

	error = dpm_suspend_noirq(state);
	if (error) {
		dpm_resume_early(resume_event(state));
		return error;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dpm_suspend_end);

/**
 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
 * @dev: Device to suspend.
 * @state: PM transition of the system being carried out.
 * @cb: Suspend callback to execute.
 */
static int legacy_suspend(struct device *dev, pm_message_t state,
			  int (*cb)(struct device *dev, pm_message_t state))
{
	int error;
	ktime_t calltime;

	calltime = initcall_debug_start(dev);

	error = cb(dev, state);
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error);

	return error;
}

/**
 * __device_suspend - Execute "suspend" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 */
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	char *info = NULL;
	int error = 0;
	struct dpm_watchdog wd;

	dpm_wait_for_children(dev, async);

	if (async_error)
		goto Complete;

	pm_runtime_get_noresume(dev);
	if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		pm_runtime_put_sync(dev);
		async_error = -EBUSY;
		goto Complete;
	}

	dpm_wd_set(&wd, dev);

	device_lock(dev);

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Run;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Run;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Run;
		} else if (dev->class->suspend) {
			pm_dev_dbg(dev, state, "legacy class ");
			error = legacy_suspend(dev, state, dev->class->suspend);
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->suspend) {
			pm_dev_dbg(dev, state, "legacy bus ");
			error = legacy_suspend(dev, state, dev->bus->suspend);
			goto End;
		}
	}

 Run:
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);

 End:
	if (!error) {
		dev->power.is_suspended = true;
		if (dev->power.wakeup_path
		    && dev->parent && !dev->parent->power.ignore_children)
			dev->parent->power.wakeup_path = true;
	}

	device_unlock(dev);

	dpm_wd_clear(&wd);

 Complete:
	complete_all(&dev->power.completion);

	if (error) {
		pm_runtime_put_sync(dev);
		async_error = error;
	} else if (dev->power.is_suspended) {
		__pm_runtime_disable(dev, false);
	}

	return error;
}

static void async_suspend(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = __device_suspend(dev, pm_transition, true);
	if (error) {
		dpm_save_failed_dev(dev_name(dev));
		pm_dev_err(dev, pm_transition, " async", error);
	}

	put_device(dev);
}

static int device_suspend(struct device *dev)
{
	INIT_COMPLETION(dev->power.completion);

	if (pm_async_enabled && dev->power.async_suspend) {
		get_device(dev);
		async_schedule(async_suspend, dev);
		return 0;
	}

	return __device_suspend(dev, pm_transition, false);
}

/**
 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_suspend(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, "", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_suspended_list);
		put_device(dev);
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	async_synchronize_full();
	if (!error)
		error = async_error;
	if (error) {
		suspend_stats.failed_suspend++;
		dpm_save_failed_step(SUSPEND_SUSPEND);
	} else
		dpm_show_time(starttime, state, NULL);
	return error;
}

/**
 * device_prepare - Prepare a device for system power transition.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for given device. No new children of the
 * device may be registered after this function has returned.
 */
static int device_prepare(struct device *dev, pm_message_t state)
{
	int (*callback)(struct device *) = NULL;
	char *info = NULL;
	int error = 0;

	device_lock(dev);

	dev->power.wakeup_path = device_may_wakeup(dev);

	if (dev->pm_domain) {
		info = "preparing power domain ";
		callback = dev->pm_domain->ops.prepare;
	} else if (dev->type && dev->type->pm) {
		info = "preparing type ";
		callback = dev->type->pm->prepare;
	} else if (dev->class && dev->class->pm) {
		info = "preparing class ";
		callback = dev->class->pm->prepare;
	} else if (dev->bus && dev->bus->pm) {
		info = "preparing bus ";
		callback = dev->bus->pm->prepare;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "preparing driver ";
		callback = dev->driver->pm->prepare;
	}

	if (callback) {
		error = callback(dev);
		suspend_report_result(callback, error);
	}

	device_unlock(dev);

	return error;
}

/**
 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->prepare() callback(s) for all devices.
 */
int dpm_prepare(pm_message_t state)
{
	int error = 0;

	might_sleep();

	mutex_lock(&dpm_list_mtx);
	while (!list_empty(&dpm_list)) {
		struct device *dev = to_device(dpm_list.next);

		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_prepare(dev, state);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			if (error == -EAGAIN) {
				put_device(dev);
				error = 0;
				continue;
			}
			printk(KERN_INFO "PM: Device %s not prepared "
				"for power transition: code %d\n",
				dev_name(dev), error);
			put_device(dev);
			break;
		}
		dev->power.is_prepared = true;
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	return error;
}

/**
 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
 * @state: PM transition of the system being carried out.
 *
 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
 * callbacks for them.
 */
int dpm_suspend_start(pm_message_t state)
{
	int error;

	error = dpm_prepare(state);
	if (error) {
		suspend_stats.failed_prepare++;
		dpm_save_failed_step(SUSPEND_PREPARE);
	} else
		error = dpm_suspend(state);
	return error;
}
EXPORT_SYMBOL_GPL(dpm_suspend_start);
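
/*
 * Example (illustrative sketch): the system sleep core (see
 * kernel/power/suspend.c) drives a full transition through the entry
 * points exported by this file, roughly:
 *
 *	error = dpm_suspend_start(PMSG_SUSPEND);
 *	if (!error) {
 *		error = dpm_suspend_end(PMSG_SUSPEND);
 *		if (!error) {
 *			(... enter the sleep state ...)
 *			dpm_resume_start(PMSG_RESUME);
 *		}
 *		dpm_resume_end(PMSG_RESUME);
 *	}
 *
 * i.e. prepare and suspend on the way down, then the late and noirq
 * phases, with the mirror-image resume phases on the way back up.
 */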

void __suspend_report_result(const char *function, void *fn, int ret)
{
	if (ret)
		printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
}
EXPORT_SYMBOL_GPL(__suspend_report_result);

/**
 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
 * @subordinate: Device that needs to wait for @dev.
 * @dev: Device to wait for.
 */
int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
{
	dpm_wait(dev, subordinate->power.async_suspend);
	return async_error;
}
EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
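
/*
 * Example (illustrative sketch): a driver whose device must not be
 * handled before some other device can use device_pm_wait_for_dev()
 * from its own callback.  The foo_* names below are made up for
 * illustration:
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *		int error;
 *
 *		error = device_pm_wait_for_dev(dev, foo->companion);
 *		if (error)
 *			return error;
 *
 *		return foo_hw_init(foo);
 *	}
 *
 * The helper simply waits on the companion's power.completion (honoring
 * the caller's async_suspend setting) and then reports any async_error
 * already recorded by the PM core.
 */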