blob: 6aac14bca3c429d93e8c324cdf99a4fa07c49d0a [file] [log] [blame]
rjw1f884582022-01-06 17:20:42 +08001/*
2 * drivers/base/power/main.c - Where the driver meets power management.
3 *
4 * Copyright (c) 2003 Patrick Mochel
5 * Copyright (c) 2003 Open Source Development Lab
6 *
7 * This file is released under the GPLv2
8 *
9 *
10 * The driver model core calls device_pm_add() when a device is registered.
11 * This will initialize the embedded device_pm_info object in the device
12 * and add it to the list of power-controlled devices. sysfs entries for
13 * controlling device power management will also be added.
14 *
15 * A separate list is used for keeping track of power info, because the power
16 * domain dependencies may differ from the ancestral dependencies that the
17 * subsystem list maintains.
18 */
19
20#include <linux/device.h>
21#include <linux/kallsyms.h>
22#include <linux/export.h>
23#include <linux/mutex.h>
24#include <linux/pm.h>
25#include <linux/pm_runtime.h>
26#include <linux/pm-trace.h>
27#include <linux/pm_wakeirq.h>
28#include <linux/interrupt.h>
29#include <linux/sched.h>
30#include <linux/sched/debug.h>
31#include <linux/async.h>
32#include <linux/suspend.h>
33#include <trace/events/power.h>
34#include <linux/cpufreq.h>
35#include <linux/cpuidle.h>
36#include <linux/timer.h>
37#include <linux/wakeup_reason.h>
38
39#include "../base.h"
40#include "power.h"
41
/* Common signature of all system sleep PM callbacks (->suspend() etc.). */
typedef int (*pm_callback_t)(struct device *);

/*
 * The entries in the dpm_list list are in a depth first order, simply
 * because children are guaranteed to be discovered after parents, and
 * are inserted at the back of the list on discovery.
 *
 * Since device_pm_add() may be called with a device lock held,
 * we must never try to acquire a device lock while holding
 * dpm_list_mutex.
 */

/* All registered devices, parents before children. */
LIST_HEAD(dpm_list);
/* Devices are moved between the lists below as they pass each phase. */
static LIST_HEAD(dpm_prepared_list);
static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);

/* Global counters of suspend/resume failures, exported via suspend_stats. */
struct suspend_stats suspend_stats;
/* Protects dpm_list and the phase lists above. */
static DEFINE_MUTEX(dpm_list_mtx);
/* PM event of the transition currently being carried out. */
static pm_message_t pm_transition;

/* First error reported by an async suspend thread; 0 if none so far. */
static int async_error;
65
66static const char *pm_verb(int event)
67{
68 switch (event) {
69 case PM_EVENT_SUSPEND:
70 return "suspend";
71 case PM_EVENT_RESUME:
72 return "resume";
73 case PM_EVENT_FREEZE:
74 return "freeze";
75 case PM_EVENT_QUIESCE:
76 return "quiesce";
77 case PM_EVENT_HIBERNATE:
78 return "hibernate";
79 case PM_EVENT_THAW:
80 return "thaw";
81 case PM_EVENT_RESTORE:
82 return "restore";
83 case PM_EVENT_RECOVER:
84 return "recover";
85 default:
86 return "(unknown PM event)";
87 }
88}
89
/**
 * device_pm_sleep_init - Initialize system suspend-related device fields.
 * @dev: Device object being initialized.
 *
 * Clears all suspend-progress flags and leaves the completion signaled,
 * so a dpm_wait() on a freshly initialized device does not block.
 */
void device_pm_sleep_init(struct device *dev)
{
	dev->power.is_prepared = false;
	dev->power.is_suspended = false;
	dev->power.is_noirq_suspended = false;
	dev->power.is_late_suspended = false;
	init_completion(&dev->power.completion);
	/* Start out signaled: nothing is in flight for this device yet. */
	complete_all(&dev->power.completion);
	dev->power.wakeup = NULL;
	INIT_LIST_HEAD(&dev->power.entry);
}
105
/**
 * device_pm_lock - Lock the list of active devices used by the PM core.
 *
 * Serializes against the device list manipulation done in this file.
 */
void device_pm_lock(void)
{
	mutex_lock(&dpm_list_mtx);
}
113
/**
 * device_pm_unlock - Unlock the list of active devices used by the PM core.
 *
 * Counterpart of device_pm_lock().
 */
void device_pm_unlock(void)
{
	mutex_unlock(&dpm_list_mtx);
}
121
/**
 * device_pm_add - Add a device to the PM core's list of active devices.
 * @dev: Device to add to the list.
 *
 * Appending to the tail keeps dpm_list in depth-first order (children
 * after parents), which the suspend/resume ordering relies on.
 */
void device_pm_add(struct device *dev)
{
	pr_debug("PM: Adding info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	device_pm_check_callbacks(dev);
	mutex_lock(&dpm_list_mtx);
	/* Registering below a prepared parent indicates a driver bug. */
	if (dev->parent && dev->parent->power.is_prepared)
		dev_warn(dev, "parent %s should not be sleeping\n",
			 dev_name(dev->parent));
	list_add_tail(&dev->power.entry, &dpm_list);
	dev->power.in_dpm_list = true;
	mutex_unlock(&dpm_list_mtx);
}
139
/**
 * device_pm_remove - Remove a device from the PM core's list of active devices.
 * @dev: Device to be removed from the list.
 *
 * Signals the device's completion first so that anyone blocked in
 * dpm_wait() on it is released before the device disappears.
 */
void device_pm_remove(struct device *dev)
{
	pr_debug("PM: Removing info for %s:%s\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	complete_all(&dev->power.completion);
	mutex_lock(&dpm_list_mtx);
	list_del_init(&dev->power.entry);
	dev->power.in_dpm_list = false;
	mutex_unlock(&dpm_list_mtx);
	device_wakeup_disable(dev);
	pm_runtime_remove(dev);
	device_pm_check_callbacks(dev);
}
157
/**
 * device_pm_move_before - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come before.
 *
 * NOTE(review): caller is expected to hold dpm_list_mtx — confirm at call
 * sites; no locking is done here.
 */
void device_pm_move_before(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s before %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert before devb. */
	list_move_tail(&deva->power.entry, &devb->power.entry);
}
171
/**
 * device_pm_move_after - Move device in the PM core's list of active devices.
 * @deva: Device to move in dpm_list.
 * @devb: Device @deva should come after.
 *
 * NOTE(review): like device_pm_move_before(), assumes the caller holds
 * dpm_list_mtx.
 */
void device_pm_move_after(struct device *deva, struct device *devb)
{
	pr_debug("PM: Moving %s:%s after %s:%s\n",
		 deva->bus ? deva->bus->name : "No Bus", dev_name(deva),
		 devb->bus ? devb->bus->name : "No Bus", dev_name(devb));
	/* Delete deva from dpm_list and reinsert after devb. */
	list_move(&deva->power.entry, &devb->power.entry);
}
185
/**
 * device_pm_move_last - Move device to end of the PM core's list of devices.
 * @dev: Device to move in dpm_list.
 */
void device_pm_move_last(struct device *dev)
{
	pr_debug("PM: Moving %s:%s to end of list\n",
		 dev->bus ? dev->bus->name : "No Bus", dev_name(dev));
	list_move_tail(&dev->power.entry, &dpm_list);
}
196
197static ktime_t initcall_debug_start(struct device *dev)
198{
199 ktime_t calltime = 0;
200
201 if (pm_print_times_enabled) {
202 pr_info("calling %s+ @ %i, parent: %s\n",
203 dev_name(dev), task_pid_nr(current),
204 dev->parent ? dev_name(dev->parent) : "none");
205 calltime = ktime_get();
206 }
207
208 return calltime;
209}
210
211static void initcall_debug_report(struct device *dev, ktime_t calltime,
212 int error, pm_message_t state,
213 const char *info)
214{
215 ktime_t rettime;
216 s64 nsecs;
217
218 rettime = ktime_get();
219 nsecs = (s64) ktime_to_ns(ktime_sub(rettime, calltime));
220
221 if (pm_print_times_enabled) {
222 pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
223 error, (unsigned long long)nsecs >> 10);
224 }
225}
226
227/**
228 * dpm_wait - Wait for a PM operation to complete.
229 * @dev: Device to wait for.
230 * @async: If unset, wait only if the device's power.async_suspend flag is set.
231 */
232static void dpm_wait(struct device *dev, bool async)
233{
234 if (!dev)
235 return;
236
237 if (async || (pm_async_enabled && dev->power.async_suspend))
238 wait_for_completion(&dev->power.completion);
239}
240
/*
 * device_for_each_child() callback: wait for one child and keep walking
 * (always returns 0 so the iteration is never aborted).
 */
static int dpm_wait_fn(struct device *dev, void *async_ptr)
{
	dpm_wait(dev, *((bool *)async_ptr));
	return 0;
}
246
/* Wait until all children of @dev have completed their PM operations. */
static void dpm_wait_for_children(struct device *dev, bool async)
{
	device_for_each_child(dev, &async, dpm_wait_fn);
}
251
/*
 * Wait for every non-dormant supplier of @dev to finish its PM operation.
 * Runs under the device-links SRCU read lock.
 */
static void dpm_wait_for_suppliers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * If the supplier goes away right after we've checked the link to it,
	 * we'll wait for its completion to change the state, but that's fine,
	 * because the only things that will block as a result are the SRCU
	 * callbacks freeing the link objects for the links in the list we're
	 * walking.
	 */
	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->supplier, async);

	device_links_read_unlock(idx);
}
272
/*
 * Wait for the parent and all suppliers of @dev to finish resuming.
 *
 * Returns false if @dev was deleted while waiting (in which case the
 * caller must not touch it any further), true otherwise.
 */
static bool dpm_wait_for_superior(struct device *dev, bool async)
{
	struct device *parent;

	/*
	 * If the device is resumed asynchronously and the parent's callback
	 * deletes both the device and the parent itself, the parent object may
	 * be freed while this function is running, so avoid that by reference
	 * counting the parent once more unless the device has been deleted
	 * already (in which case return right away).
	 */
	mutex_lock(&dpm_list_mtx);

	if (!device_pm_initialized(dev)) {
		mutex_unlock(&dpm_list_mtx);
		return false;
	}

	parent = get_device(dev->parent);

	mutex_unlock(&dpm_list_mtx);

	/* Both calls tolerate a NULL parent. */
	dpm_wait(parent, async);
	put_device(parent);

	dpm_wait_for_suppliers(dev, async);

	/*
	 * If the parent's callback has deleted the device, attempting to resume
	 * it would be invalid, so avoid doing that then.
	 */
	return device_pm_initialized(dev);
}
306
/*
 * Wait for every non-dormant consumer of @dev to finish its PM operation.
 * Mirror image of dpm_wait_for_suppliers(), used on the suspend path.
 */
static void dpm_wait_for_consumers(struct device *dev, bool async)
{
	struct device_link *link;
	int idx;

	idx = device_links_read_lock();

	/*
	 * The status of a device link can only be changed from "dormant" by a
	 * probe, but that cannot happen during system suspend/resume.  In
	 * theory it can change to "dormant" at that time, but then it is
	 * reasonable to wait for the target device anyway (eg. if it goes
	 * away, it's better to wait for it to go away completely and then
	 * continue instead of trying to continue in parallel with its
	 * unregistration).
	 */
	list_for_each_entry_rcu(link, &dev->links.consumers, s_node)
		if (READ_ONCE(link->status) != DL_STATE_DORMANT)
			dpm_wait(link->consumer, async);

	device_links_read_unlock(idx);
}
329
/* Wait for everything that must suspend after @dev: children and consumers. */
static void dpm_wait_for_subordinate(struct device *dev, bool async)
{
	dpm_wait_for_children(dev, async);
	dpm_wait_for_consumers(dev, async);
}
335
336/**
337 * pm_op - Return the PM operation appropriate for given PM event.
338 * @ops: PM operations to choose from.
339 * @state: PM transition of the system being carried out.
340 */
341static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
342{
343 switch (state.event) {
344#ifdef CONFIG_SUSPEND
345 case PM_EVENT_SUSPEND:
346 return ops->suspend;
347 case PM_EVENT_RESUME:
348 return ops->resume;
349#endif /* CONFIG_SUSPEND */
350#ifdef CONFIG_HIBERNATE_CALLBACKS
351 case PM_EVENT_FREEZE:
352 case PM_EVENT_QUIESCE:
353 return ops->freeze;
354 case PM_EVENT_HIBERNATE:
355 return ops->poweroff;
356 case PM_EVENT_THAW:
357 case PM_EVENT_RECOVER:
358 return ops->thaw;
359 break;
360 case PM_EVENT_RESTORE:
361 return ops->restore;
362#endif /* CONFIG_HIBERNATE_CALLBACKS */
363 }
364
365 return NULL;
366}
367
/**
 * pm_late_early_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "late suspend" / "early resume" variant of the callback.
 * Runtime PM is disabled for the device while the returned callback runs.
 * Returns NULL for events not handled in this configuration.
 */
static pm_callback_t pm_late_early_op(const struct dev_pm_ops *ops,
				      pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_late;
	case PM_EVENT_RESUME:
		return ops->resume_early;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_late;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_late;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_early;
	case PM_EVENT_RESTORE:
		return ops->restore_early;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
401
/**
 * pm_noirq_op - Return the PM operation appropriate for given PM event.
 * @ops: PM operations to choose from.
 * @state: PM transition of the system being carried out.
 *
 * Selects the "noirq" variant of the callback.  The driver of the device
 * will not receive interrupts while the returned callback is running.
 * Returns NULL for events not handled in this configuration.
 */
static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
	switch (state.event) {
#ifdef CONFIG_SUSPEND
	case PM_EVENT_SUSPEND:
		return ops->suspend_noirq;
	case PM_EVENT_RESUME:
		return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return ops->freeze_noirq;
	case PM_EVENT_HIBERNATE:
		return ops->poweroff_noirq;
	case PM_EVENT_THAW:
	case PM_EVENT_RECOVER:
		return ops->thaw_noirq;
	case PM_EVENT_RESTORE:
		return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
	}

	return NULL;
}
435
/* Debug-log the start of a device PM callback, noting wakeup capability. */
static void pm_dev_dbg(struct device *dev, pm_message_t state, const char *info)
{
	dev_dbg(dev, "%s%s%s\n", info, pm_verb(state.event),
		((state.event & PM_EVENT_SLEEP) && device_may_wakeup(dev)) ?
		", may wakeup" : "");
}
442
/* Report a device PM callback failure at error severity. */
static void pm_dev_err(struct device *dev, pm_message_t state, const char *info,
		       int error)
{
	printk(KERN_ERR "PM: Device %s failed to %s%s: error %d\n",
	       dev_name(dev), pm_verb(state.event), info, error);
}
449
/*
 * Report how long a whole suspend/resume phase took.
 * @starttime: Timestamp taken when the phase began.
 * @error: Non-zero if the phase was aborted.
 * @info: Optional phase name ("noirq", "early", ...) or NULL.
 */
static void dpm_show_time(ktime_t starttime, pm_message_t state, int error,
			  const char *info)
{
	ktime_t calltime;
	u64 usecs64;
	int usecs;

	calltime = ktime_get();
	usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
	do_div(usecs64, NSEC_PER_USEC);
	usecs = usecs64;
	/* Round sub-microsecond phases up so the log never shows 0.000. */
	if (usecs == 0)
		usecs = 1;

	pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n",
		  info ?: "", info ? " " : "", pm_verb(state.event),
		  error ? "aborted" : "complete",
		  usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
469
/*
 * Invoke one device PM callback with tracing and timing instrumentation.
 *
 * A NULL @cb is treated as success (the phase is simply skipped for the
 * device).  Returns the callback's error code.
 */
static int dpm_run_callback(pm_callback_t cb, struct device *dev,
			    pm_message_t state, const char *info)
{
	ktime_t calltime;
	int error;

	if (!cb)
		return 0;

	calltime = initcall_debug_start(dev);

	pm_dev_dbg(dev, state, info);
	trace_device_pm_callback_start(dev, info, state.event);
	error = cb(dev);
	trace_device_pm_callback_end(dev, error);
	/* Record the failing callback for /sys/power debugging. */
	suspend_report_result(cb, error);

	initcall_debug_report(dev, calltime, error, state, info);

	return error;
}
491
#ifdef CONFIG_DPM_WATCHDOG
/*
 * Per-callback watchdog state; lives on the stack of the task running
 * the suspend/resume callback being guarded.
 */
struct dpm_watchdog {
	struct device *dev;	/* device whose callback is being timed */
	struct task_struct *tsk;	/* task to dump a stack trace for */
	struct timer_list timer;
};

#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
	struct dpm_watchdog wd

/**
 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
 * @data: Watchdog object address.
 *
 * Called when a driver has timed out suspending or resuming.
 * There's not much we can do here to recover so panic() to
 * capture a crash-dump in pstore.
 */
static void dpm_watchdog_handler(unsigned long data)
{
	struct dpm_watchdog *wd = (void *)data;

	dev_emerg(wd->dev, "**** DPM device timeout ****\n");
	show_stack(wd->tsk, NULL);
	panic("%s %s: unrecoverable failure\n",
		dev_driver_string(wd->dev), dev_name(wd->dev));
}

/**
 * dpm_watchdog_set - Enable pm watchdog for given device.
 * @wd: Watchdog. Must be allocated on the stack.
 * @dev: Device to handle.
 */
static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
{
	struct timer_list *timer = &wd->timer;

	wd->dev = dev;
	wd->tsk = current;

	init_timer_on_stack(timer);
	/* use same timeout value for both suspend and resume */
	timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
	timer->function = dpm_watchdog_handler;
	timer->data = (unsigned long)wd;
	add_timer(timer);
}

/**
 * dpm_watchdog_clear - Disable suspend/resume watchdog.
 * @wd: Watchdog to disable.
 */
static void dpm_watchdog_clear(struct dpm_watchdog *wd)
{
	struct timer_list *timer = &wd->timer;

	del_timer_sync(timer);
	destroy_timer_on_stack(timer);
}
#else
/* Watchdog disabled: all three compile away to nothing. */
#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
#define dpm_watchdog_set(x, y)
#define dpm_watchdog_clear(x)
#endif
556
557/*------------------------- Resume routines -------------------------*/
558
/**
 * device_resume_noirq - Execute a "noirq resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.
 */
static int device_resume_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices skip this phase entirely. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_noirq_suspended)
		goto Out;

	/* Bail out if the device was deleted while we waited. */
	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_noirq_suspended = false;

 Out:
	/* Release anyone waiting on this device (children, consumers). */
	complete_all(&dev->power.completion);
	TRACE_RESUME(error);
	return error;
}
613
614static bool is_async(struct device *dev)
615{
616 return dev->power.async_suspend && pm_async_enabled
617 && !pm_trace_is_enabled();
618}
619
/*
 * Async-thread entry point for the noirq resume phase.  Drops the device
 * reference taken by dpm_noirq_resume_devices() when scheduling.
 */
static void async_resume_noirq(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_noirq(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
631
/*
 * Run the "noirq resume" callbacks for every device on dpm_noirq_list,
 * moving each device to dpm_late_early_list as it is processed.
 */
void dpm_noirq_resume_devices(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_noirq_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			/* Reference dropped by async_resume_noirq(). */
			get_device(dev);
			async_schedule(async_resume_noirq, dev);
		}
	}

	while (!list_empty(&dpm_noirq_list)) {
		dev = to_device(dpm_noirq_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_late_early_list);
		/* Drop the list lock while running the (sleeping) callback. */
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_noirq(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_noirq++;
				dpm_save_failed_step(SUSPEND_RESUME_NOIRQ);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " noirq", error);
			}
		}

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_resume_noirq() threads before reporting. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "noirq");
	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
680
/* Undo dpm_noirq_begin(): re-enable device IRQs, wake IRQs and cpuidle. */
void dpm_noirq_end(void)
{
	resume_device_irqs();
	device_wakeup_disarm_wake_irqs();
	cpuidle_resume();
}
687
/**
 * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and
 * allow device drivers' interrupt handlers to be called.
 */
void dpm_resume_noirq(pm_message_t state)
{
	dpm_noirq_resume_devices(state);
	dpm_noirq_end();
}
700
/**
 * device_resume_early - Execute an "early resume" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runtime PM is disabled for @dev while this function is being executed;
 * it is re-enabled on the way out, even on failure.
 */
static int device_resume_early(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	/* Syscore and direct-complete devices skip this phase entirely. */
	if (dev->power.syscore || dev->power.direct_complete)
		goto Out;

	if (!dev->power.is_late_suspended)
		goto Out;

	/* Bail out if the device was deleted while we waited. */
	if (!dpm_wait_for_superior(dev, async))
		goto Out;

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "early power domain ";
		callback = pm_late_early_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "early type ";
		callback = pm_late_early_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "early class ";
		callback = pm_late_early_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "early bus ";
		callback = pm_late_early_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "early driver ";
		callback = pm_late_early_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_late_suspended = false;

 Out:
	TRACE_RESUME(error);

	/* Balances the pm_runtime_disable() done on the late-suspend path. */
	pm_runtime_enable(dev);
	complete_all(&dev->power.completion);
	return error;
}
756
/*
 * Async-thread entry point for the early resume phase.  Drops the device
 * reference taken by dpm_resume_early() when scheduling.
 */
static void async_resume_early(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume_early(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);

	put_device(dev);
}
768
/**
 * dpm_resume_early - Execute "early resume" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 *
 * Processes dpm_late_early_list, moving each device to dpm_suspended_list.
 */
void dpm_resume_early(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;

	/*
	 * Advance the async threads upfront,
	 * in case the starting of async threads is
	 * delayed by non-async resuming devices.
	 */
	list_for_each_entry(dev, &dpm_late_early_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			/* Reference dropped by async_resume_early(). */
			get_device(dev);
			async_schedule(async_resume_early, dev);
		}
	}

	while (!list_empty(&dpm_late_early_list)) {
		dev = to_device(dpm_late_early_list.next);
		get_device(dev);
		list_move_tail(&dev->power.entry, &dpm_suspended_list);
		/* Drop the list lock while running the (sleeping) callback. */
		mutex_unlock(&dpm_list_mtx);

		if (!is_async(dev)) {
			int error;

			error = device_resume_early(dev, state, false);
			if (error) {
				suspend_stats.failed_resume_early++;
				dpm_save_failed_step(SUSPEND_RESUME_EARLY);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, " early", error);
			}
		}
		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_resume_early() threads before reporting. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, "early");
	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
}
820
/**
 * dpm_resume_start - Execute "noirq" and "early" device callbacks.
 * @state: PM transition of the system being carried out.
 *
 * Counterpart of dpm_suspend_end(); runs the first two resume phases.
 */
void dpm_resume_start(pm_message_t state)
{
	dpm_resume_noirq(state);
	dpm_resume_early(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_start);
831
/**
 * device_resume - Execute "resume" callbacks for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being resumed asynchronously.
 *
 * Runs under the device lock and a DPM watchdog.  The callback is looked
 * up in order: PM domain, type, class, bus (including legacy class/bus
 * ->resume hooks), falling back to the driver's dev_pm_ops.
 */
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;
	DECLARE_DPM_WATCHDOG_ON_STACK(wd);

	TRACE_DEVICE(dev);
	TRACE_RESUME(0);

	if (dev->power.syscore)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}

	/* Bail out if the device was deleted while we waited. */
	if (!dpm_wait_for_superior(dev, async))
		goto Complete;

	dpm_watchdog_set(&wd, dev);
	device_lock(dev);

	/*
	 * This is a fib. But we'll allow new children to be added below
	 * a resumed device, even if the device hasn't been completed yet.
	 */
	dev->power.is_prepared = false;

	if (!dev->power.is_suspended)
		goto Unlock;

	if (dev->pm_domain) {
		info = "power domain ";
		callback = pm_op(&dev->pm_domain->ops, state);
		goto Driver;
	}

	if (dev->type && dev->type->pm) {
		info = "type ";
		callback = pm_op(dev->type->pm, state);
		goto Driver;
	}

	if (dev->class) {
		if (dev->class->pm) {
			info = "class ";
			callback = pm_op(dev->class->pm, state);
			goto Driver;
		} else if (dev->class->resume) {
			/* Legacy hook takes priority over the driver's ops. */
			info = "legacy class ";
			callback = dev->class->resume;
			goto End;
		}
	}

	if (dev->bus) {
		if (dev->bus->pm) {
			info = "bus ";
			callback = pm_op(dev->bus->pm, state);
		} else if (dev->bus->resume) {
			info = "legacy bus ";
			callback = dev->bus->resume;
			goto End;
		}
	}

 Driver:
	/* Fall back to the driver's own ops when nothing above provided one. */
	if (!callback && dev->driver && dev->driver->pm) {
		info = "driver ";
		callback = pm_op(dev->driver->pm, state);
	}

 End:
	error = dpm_run_callback(callback, dev, state, info);
	dev->power.is_suspended = false;

 Unlock:
	device_unlock(dev);
	dpm_watchdog_clear(&wd);

 Complete:
	/* Release anyone waiting on this device (children, consumers). */
	complete_all(&dev->power.completion);

	TRACE_RESUME(error);

	return error;
}
928
/*
 * Async-thread entry point for the full resume phase.  Drops the device
 * reference taken by dpm_resume() when scheduling.
 */
static void async_resume(void *data, async_cookie_t cookie)
{
	struct device *dev = (struct device *)data;
	int error;

	error = device_resume(dev, pm_transition, true);
	if (error)
		pm_dev_err(dev, pm_transition, " async", error);
	put_device(dev);
}
939
/**
 * dpm_resume - Execute "resume" callbacks for non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the appropriate "resume" callback for all devices whose status
 * indicates that they are suspended.  Devices are moved from
 * dpm_suspended_list to dpm_prepared_list as they are handled.
 */
void dpm_resume(pm_message_t state)
{
	struct device *dev;
	ktime_t starttime = ktime_get();

	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
	might_sleep();

	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Kick off async resumes first so they overlap the sync ones below. */
	list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
		reinit_completion(&dev->power.completion);
		if (is_async(dev)) {
			/* Reference dropped by async_resume(). */
			get_device(dev);
			async_schedule(async_resume, dev);
		}
	}

	while (!list_empty(&dpm_suspended_list)) {
		dev = to_device(dpm_suspended_list.next);
		get_device(dev);
		if (!is_async(dev)) {
			int error;

			/* Drop the list lock around the sleeping callback. */
			mutex_unlock(&dpm_list_mtx);

			error = device_resume(dev, state, false);
			if (error) {
				suspend_stats.failed_resume++;
				dpm_save_failed_step(SUSPEND_RESUME);
				dpm_save_failed_dev(dev_name(dev));
				pm_dev_err(dev, state, "", error);
			}

			mutex_lock(&dpm_list_mtx);
		}
		/* The device may have been removed while unlocked. */
		if (!list_empty(&dev->power.entry))
			list_move_tail(&dev->power.entry, &dpm_prepared_list);
		put_device(dev);
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all async_resume() threads before reporting. */
	async_synchronize_full();
	dpm_show_time(starttime, state, 0, NULL);

	cpufreq_resume();
	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
}
996
/**
 * device_complete - Complete a PM transition for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 *
 * Runs the ->complete() callback under the device lock, then drops the
 * runtime PM reference taken on the prepare path.
 */
static void device_complete(struct device *dev, pm_message_t state)
{
	void (*callback)(struct device *) = NULL;
	const char *info = NULL;

	if (dev->power.syscore)
		return;

	device_lock(dev);

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "completing power domain ";
		callback = dev->pm_domain->ops.complete;
	} else if (dev->type && dev->type->pm) {
		info = "completing type ";
		callback = dev->type->pm->complete;
	} else if (dev->class && dev->class->pm) {
		info = "completing class ";
		callback = dev->class->pm->complete;
	} else if (dev->bus && dev->bus->pm) {
		info = "completing bus ";
		callback = dev->bus->pm->complete;
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "completing driver ";
		callback = dev->driver->pm->complete;
	}

	if (callback) {
		pm_dev_dbg(dev, state, info);
		callback(dev);
	}

	device_unlock(dev);

	/* Balances the pm_runtime_get_noresume() done on the prepare path. */
	pm_runtime_put(dev);
}
1040
/**
 * dpm_complete - Complete a PM transition for all non-sysdev devices.
 * @state: PM transition of the system being carried out.
 *
 * Execute the ->complete() callbacks for all devices whose PM status is not
 * DPM_ON (this allows new devices to be registered).
 */
void dpm_complete(pm_message_t state)
{
	struct list_head list;

	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
	might_sleep();

	INIT_LIST_HEAD(&list);
	mutex_lock(&dpm_list_mtx);
	/* Walk dpm_prepared_list back to front, collecting onto @list. */
	while (!list_empty(&dpm_prepared_list)) {
		struct device *dev = to_device(dpm_prepared_list.prev);

		get_device(dev);
		dev->power.is_prepared = false;
		list_move(&dev->power.entry, &list);
		/* Drop the list lock while running the callback. */
		mutex_unlock(&dpm_list_mtx);

		trace_device_pm_callback_start(dev, "", state.event);
		device_complete(dev, state);
		trace_device_pm_callback_end(dev, 0);

		mutex_lock(&dpm_list_mtx);
		put_device(dev);
	}
	/* Put the completed devices back onto the main list. */
	list_splice(&list, &dpm_list);
	mutex_unlock(&dpm_list_mtx);

	/* Allow device probing and trigger re-probing of deferred devices */
	device_unblock_probing();
	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
}
1079
/**
 * dpm_resume_end - Execute "resume" callbacks and complete system transition.
 * @state: PM transition of the system being carried out.
 *
 * Execute "resume" callbacks for all devices and complete the PM transition of
 * the system.
 */
void dpm_resume_end(pm_message_t state)
{
	dpm_resume(state);
	dpm_complete(state);
}
EXPORT_SYMBOL_GPL(dpm_resume_end);
1093
1094
1095/*------------------------- Suspend routines -------------------------*/
1096
/**
 * resume_event - Return a "resume" message for given "suspend" sleep state.
 * @sleep_state: PM message representing a sleep state.
 *
 * Return a PM message representing the resume event corresponding to given
 * sleep state.  Unrecognized events map to PMSG_ON.
 */
static pm_message_t resume_event(pm_message_t sleep_state)
{
	switch (sleep_state.event) {
	case PM_EVENT_SUSPEND:
		return PMSG_RESUME;
	case PM_EVENT_FREEZE:
	case PM_EVENT_QUIESCE:
		return PMSG_RECOVER;
	case PM_EVENT_HIBERNATE:
		return PMSG_RESTORE;
	}
	return PMSG_ON;
}
1117
/**
 * __device_suspend_noirq - Execute a "noirq suspend" callback for given device.
 * @dev: Device to handle.
 * @state: PM transition of the system being carried out.
 * @async: If true, the device is being suspended asynchronously.
 *
 * The driver of @dev will not receive interrupts while this function is being
 * executed.  Sets async_error on failure so the whole phase is aborted.
 */
static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async)
{
	pm_callback_t callback = NULL;
	const char *info = NULL;
	int error = 0;

	TRACE_DEVICE(dev);
	TRACE_SUSPEND(0);

	/* Children and consumers must finish suspending before we do. */
	dpm_wait_for_subordinate(dev, async);

	/* Another device already failed: abort without running callbacks. */
	if (async_error)
		goto Complete;

	/* A pending wakeup aborts the whole suspend. */
	if (pm_wakeup_pending()) {
		async_error = -EBUSY;
		goto Complete;
	}

	if (dev->power.syscore || dev->power.direct_complete)
		goto Complete;

	/* Callback lookup order: PM domain, type, class, bus, then driver. */
	if (dev->pm_domain) {
		info = "noirq power domain ";
		callback = pm_noirq_op(&dev->pm_domain->ops, state);
	} else if (dev->type && dev->type->pm) {
		info = "noirq type ";
		callback = pm_noirq_op(dev->type->pm, state);
	} else if (dev->class && dev->class->pm) {
		info = "noirq class ";
		callback = pm_noirq_op(dev->class->pm, state);
	} else if (dev->bus && dev->bus->pm) {
		info = "noirq bus ";
		callback = pm_noirq_op(dev->bus->pm, state);
	}

	if (!callback && dev->driver && dev->driver->pm) {
		info = "noirq driver ";
		callback = pm_noirq_op(dev->driver->pm, state);
	}

	error = dpm_run_callback(callback, dev, state, info);
	if (!error)
		dev->power.is_noirq_suspended = true;
	else
		async_error = error;

Complete:
	complete_all(&dev->power.completion);
	TRACE_SUSPEND(error);
	return error;
}
1179
1180static void async_suspend_noirq(void *data, async_cookie_t cookie)
1181{
1182 struct device *dev = (struct device *)data;
1183 int error;
1184
1185 error = __device_suspend_noirq(dev, pm_transition, true);
1186 if (error) {
1187 dpm_save_failed_dev(dev_name(dev));
1188 pm_dev_err(dev, pm_transition, " async", error);
1189 }
1190
1191 put_device(dev);
1192}
1193
1194static int device_suspend_noirq(struct device *dev)
1195{
1196 reinit_completion(&dev->power.completion);
1197
1198 if (is_async(dev)) {
1199 get_device(dev);
1200 async_schedule(async_suspend_noirq, dev);
1201 return 0;
1202 }
1203 return __device_suspend_noirq(dev, pm_transition, false);
1204}
1205
/**
 * dpm_noirq_begin - Enter the "noirq" phase of suspending devices.
 *
 * Pause cpuidle, arm device wakeup IRQs and suspend device interrupts.
 */
void dpm_noirq_begin(void)
{
	/* Keep CPUs out of cpuidle while the noirq phase runs. */
	cpuidle_pause();
	/* Arm the wake IRQs of wakeup-enabled devices. */
	device_wakeup_arm_wake_irqs();
	/* Suspend device interrupt delivery ("noirq" starts here). */
	suspend_device_irqs();
}
1212
/**
 * dpm_noirq_suspend_devices - Execute "noirq suspend" callbacks for all devices.
 * @state: PM transition of the system being carried out.
 */
int dpm_noirq_suspend_devices(pm_message_t state)
{
	ktime_t starttime = ktime_get();
	int error = 0;

	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
	mutex_lock(&dpm_list_mtx);
	pm_transition = state;
	async_error = 0;

	/* Walk dpm_late_early_list from the tail (children before parents). */
	while (!list_empty(&dpm_late_early_list)) {
		struct device *dev = to_device(dpm_late_early_list.prev);

		/* Keep the device alive while the list mutex is dropped. */
		get_device(dev);
		mutex_unlock(&dpm_list_mtx);

		error = device_suspend_noirq(dev);

		mutex_lock(&dpm_list_mtx);
		if (error) {
			pm_dev_err(dev, state, " noirq", error);
			dpm_save_failed_dev(dev_name(dev));
			put_device(dev);
			break;
		}
		/* Move to dpm_noirq_list unless the device was removed. */
		if (!list_empty(&dev->power.entry))
			list_move(&dev->power.entry, &dpm_noirq_list);
		put_device(dev);

		/* Stop submitting more work once an async suspend failed. */
		if (async_error)
			break;
	}
	mutex_unlock(&dpm_list_mtx);
	/* Wait for all asynchronous suspends scheduled above to finish. */
	async_synchronize_full();
	if (!error)
		error = async_error;

	if (error) {
		suspend_stats.failed_suspend_noirq++;
		dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ);
	}
	dpm_show_time(starttime, state, error, "noirq");
	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
	return error;
}
1258
1259/**
1260 * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices.
1261 * @state: PM transition of the system being carried out.
1262 *
1263 * Prevent device drivers' interrupt handlers from being called and invoke
1264 * "noirq" suspend callbacks for all non-sysdev devices.
1265 */
1266int dpm_suspend_noirq(pm_message_t state)
1267{
1268 int ret;
1269
1270 dpm_noirq_begin();
1271 ret = dpm_noirq_suspend_devices(state);
1272 if (ret)
1273 dpm_resume_noirq(resume_event(state));
1274
1275 return ret;
1276}
1277
1278/**
1279 * device_suspend_late - Execute a "late suspend" callback for given device.
1280 * @dev: Device to handle.
1281 * @state: PM transition of the system being carried out.
1282 * @async: If true, the device is being suspended asynchronously.
1283 *
1284 * Runtime PM is disabled for @dev while this function is being executed.
1285 */
1286static int __device_suspend_late(struct device *dev, pm_message_t state, bool async)
1287{
1288 pm_callback_t callback = NULL;
1289 const char *info = NULL;
1290 int error = 0;
1291
1292 TRACE_DEVICE(dev);
1293 TRACE_SUSPEND(0);
1294
1295 __pm_runtime_disable(dev, false);
1296
1297 dpm_wait_for_subordinate(dev, async);
1298
1299 if (async_error)
1300 goto Complete;
1301
1302 if (pm_wakeup_pending()) {
1303 async_error = -EBUSY;
1304 goto Complete;
1305 }
1306
1307 if (dev->power.syscore || dev->power.direct_complete)
1308 goto Complete;
1309
1310 if (dev->pm_domain) {
1311 info = "late power domain ";
1312 callback = pm_late_early_op(&dev->pm_domain->ops, state);
1313 } else if (dev->type && dev->type->pm) {
1314 info = "late type ";
1315 callback = pm_late_early_op(dev->type->pm, state);
1316 } else if (dev->class && dev->class->pm) {
1317 info = "late class ";
1318 callback = pm_late_early_op(dev->class->pm, state);
1319 } else if (dev->bus && dev->bus->pm) {
1320 info = "late bus ";
1321 callback = pm_late_early_op(dev->bus->pm, state);
1322 }
1323
1324 if (!callback && dev->driver && dev->driver->pm) {
1325 info = "late driver ";
1326 callback = pm_late_early_op(dev->driver->pm, state);
1327 }
1328
1329 error = dpm_run_callback(callback, dev, state, info);
1330 if (!error)
1331 dev->power.is_late_suspended = true;
1332 else
1333 async_error = error;
1334
1335Complete:
1336 TRACE_SUSPEND(error);
1337 complete_all(&dev->power.completion);
1338 return error;
1339}
1340
1341static void async_suspend_late(void *data, async_cookie_t cookie)
1342{
1343 struct device *dev = (struct device *)data;
1344 int error;
1345
1346 error = __device_suspend_late(dev, pm_transition, true);
1347 if (error) {
1348 dpm_save_failed_dev(dev_name(dev));
1349 pm_dev_err(dev, pm_transition, " async", error);
1350 }
1351 put_device(dev);
1352}
1353
1354static int device_suspend_late(struct device *dev)
1355{
1356 reinit_completion(&dev->power.completion);
1357
1358 if (is_async(dev)) {
1359 get_device(dev);
1360 async_schedule(async_suspend_late, dev);
1361 return 0;
1362 }
1363
1364 return __device_suspend_late(dev, pm_transition, false);
1365}
1366
1367/**
1368 * dpm_suspend_late - Execute "late suspend" callbacks for all devices.
1369 * @state: PM transition of the system being carried out.
1370 */
1371int dpm_suspend_late(pm_message_t state)
1372{
1373 ktime_t starttime = ktime_get();
1374 int error = 0;
1375
1376 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
1377 mutex_lock(&dpm_list_mtx);
1378 pm_transition = state;
1379 async_error = 0;
1380
1381 while (!list_empty(&dpm_suspended_list)) {
1382 struct device *dev = to_device(dpm_suspended_list.prev);
1383
1384 get_device(dev);
1385 mutex_unlock(&dpm_list_mtx);
1386
1387 error = device_suspend_late(dev);
1388
1389 mutex_lock(&dpm_list_mtx);
1390 if (!list_empty(&dev->power.entry))
1391 list_move(&dev->power.entry, &dpm_late_early_list);
1392
1393 if (error) {
1394 pm_dev_err(dev, state, " late", error);
1395 dpm_save_failed_dev(dev_name(dev));
1396 put_device(dev);
1397 break;
1398 }
1399 put_device(dev);
1400
1401 if (async_error)
1402 break;
1403 }
1404 mutex_unlock(&dpm_list_mtx);
1405 async_synchronize_full();
1406 if (!error)
1407 error = async_error;
1408 if (error) {
1409 suspend_stats.failed_suspend_late++;
1410 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
1411 dpm_resume_early(resume_event(state));
1412 }
1413 dpm_show_time(starttime, state, error, "late");
1414 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
1415 return error;
1416}
1417
1418/**
1419 * dpm_suspend_end - Execute "late" and "noirq" device suspend callbacks.
1420 * @state: PM transition of the system being carried out.
1421 */
1422int dpm_suspend_end(pm_message_t state)
1423{
1424 int error = dpm_suspend_late(state);
1425 if (error)
1426 return error;
1427
1428 error = dpm_suspend_noirq(state);
1429 if (error) {
1430 dpm_resume_early(resume_event(state));
1431 return error;
1432 }
1433
1434 return 0;
1435}
1436EXPORT_SYMBOL_GPL(dpm_suspend_end);
1437
1438/**
1439 * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
1440 * @dev: Device to suspend.
1441 * @state: PM transition of the system being carried out.
1442 * @cb: Suspend callback to execute.
1443 * @info: string description of caller.
1444 */
1445static int legacy_suspend(struct device *dev, pm_message_t state,
1446 int (*cb)(struct device *dev, pm_message_t state),
1447 const char *info)
1448{
1449 int error;
1450 ktime_t calltime;
1451
1452 calltime = initcall_debug_start(dev);
1453
1454 trace_device_pm_callback_start(dev, info, state.event);
1455 error = cb(dev, state);
1456 trace_device_pm_callback_end(dev, error);
1457 suspend_report_result(cb, error);
1458
1459 initcall_debug_report(dev, calltime, error, state, info);
1460
1461 return error;
1462}
1463
/*
 * dpm_clear_suppliers_direct_complete - Clear direct_complete for suppliers.
 * @dev: Consumer device.
 *
 * Walk @dev's supplier device links and clear each supplier's
 * direct_complete flag under its power lock.
 */
static void dpm_clear_suppliers_direct_complete(struct device *dev)
{
	struct device_link *link;
	int idx;

	/* The supplier list is walked under the device-links read lock. */
	idx = device_links_read_lock();

	list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) {
		spin_lock_irq(&link->supplier->power.lock);
		link->supplier->power.direct_complete = false;
		spin_unlock_irq(&link->supplier->power.lock);
	}

	device_links_read_unlock(idx);
}
1479
1480/**
1481 * device_suspend - Execute "suspend" callbacks for given device.
1482 * @dev: Device to handle.
1483 * @state: PM transition of the system being carried out.
1484 * @async: If true, the device is being suspended asynchronously.
1485 */
1486static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1487{
1488 pm_callback_t callback = NULL;
1489 const char *info = NULL;
1490 int error = 0;
1491 char suspend_abort[MAX_SUSPEND_ABORT_LEN];
1492 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1493
1494 TRACE_DEVICE(dev);
1495 TRACE_SUSPEND(0);
1496
1497 dpm_wait_for_subordinate(dev, async);
1498
1499 if (async_error) {
1500 dev->power.direct_complete = false;
1501 goto Complete;
1502 }
1503
1504 /*
1505 * Wait for possible runtime PM transitions of the device in progress
1506 * to complete and if there's a runtime resume request pending for it,
1507 * resume it before proceeding with invoking the system-wide suspend
1508 * callbacks for it.
1509 *
1510 * If the system-wide suspend callbacks below change the configuration
1511 * of the device, they must disable runtime PM for it or otherwise
1512 * ensure that its runtime-resume callbacks will not be confused by that
1513 * change in case they are invoked going forward.
1514 */
1515 pm_runtime_barrier(dev);
1516
1517 if (pm_wakeup_pending()) {
1518 pm_get_active_wakeup_sources(suspend_abort,
1519 MAX_SUSPEND_ABORT_LEN);
1520 log_suspend_abort_reason(suspend_abort);
1521 dev->power.direct_complete = false;
1522 async_error = -EBUSY;
1523 goto Complete;
1524 }
1525
1526 if (dev->power.syscore)
1527 goto Complete;
1528
1529 /* Avoid direct_complete to let wakeup_path propagate. */
1530 if (device_may_wakeup(dev) || dev->power.wakeup_path)
1531 dev->power.direct_complete = false;
1532
1533 if (dev->power.direct_complete) {
1534 if (pm_runtime_status_suspended(dev)) {
1535 pm_runtime_disable(dev);
1536 if (pm_runtime_status_suspended(dev))
1537 goto Complete;
1538
1539 pm_runtime_enable(dev);
1540 }
1541 dev->power.direct_complete = false;
1542 }
1543
1544 dpm_watchdog_set(&wd, dev);
1545 device_lock(dev);
1546
1547 if (dev->pm_domain) {
1548 info = "power domain ";
1549 callback = pm_op(&dev->pm_domain->ops, state);
1550 goto Run;
1551 }
1552
1553 if (dev->type && dev->type->pm) {
1554 info = "type ";
1555 callback = pm_op(dev->type->pm, state);
1556 goto Run;
1557 }
1558
1559 if (dev->class) {
1560 if (dev->class->pm) {
1561 info = "class ";
1562 callback = pm_op(dev->class->pm, state);
1563 goto Run;
1564 } else if (dev->class->suspend) {
1565 pm_dev_dbg(dev, state, "legacy class ");
1566 error = legacy_suspend(dev, state, dev->class->suspend,
1567 "legacy class ");
1568 goto End;
1569 }
1570 }
1571
1572 if (dev->bus) {
1573 if (dev->bus->pm) {
1574 info = "bus ";
1575 callback = pm_op(dev->bus->pm, state);
1576 } else if (dev->bus->suspend) {
1577 pm_dev_dbg(dev, state, "legacy bus ");
1578 error = legacy_suspend(dev, state, dev->bus->suspend,
1579 "legacy bus ");
1580 goto End;
1581 }
1582 }
1583
1584 Run:
1585 if (!callback && dev->driver && dev->driver->pm) {
1586 info = "driver ";
1587 callback = pm_op(dev->driver->pm, state);
1588 }
1589
1590 error = dpm_run_callback(callback, dev, state, info);
1591
1592 End:
1593 if (!error) {
1594 struct device *parent = dev->parent;
1595
1596 dev->power.is_suspended = true;
1597 if (parent) {
1598 spin_lock_irq(&parent->power.lock);
1599
1600 dev->parent->power.direct_complete = false;
1601 if (dev->power.wakeup_path
1602 && !dev->parent->power.ignore_children)
1603 dev->parent->power.wakeup_path = true;
1604
1605 spin_unlock_irq(&parent->power.lock);
1606 }
1607 dpm_clear_suppliers_direct_complete(dev);
1608 }
1609
1610 device_unlock(dev);
1611 dpm_watchdog_clear(&wd);
1612
1613 Complete:
1614 if (error)
1615 async_error = error;
1616
1617 complete_all(&dev->power.completion);
1618 TRACE_SUSPEND(error);
1619 return error;
1620}
1621
1622static void async_suspend(void *data, async_cookie_t cookie)
1623{
1624 struct device *dev = (struct device *)data;
1625 int error;
1626
1627 error = __device_suspend(dev, pm_transition, true);
1628 if (error) {
1629 dpm_save_failed_dev(dev_name(dev));
1630 pm_dev_err(dev, pm_transition, " async", error);
1631 }
1632
1633 put_device(dev);
1634}
1635
1636static int device_suspend(struct device *dev)
1637{
1638 reinit_completion(&dev->power.completion);
1639
1640 if (is_async(dev)) {
1641 get_device(dev);
1642 async_schedule(async_suspend, dev);
1643 return 0;
1644 }
1645
1646 return __device_suspend(dev, pm_transition, false);
1647}
1648
1649/**
1650 * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices.
1651 * @state: PM transition of the system being carried out.
1652 */
1653int dpm_suspend(pm_message_t state)
1654{
1655 ktime_t starttime = ktime_get();
1656 int error = 0;
1657
1658 trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
1659 might_sleep();
1660
1661 cpufreq_suspend();
1662
1663 mutex_lock(&dpm_list_mtx);
1664 pm_transition = state;
1665 async_error = 0;
1666 while (!list_empty(&dpm_prepared_list)) {
1667 struct device *dev = to_device(dpm_prepared_list.prev);
1668
1669 get_device(dev);
1670 mutex_unlock(&dpm_list_mtx);
1671
1672 error = device_suspend(dev);
1673
1674 mutex_lock(&dpm_list_mtx);
1675 if (error) {
1676 pm_dev_err(dev, state, "", error);
1677 dpm_save_failed_dev(dev_name(dev));
1678 put_device(dev);
1679 break;
1680 }
1681 if (!list_empty(&dev->power.entry))
1682 list_move(&dev->power.entry, &dpm_suspended_list);
1683 put_device(dev);
1684 if (async_error)
1685 break;
1686 }
1687 mutex_unlock(&dpm_list_mtx);
1688 async_synchronize_full();
1689 if (!error)
1690 error = async_error;
1691 if (error) {
1692 suspend_stats.failed_suspend++;
1693 dpm_save_failed_step(SUSPEND_SUSPEND);
1694 }
1695 dpm_show_time(starttime, state, error, NULL);
1696 trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
1697 return error;
1698}
1699
1700/**
1701 * device_prepare - Prepare a device for system power transition.
1702 * @dev: Device to handle.
1703 * @state: PM transition of the system being carried out.
1704 *
1705 * Execute the ->prepare() callback(s) for given device. No new children of the
1706 * device may be registered after this function has returned.
1707 */
1708static int device_prepare(struct device *dev, pm_message_t state)
1709{
1710 int (*callback)(struct device *) = NULL;
1711 int ret = 0;
1712
1713 if (dev->power.syscore)
1714 return 0;
1715
1716 /*
1717 * If a device's parent goes into runtime suspend at the wrong time,
1718 * it won't be possible to resume the device. To prevent this we
1719 * block runtime suspend here, during the prepare phase, and allow
1720 * it again during the complete phase.
1721 */
1722 pm_runtime_get_noresume(dev);
1723
1724 device_lock(dev);
1725
1726 dev->power.wakeup_path = device_may_wakeup(dev);
1727
1728 if (dev->power.no_pm_callbacks) {
1729 ret = 1; /* Let device go direct_complete */
1730 goto unlock;
1731 }
1732
1733 if (dev->pm_domain)
1734 callback = dev->pm_domain->ops.prepare;
1735 else if (dev->type && dev->type->pm)
1736 callback = dev->type->pm->prepare;
1737 else if (dev->class && dev->class->pm)
1738 callback = dev->class->pm->prepare;
1739 else if (dev->bus && dev->bus->pm)
1740 callback = dev->bus->pm->prepare;
1741
1742 if (!callback && dev->driver && dev->driver->pm)
1743 callback = dev->driver->pm->prepare;
1744
1745 if (callback)
1746 ret = callback(dev);
1747
1748unlock:
1749 device_unlock(dev);
1750
1751 if (ret < 0) {
1752 suspend_report_result(callback, ret);
1753 pm_runtime_put(dev);
1754 return ret;
1755 }
1756 /*
1757 * A positive return value from ->prepare() means "this device appears
1758 * to be runtime-suspended and its state is fine, so if it really is
1759 * runtime-suspended, you can leave it in that state provided that you
1760 * will do the same thing with all of its descendants". This only
1761 * applies to suspend transitions, however.
1762 */
1763 spin_lock_irq(&dev->power.lock);
1764 dev->power.direct_complete = ret > 0 && state.event == PM_EVENT_SUSPEND;
1765 spin_unlock_irq(&dev->power.lock);
1766 return 0;
1767}
1768
1769/**
1770 * dpm_prepare - Prepare all non-sysdev devices for a system PM transition.
1771 * @state: PM transition of the system being carried out.
1772 *
1773 * Execute the ->prepare() callback(s) for all devices.
1774 */
1775int dpm_prepare(pm_message_t state)
1776{
1777 int error = 0;
1778
1779 trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
1780 might_sleep();
1781
1782 /*
1783 * Give a chance for the known devices to complete their probes, before
1784 * disable probing of devices. This sync point is important at least
1785 * at boot time + hibernation restore.
1786 */
1787 wait_for_device_probe();
1788 /*
1789 * It is unsafe if probing of devices will happen during suspend or
1790 * hibernation and system behavior will be unpredictable in this case.
1791 * So, let's prohibit device's probing here and defer their probes
1792 * instead. The normal behavior will be restored in dpm_complete().
1793 */
1794 device_block_probing();
1795
1796 mutex_lock(&dpm_list_mtx);
1797 while (!list_empty(&dpm_list)) {
1798 struct device *dev = to_device(dpm_list.next);
1799
1800 get_device(dev);
1801 mutex_unlock(&dpm_list_mtx);
1802
1803 trace_device_pm_callback_start(dev, "", state.event);
1804 error = device_prepare(dev, state);
1805 trace_device_pm_callback_end(dev, error);
1806
1807 mutex_lock(&dpm_list_mtx);
1808 if (error) {
1809 if (error == -EAGAIN) {
1810 put_device(dev);
1811 error = 0;
1812 continue;
1813 }
1814 printk(KERN_INFO "PM: Device %s not prepared "
1815 "for power transition: code %d\n",
1816 dev_name(dev), error);
1817 put_device(dev);
1818 break;
1819 }
1820 dev->power.is_prepared = true;
1821 if (!list_empty(&dev->power.entry))
1822 list_move_tail(&dev->power.entry, &dpm_prepared_list);
1823 put_device(dev);
1824 }
1825 mutex_unlock(&dpm_list_mtx);
1826 trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
1827 return error;
1828}
1829
1830/**
1831 * dpm_suspend_start - Prepare devices for PM transition and suspend them.
1832 * @state: PM transition of the system being carried out.
1833 *
1834 * Prepare all non-sysdev devices for system PM transition and execute "suspend"
1835 * callbacks for them.
1836 */
1837int dpm_suspend_start(pm_message_t state)
1838{
1839 int error;
1840
1841 error = dpm_prepare(state);
1842 if (error) {
1843 suspend_stats.failed_prepare++;
1844 dpm_save_failed_step(SUSPEND_PREPARE);
1845 } else
1846 error = dpm_suspend(state);
1847 return error;
1848}
1849EXPORT_SYMBOL_GPL(dpm_suspend_start);
1850
1851void __suspend_report_result(const char *function, void *fn, int ret)
1852{
1853 if (ret)
1854 printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret);
1855}
1856EXPORT_SYMBOL_GPL(__suspend_report_result);
1857
1858/**
1859 * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete.
1860 * @dev: Device to wait for.
1861 * @subordinate: Device that needs to wait for @dev.
1862 */
1863int device_pm_wait_for_dev(struct device *subordinate, struct device *dev)
1864{
1865 dpm_wait(dev, subordinate->power.async_suspend);
1866 return async_error;
1867}
1868EXPORT_SYMBOL_GPL(device_pm_wait_for_dev);
1869
1870/**
1871 * dpm_for_each_dev - device iterator.
1872 * @data: data for the callback.
1873 * @fn: function to be called for each device.
1874 *
1875 * Iterate over devices in dpm_list, and call @fn for each device,
1876 * passing it @data.
1877 */
1878void dpm_for_each_dev(void *data, void (*fn)(struct device *, void *))
1879{
1880 struct device *dev;
1881
1882 if (!fn)
1883 return;
1884
1885 device_pm_lock();
1886 list_for_each_entry(dev, &dpm_list, power.entry)
1887 fn(dev, data);
1888 device_pm_unlock();
1889}
1890EXPORT_SYMBOL_GPL(dpm_for_each_dev);
1891
1892static bool pm_ops_is_empty(const struct dev_pm_ops *ops)
1893{
1894 if (!ops)
1895 return true;
1896
1897 return !ops->prepare &&
1898 !ops->suspend &&
1899 !ops->suspend_late &&
1900 !ops->suspend_noirq &&
1901 !ops->resume_noirq &&
1902 !ops->resume_early &&
1903 !ops->resume &&
1904 !ops->complete;
1905}
1906
1907void device_pm_check_callbacks(struct device *dev)
1908{
1909 spin_lock_irq(&dev->power.lock);
1910 dev->power.no_pm_callbacks =
1911 (!dev->bus || (pm_ops_is_empty(dev->bus->pm) &&
1912 !dev->bus->suspend && !dev->bus->resume)) &&
1913 (!dev->class || (pm_ops_is_empty(dev->class->pm) &&
1914 !dev->class->suspend && !dev->class->resume)) &&
1915 (!dev->type || pm_ops_is_empty(dev->type->pm)) &&
1916 (!dev->pm_domain || pm_ops_is_empty(&dev->pm_domain->ops)) &&
1917 (!dev->driver || (pm_ops_is_empty(dev->driver->pm) &&
1918 !dev->driver->suspend && !dev->driver->resume));
1919 spin_unlock_irq(&dev->power.lock);
1920}